| repo_name stringlengths 5–100 | ref stringlengths 12–67 | path stringlengths 4–244 | copies stringlengths 1–8 | content stringlengths 0–1.05M ⌀ |
|---|---|---|---|---|
ds-hwang/chromium-crosswalk
|
refs/heads/master
|
third_party/WebKit/Source/bindings/scripts/idl_types.py
|
30
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""IDL type handling.
Classes:
IdlTypeBase
IdlType
IdlUnionType
IdlArrayOrSequenceType
IdlArrayType
IdlSequenceType
IdlNullableType
IdlTypes are picklable because we store them in interfaces_info.
"""
from collections import defaultdict
################################################################################
# IDL types
################################################################################
INTEGER_TYPES = frozenset([
# http://www.w3.org/TR/WebIDL/#dfn-integer-type
'byte',
'octet',
'short',
'unsigned short',
# int and unsigned are not IDL types
'long',
'unsigned long',
'long long',
'unsigned long long',
])
NUMERIC_TYPES = (INTEGER_TYPES | frozenset([
# http://www.w3.org/TR/WebIDL/#dfn-numeric-type
'float',
'unrestricted float',
'double',
'unrestricted double',
]))
# http://www.w3.org/TR/WebIDL/#dfn-primitive-type
PRIMITIVE_TYPES = (frozenset(['boolean']) | NUMERIC_TYPES)
BASIC_TYPES = (PRIMITIVE_TYPES | frozenset([
# Built-in, non-composite, non-object data types
# http://heycam.github.io/webidl/#idl-types
'DOMString',
'ByteString',
'USVString',
'Date',
# http://heycam.github.io/webidl/#idl-types
'void',
]))
TYPE_NAMES = {
# http://heycam.github.io/webidl/#dfn-type-name
'any': 'Any',
'boolean': 'Boolean',
'byte': 'Byte',
'octet': 'Octet',
'short': 'Short',
'unsigned short': 'UnsignedShort',
'long': 'Long',
'unsigned long': 'UnsignedLong',
'long long': 'LongLong',
'unsigned long long': 'UnsignedLongLong',
'float': 'Float',
'unrestricted float': 'UnrestrictedFloat',
'double': 'Double',
'unrestricted double': 'UnrestrictedDouble',
'DOMString': 'String',
'ByteString': 'ByteString',
'USVString': 'USVString',
'object': 'Object',
'Date': 'Date',
}
STRING_TYPES = frozenset([
# http://heycam.github.io/webidl/#es-interface-call (step 10.11)
# (Interface object [[Call]] method's string types.)
'String',
'ByteString',
'USVString',
])
STANDARD_CALLBACK_FUNCTIONS = frozenset([
# http://heycam.github.io/webidl/#common-Function
'Function',
# http://heycam.github.io/webidl/#common-VoidFunction
'VoidFunction',
])
################################################################################
# Inheritance
################################################################################
ancestors = defaultdict(list) # interface_name -> ancestors
def inherits_interface(interface_name, ancestor_name):
return (interface_name == ancestor_name or
ancestor_name in ancestors[interface_name])
def set_ancestors(new_ancestors):
ancestors.update(new_ancestors)
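# Illustrative usage (the interface names are example values, not taken from
# a real interfaces_info):
#   set_ancestors({'HTMLDivElement': ['HTMLElement', 'Element', 'Node']})
#   inherits_interface('HTMLDivElement', 'Node')   # True
#   inherits_interface('HTMLDivElement', 'Event')  # False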
class IdlTypeBase(object):
"""Base class for IdlType, IdlUnionType, IdlArrayOrSequenceType and IdlNullableType."""
def __str__(self):
raise NotImplementedError(
'__str__() should be defined in subclasses')
def __getattr__(self, name):
# Default undefined attributes to None (analogous to Jinja variables).
# This allows us to not define default properties in the base class, and
# allows us to relay __getattr__ in IdlNullableType to the inner type.
return None
def resolve_typedefs(self, typedefs):
raise NotImplementedError(
'resolve_typedefs should be defined in subclasses')
def idl_types(self):
"""A generator which yields IdlTypes which are referenced from |self|,
including itself."""
yield self
################################################################################
# IdlType
################################################################################
class IdlType(IdlTypeBase):
# FIXME: incorporate Nullable, etc.
# to support types like short?[] vs. short[]?, instead of treating these
# as orthogonal properties (via flags).
callback_functions = set(STANDARD_CALLBACK_FUNCTIONS)
callback_interfaces = set()
dictionaries = set()
enums = {} # name -> values
def __init__(self, base_type, is_unrestricted=False):
super(IdlType, self).__init__()
if is_unrestricted:
self.base_type = 'unrestricted %s' % base_type
else:
self.base_type = base_type
def __str__(self):
return self.base_type
def __getstate__(self):
return {
'base_type': self.base_type,
}
def __setstate__(self, state):
self.base_type = state['base_type']
@property
def is_basic_type(self):
return self.base_type in BASIC_TYPES
@property
def is_callback_function(self):
return self.base_type in IdlType.callback_functions
@property
def is_callback_interface(self):
return self.base_type in IdlType.callback_interfaces
@property
def is_dictionary(self):
return self.base_type in IdlType.dictionaries
@property
def is_enum(self):
# FIXME: add an IdlEnumType class and a resolve_enums step at end of
# IdlDefinitions constructor
return self.name in IdlType.enums
@property
def enum_values(self):
return IdlType.enums.get(self.name)
@property
def enum_type(self):
return self.name if self.is_enum else None
@property
def is_integer_type(self):
return self.base_type in INTEGER_TYPES
@property
def is_numeric_type(self):
return self.base_type in NUMERIC_TYPES
@property
def is_primitive_type(self):
return self.base_type in PRIMITIVE_TYPES
@property
def is_interface_type(self):
# Anything that is not another type is an interface type.
# http://www.w3.org/TR/WebIDL/#idl-types
# http://www.w3.org/TR/WebIDL/#idl-interface
# In C++ these are RefPtr or PassRefPtr types.
return not(self.is_basic_type or
self.is_callback_function or
self.is_dictionary or
self.is_enum or
self.name == 'Any' or
self.name == 'Object' or
self.name == 'Promise') # Promise will be basic in future
@property
def is_string_type(self):
return self.name in STRING_TYPES
@property
def name(self):
"""Return type name
http://heycam.github.io/webidl/#dfn-type-name
"""
base_type = self.base_type
return TYPE_NAMES.get(base_type, base_type)
@classmethod
def set_callback_functions(cls, new_callback_functions):
cls.callback_functions.update(new_callback_functions)
@classmethod
def set_callback_interfaces(cls, new_callback_interfaces):
cls.callback_interfaces.update(new_callback_interfaces)
@classmethod
def set_dictionaries(cls, new_dictionaries):
cls.dictionaries.update(new_dictionaries)
@classmethod
def set_enums(cls, new_enums):
cls.enums.update(new_enums)
def resolve_typedefs(self, typedefs):
# This function either returns |self| or a different object.
# FIXME: Rename typedefs_resolved().
return typedefs.get(self.base_type, self)
################################################################################
# IdlUnionType
################################################################################
class IdlUnionType(IdlTypeBase):
# http://heycam.github.io/webidl/#idl-union
# IdlUnionType has __hash__() and __eq__() methods because they are stored
# in sets.
def __init__(self, member_types):
super(IdlUnionType, self).__init__()
self.member_types = member_types
def __str__(self):
return '(' + ' or '.join(str(member_type) for member_type in self.member_types) + ')'
def __hash__(self):
return hash(self.name)
def __eq__(self, rhs):
return self.name == rhs.name
def __getstate__(self):
return {
'member_types': self.member_types,
}
def __setstate__(self, state):
self.member_types = state['member_types']
@property
def is_union_type(self):
return True
def single_matching_member_type(self, predicate):
matching_types = list(filter(predicate, self.member_types))
if len(matching_types) > 1:
raise ValueError('%s is ambiguous.' % self.name)
return matching_types[0] if matching_types else None
@property
def string_member_type(self):
return self.single_matching_member_type(
lambda member_type: (member_type.is_string_type or
member_type.is_enum))
@property
def numeric_member_type(self):
return self.single_matching_member_type(
lambda member_type: member_type.is_numeric_type)
@property
def boolean_member_type(self):
return self.single_matching_member_type(
lambda member_type: member_type.base_type == 'boolean')
@property
def as_union_type(self):
# Note: Use this to "look through" a possible IdlNullableType wrapper.
return self
@property
def name(self):
"""Return type name (or inner type name if nullable)
http://heycam.github.io/webidl/#dfn-type-name
"""
return 'Or'.join(member_type.name for member_type in self.member_types)
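# e.g. the union (DOMString or long) has member names 'String' and 'Long',
# so the union's name is 'StringOrLong'.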
def resolve_typedefs(self, typedefs):
self.member_types = [
typedefs.get(member_type, member_type)
for member_type in self.member_types]
return self
def idl_types(self):
yield self
for member_type in self.member_types:
for idl_type in member_type.idl_types():
yield idl_type
################################################################################
# IdlArrayOrSequenceType, IdlArrayType, IdlSequenceType
################################################################################
class IdlArrayOrSequenceType(IdlTypeBase):
"""Base class for IdlArrayType and IdlSequenceType."""
def __init__(self, element_type):
super(IdlArrayOrSequenceType, self).__init__()
self.element_type = element_type
def __getstate__(self):
return {
'element_type': self.element_type,
}
def __setstate__(self, state):
self.element_type = state['element_type']
def resolve_typedefs(self, typedefs):
self.element_type = self.element_type.resolve_typedefs(typedefs)
return self
@property
def is_array_or_sequence_type(self):
return True
@property
def enum_values(self):
return self.element_type.enum_values
@property
def enum_type(self):
return self.element_type.enum_type
def idl_types(self):
yield self
for idl_type in self.element_type.idl_types():
yield idl_type
class IdlArrayType(IdlArrayOrSequenceType):
def __init__(self, element_type):
super(IdlArrayType, self).__init__(element_type)
def __str__(self):
return '%s[]' % self.element_type
@property
def name(self):
return self.element_type.name + 'Array'
class IdlSequenceType(IdlArrayOrSequenceType):
def __init__(self, element_type):
super(IdlSequenceType, self).__init__(element_type)
def __str__(self):
return 'sequence<%s>' % self.element_type
@property
def name(self):
return self.element_type.name + 'Sequence'
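# e.g. long[] is named 'LongArray' and sequence<long> is named 'LongSequence'.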
################################################################################
# IdlNullableType
################################################################################
class IdlNullableType(IdlTypeBase):
def __init__(self, inner_type):
super(IdlNullableType, self).__init__()
self.inner_type = inner_type
def __str__(self):
# FIXME: Dictionary::ConversionContext::setConversionType can't
# handle the '?' in nullable types (passes nullability separately).
# Update that function to handle nullability from the type name,
# simplifying its signature.
# return str(self.inner_type) + '?'
return str(self.inner_type)
def __getattr__(self, name):
return getattr(self.inner_type, name)
def __getstate__(self):
return {
'inner_type': self.inner_type,
}
def __setstate__(self, state):
self.inner_type = state['inner_type']
@property
def is_nullable(self):
return True
@property
def name(self):
return self.inner_type.name + 'OrNull'
def resolve_typedefs(self, typedefs):
self.inner_type = self.inner_type.resolve_typedefs(typedefs)
return self
def idl_types(self):
yield self
for idl_type in self.inner_type.idl_types():
yield idl_type
|
formath/mxnet
|
refs/heads/master
|
tests/python/train/test_autograd.py
|
28
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: skip-file
from __future__ import print_function
import mxnet as mx
from mxnet import gluon
from mxnet.gluon import nn
import numpy as np
import logging
from common import get_data
from mxnet import autograd
logging.basicConfig(level=logging.DEBUG)
# define network
def get_net():
net = nn.Sequential()
net.add(nn.Dense(128, activation='relu', prefix='fc1_'))
net.add(nn.Dense(64, activation='relu', prefix='fc2_'))
net.add(nn.Dense(10, prefix='fc3_'))
return net
get_data.GetMNIST_ubyte()
batch_size = 100
train_data = mx.io.MNISTIter(
image="data/train-images-idx3-ubyte",
label="data/train-labels-idx1-ubyte",
data_shape=(784,),
label_name='sm_label',
batch_size=batch_size, shuffle=True, flat=True, silent=False, seed=10)
val_data = mx.io.MNISTIter(
image="data/t10k-images-idx3-ubyte",
label="data/t10k-labels-idx1-ubyte",
data_shape=(784,),
label_name='sm_label',
batch_size=batch_size, shuffle=True, flat=True, silent=False)
def score(net, ctx_list):
metric = mx.metric.Accuracy()
val_data.reset()
for batch in val_data:
datas = gluon.utils.split_and_load(batch.data[0], ctx_list, batch_axis=0)
labels = gluon.utils.split_and_load(batch.label[0], ctx_list, batch_axis=0)
outputs = []
for x in datas:
outputs.append(net(x))
metric.update(labels, outputs)
return metric.get()[1]
def train(net, epoch, ctx_list):
net.collect_params().initialize(mx.init.Xavier(magnitude=2.24), ctx=ctx_list)
trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': 0.5})
metric = mx.metric.Accuracy()
loss = gluon.loss.SoftmaxCrossEntropyLoss()
for i in range(epoch):
train_data.reset()
for batch in train_data:
datas = gluon.utils.split_and_load(batch.data[0], ctx_list, batch_axis=0)
labels = gluon.utils.split_and_load(batch.label[0], ctx_list, batch_axis=0)
outputs = []
with autograd.record():
for x, y in zip(datas, labels):
z = net(x)
L = loss(z, y)
L.backward()
outputs.append(z)
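# trainer.step() is passed the full batch size so Gluon normalizes the
# accumulated gradients by 1/batch_size before the SGD update, matching a
# single-context update on the whole batch even when it was split across
# several contexts.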
trainer.step(batch.data[0].shape[0])
metric.update(labels, outputs)
name, acc = metric.get()
metric.reset()
print('training acc at epoch %d: %s=%f'%(i, name, acc))
def test_autograd():
net1 = get_net()
train(net1, 5, [mx.cpu(0), mx.cpu(1)])
acc1 = score(net1, [mx.cpu(0)])
acc2 = score(net1, [mx.cpu(0), mx.cpu(1)])
assert acc1 > 0.95
assert abs(acc1 - acc2) < 0.01
net1.collect_params().save('mnist.params')
net2 = get_net()
net2.collect_params().load('mnist.params', ctx=[mx.cpu(0)])
acc3 = score(net2, [mx.cpu(0)])
assert abs(acc3 - acc1) < 0.0001
if __name__ == '__main__':
test_autograd()
|
davidemms/OrthoFinder
|
refs/heads/master
|
tests/test_consensus_tree.py
|
1
|
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 10 18:01:40 2018
@author: david
"""
import unittest
from . import tree
from . import consensus_tree as ct
import sys
sys.path.append("/home/david/workspace/p4/OrthoFinder/orthofinder/scripts/")
import tree_isomorphism as ti
taxa = "a b c d e".split()
taxa_index = {t:i for i, t in enumerate(taxa)}
a = ct.BitVector(taxa_index, "a")
b = ct.BitVector(taxa_index, "b")
c = ct.BitVector(taxa_index, "c")
d = ct.BitVector(taxa_index, "d")
e = ct.BitVector(taxa_index, "e")
class TestConsensusTree(unittest.TestCase):
def test_BitVector(self):
taxa = "a b c d e".split()
taxa_index = {t:i for i, t in enumerate(taxa)}
x = ct.BitVector(taxa_index, "a")
self.assertTrue(x.Is("a"))
self.assertFalse(x.Is("b"))
self.assertFalse(x.Is("c"))
self.assertFalse(x.Is("d"))
self.assertFalse(x.Is("e"))
y = ct.BitVector(taxa_index, "c")
self.assertFalse(y.Is("a"))
self.assertFalse(y.Is("b"))
self.assertTrue(y.Is("c"))
self.assertFalse(y.Is("d"))
self.assertFalse(y.Is("e"))
x.Add(y)
self.assertTrue(x.Is("a"))
self.assertFalse(x.Is("b"))
self.assertTrue(x.Is("c"))
self.assertFalse(x.Is("d"))
self.assertFalse(x.Is("e"))
z = ct.BitVector(taxa_index, "c")
z2 = ct.BitVector(taxa_index, "d")
z.Add(z2)
z.Add(y)
self.assertFalse(z.Is("a"))
self.assertFalse(z.Is("b"))
self.assertTrue(z.Is("c"))
self.assertTrue(z.Is("d"))
self.assertFalse(z.Is("e"))
def test_UpdateSplits(self):
all_splits = []
t = tree.Tree("((a,b),(c,d));")
taxa = "abcd"
taxa_index = {t:i for i, t in enumerate(taxa)}
ct.UpdateSplits(all_splits, t, taxa_index)
self.assertEqual(len(all_splits), 5)
s = list(map(bin, all_splits))
self.assertTrue("0b1110" in s) # a
self.assertTrue("0b10" in s) # b
self.assertTrue("0b1100" in s) #ab
self.assertTrue("0b1000" in s) #abc
self.assertTrue("0b100" in s) #abd
def test_GetCompatibleSplits(self):
x = ct.BitVector(taxa_index, "a")
x.Add(ct.BitVector(taxa_index, "b"))
x.Add(ct.BitVector(taxa_index, "c"))
y = ct.BitVector(taxa_index, "a")
y.Add(ct.BitVector(taxa_index, "b"))
z = ct.BitVector(taxa_index, "b")
z.Add(ct.BitVector(taxa_index, "e"))
all_splits = [x.Canonical(), x.Canonical(), z.Canonical(), y.Canonical(), y.Canonical(), y.Canonical()]
# x 0b00111 -> 0b11000
# y 0b00011 -> 0b11100
com_sp = ct.GetCompatibleSplits(all_splits)
self.assertEqual(len(com_sp), 2)
self.assertEqual(bin(com_sp[0]), "0b11100")
self.assertEqual(bin(com_sp[1]), "0b11000")
def test_GetConstructTree(self):
x = ct.BitVector(taxa_index, "a")
x.Add(ct.BitVector(taxa_index, "b"))
x.Add(ct.BitVector(taxa_index, "c"))
y = ct.BitVector(taxa_index, "a")
y.Add(ct.BitVector(taxa_index, "b"))
z = ct.BitVector(taxa_index, "b")
z.Add(ct.BitVector(taxa_index, "e"))
all_splits = [x.Canonical(), x.Canonical(), z.Canonical(), y.Canonical(), y.Canonical(), y.Canonical()]
com_sp = ct.GetCompatibleSplits(all_splits)
t = ct.ConstructTree(com_sp, taxa_index, taxa)
self.assertTrue(ti.IsIso_labelled_ete_nonbinary(t, tree.Tree("(((d,e),c),a,b);"), ti.Identity))
def test_GetConstructTree2(self):
x = ct.BitVector(taxa_index)
x.Add(a)
x.Add(b)
y = ct.BitVector(taxa_index)
y.Add(c)
y.Add(e)
com_sp = [x.Canonical(), y.Canonical()]
t = ct.ConstructTree(com_sp, taxa_index, taxa)
self.assertTrue(ti.IsIso_labelled_ete_nonbinary(t, tree.Tree("(a,b,((c,e),d));"), ti.Identity))
if __name__ == "__main__":
suite = unittest.TestLoader().loadTestsFromTestCase(TestConsensusTree)
unittest.TextTestRunner(verbosity=2).run(suite)
|
kirca/odoo
|
refs/heads/master
|
addons/sale_analytic_plans/sale_analytic_plans.py
|
381
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class sale_order_line(osv.osv):
_inherit = 'sale.order.line'
_columns = {
'analytics_id': fields.many2one('account.analytic.plan.instance', 'Analytic Distribution'),
}
def invoice_line_create(self, cr, uid, ids, context=None):
if context is None:
context = {}
line_obj = self.pool.get('account.invoice.line')
create_ids = super(sale_order_line, self).invoice_line_create(cr, uid, ids, context=context)
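# The parent invoice_line_create() is assumed to return the new invoice line
# ids in the same order as the sale order lines, so the counter i below pairs
# each sale line with its corresponding invoice line.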
i = 0
for line in self.browse(cr, uid, ids, context=context):
line_obj.write(cr, uid, [create_ids[i]], {'analytics_id': line.analytics_id.id})
i = i + 1
return create_ids
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
wholeGenomeSequencingAnalysisPipeline/BacPipe
|
refs/heads/master
|
SPAdes-3.13.0-Linux/bin/spades.py
|
2
|
#!/usr/bin/env python
############################################################################
# Copyright (c) 2015 Saint Petersburg State University
# Copyright (c) 2011-2014 Saint Petersburg Academic University
# All Rights Reserved
# See file LICENSE for details.
############################################################################
import os
import shutil
from site import addsitedir
from distutils import dir_util
from os.path import abspath, expanduser
import sys
import getopt
import logging
import platform
import errno
import spades_init
spades_init.init()
spades_home = spades_init.spades_home
bin_home = spades_init.bin_home
python_modules_home = spades_init.python_modules_home
ext_python_modules_home = spades_init.ext_python_modules_home
spades_version = spades_init.spades_version
import support
support.check_python_version()
from process_cfg import merge_configs, empty_config, load_config_from_file
import hammer_logic
import spades_logic
import options_storage
addsitedir(ext_python_modules_home)
if sys.version.startswith('2.'):
import pyyaml2 as pyyaml
elif sys.version.startswith('3.'):
import pyyaml3 as pyyaml
import moleculo_postprocessing
import alignment
def print_used_values(cfg, log):
def print_value(cfg, section, param, pretty_param="", margin=" "):
if not pretty_param:
pretty_param = param.capitalize().replace('_', ' ')
line = margin + pretty_param
if param in cfg[section].__dict__:
line += ": " + str(cfg[section].__dict__[param])
else:
if param.find("offset") != -1:
line += " will be auto-detected"
log.info(line)
log.info("")
# system info
log.info("System information:")
try:
log.info(" SPAdes version: " + str(spades_version).strip())
log.info(" Python version: " + ".".join(map(str, sys.version_info[0:3])))
# for more details: '[' + str(sys.version_info) + ']'
log.info(" OS: " + platform.platform())
# for more details: '[' + str(platform.uname()) + ']'
except Exception:
log.info(" Problem occurred when getting system information")
log.info("")
# main
print_value(cfg, "common", "output_dir", "", "")
if ("error_correction" in cfg) and (not "assembly" in cfg):
log.info("Mode: ONLY read error correction (without assembling)")
elif (not "error_correction" in cfg) and ("assembly" in cfg):
log.info("Mode: ONLY assembling (without read error correction)")
else:
log.info("Mode: read error correction and assembling")
if ("common" in cfg) and ("developer_mode" in cfg["common"].__dict__):
if cfg["common"].developer_mode:
log.info("Debug mode is turned ON")
else:
log.info("Debug mode is turned OFF")
log.info("")
# dataset
if "dataset" in cfg:
log.info("Dataset parameters:")
if options_storage.iontorrent:
log.info(" IonTorrent data")
if options_storage.meta:
log.info(" Metagenomic mode")
elif options_storage.large_genome:
log.info(" Large genome mode")
elif options_storage.truseq_mode:
log.info(" Illumina TruSeq mode")
elif options_storage.rna:
log.info(" RNA-seq mode")
elif options_storage.single_cell:
log.info(" Single-cell mode")
else:
log.info(" Multi-cell mode (you should set '--sc' flag if input data"\
" was obtained with MDA (single-cell) technology"\
" or --meta flag if processing metagenomic dataset)")
log.info(" Reads:")
dataset_data = pyyaml.load(open(cfg["dataset"].yaml_filename, 'r'))
dataset_data = support.relative2abs_paths(dataset_data, os.path.dirname(cfg["dataset"].yaml_filename))
support.pretty_print_reads(dataset_data, log)
# error correction
if "error_correction" in cfg:
log.info("Read error correction parameters:")
print_value(cfg, "error_correction", "max_iterations", "Iterations")
print_value(cfg, "error_correction", "qvoffset", "PHRED offset")
if cfg["error_correction"].gzip_output:
log.info(" Corrected reads will be compressed")
else:
log.info(" Corrected reads will NOT be compressed")
# assembly
if "assembly" in cfg:
log.info("Assembly parameters:")
if options_storage.auto_K_allowed():
log.info(" k: automatic selection based on read length")
else:
print_value(cfg, "assembly", "iterative_K", "k")
if options_storage.plasmid:
log.info(" Plasmid mode is turned ON")
if cfg["assembly"].disable_rr:
log.info(" Repeat resolution is DISABLED")
else:
log.info(" Repeat resolution is enabled")
if options_storage.careful:
log.info(" Mismatch careful mode is turned ON")
else:
log.info(" Mismatch careful mode is turned OFF")
if "mismatch_corrector" in cfg:
log.info(" MismatchCorrector will be used")
else:
log.info(" MismatchCorrector will be SKIPPED")
if cfg["assembly"].cov_cutoff == 'off':
log.info(" Coverage cutoff is turned OFF")
elif cfg["assembly"].cov_cutoff == 'auto':
log.info(" Coverage cutoff is turned ON and threshold will be auto-detected")
else:
log.info(" Coverage cutoff is turned ON and threshold is " + str(cfg["assembly"].cov_cutoff))
log.info("Other parameters:")
print_value(cfg, "common", "tmp_dir", "Dir for temp files")
print_value(cfg, "common", "max_threads", "Threads")
print_value(cfg, "common", "max_memory", "Memory limit (in Gb)", " ")
log.info("")
def fill_cfg(options_to_parse, log, secondary_filling=False):
skip_output_dir = secondary_filling
skip_stop_after = secondary_filling
load_processed_dataset = secondary_filling
try:
options, not_options = getopt.gnu_getopt(options_to_parse, options_storage.short_options, options_storage.long_options)
except getopt.GetoptError:
_, exc, _ = sys.exc_info()
sys.stderr.write(str(exc) + "\n")
sys.stderr.flush()
show_usage(1)
if not options:
show_usage(1)
if len(not_options) > 1:
for opt, arg in options:
if opt == "-k" and arg.strip().endswith(','):
support.error("Do not put spaces after commas in the list of k-mers sizes! Correct example: -k 21,33,55", log)
support.error("Please specify option (e.g. -1, -2, -s, etc) for the following paths: " + ", ".join(not_options[1:]) + "\n", log)
# all parameters are stored here
cfg = dict()
# dataset is stored here. We are prepared for up to MAX_LIBS_NUMBER for each type of short-reads libs
dataset_data = [{} for i in range(options_storage.MAX_LIBS_NUMBER *
len(options_storage.SHORT_READS_TYPES.keys()) +
len(options_storage.LONG_READS_TYPES))] # "[{}]*num" doesn't work here!
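# ("[{}] * num" would create num references to one shared dict:
#   libs = [{}] * 2; libs[0]['x'] = 1  ->  libs == [{'x': 1}, {'x': 1}])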
# auto detecting SPAdes mode (rna, meta, etc) if it is not a rerun (--continue or --restart-from)
if secondary_filling or not options_storage.will_rerun(options):
mode = options_storage.get_mode()
if mode is not None:
options.append(('--' + mode, ''))
# for parsing options from "previous run command"
options_storage.continue_mode = False
options_storage.k_mers = None
for opt, arg in options:
if opt == '-o':
if not skip_output_dir:
if options_storage.output_dir is not None:
support.error('-o option was specified at least twice')
options_storage.output_dir = abspath(expanduser(arg))
options_storage.dict_of_rel2abs[arg] = options_storage.output_dir
support.check_path_is_ascii(options_storage.output_dir, 'output directory')
elif opt == "--tmp-dir":
options_storage.tmp_dir = abspath(expanduser(arg))
options_storage.dict_of_rel2abs[arg] = options_storage.tmp_dir
support.check_path_is_ascii(options_storage.tmp_dir, 'directory for temporary files')
elif opt == "--configs-dir":
options_storage.configs_dir = support.check_dir_existence(arg)
elif opt == "--reference":
options_storage.reference = support.check_file_existence(arg, 'reference', log)
options_storage.developer_mode = True
elif opt == "--series-analysis":
options_storage.series_analysis = support.check_file_existence(arg, 'series-analysis', log)
elif opt == "--dataset":
options_storage.dataset_yaml_filename = support.check_file_existence(arg, 'dataset', log)
elif opt in options_storage.reads_options:
support.add_to_dataset(opt, arg, dataset_data)
elif opt == '-k':
if arg == 'auto':
options_storage.k_mers = arg
else:
options_storage.k_mers = list(map(int, arg.split(",")))
for k in options_storage.k_mers:
if k < options_storage.MIN_K or k > options_storage.MAX_K:
support.error('wrong k value ' + str(k) + ': all k values should be between %d and %d' %
(options_storage.MIN_K, options_storage.MAX_K), log)
if k % 2 == 0:
support.error('wrong k value ' + str(k) + ': all k values should be odd', log)
elif opt == "--sc":
options_storage.single_cell = True
elif opt == "--meta":
options_storage.meta = True
elif opt == "--large-genome":
options_storage.large_genome = True
elif opt == "--plasmid":
options_storage.plasmid = True
elif opt == "--rna":
options_storage.rna = True
elif opt.startswith("--ss-"): # strand specificity, RNA-Seq only
if opt == "--ss-rf":
options_storage.strand_specificity = 'rf'
elif opt == "--ss-fr":
options_storage.strand_specificity = 'fr'
elif opt == "--fast": # fast run, RNA-Seq only
options_storage.fast = True
elif opt == "--fast:false":
options_storage.fast = False
elif opt == "--iontorrent":
options_storage.iontorrent = True
elif opt == "--disable-gzip-output":
options_storage.disable_gzip_output = True
elif opt == "--disable-gzip-output:false":
options_storage.disable_gzip_output = False
elif opt == "--disable-rr":
options_storage.disable_rr = True
elif opt == "--disable-rr:false":
options_storage.disable_rr = False
elif opt == "--only-error-correction":
if options_storage.only_assembler:
support.error('you cannot specify --only-error-correction and --only-assembler simultaneously')
options_storage.only_error_correction = True
elif opt == "--only-assembler":
if options_storage.only_error_correction:
support.error('you cannot specify --only-error-correction and --only-assembler simultaneously')
options_storage.only_assembler = True
elif opt == "--read-buffer-size":
options_storage.read_buffer_size = int(arg)
elif opt == "--bh-heap-check":
options_storage.bh_heap_check = arg
elif opt == "--spades-heap-check":
options_storage.spades_heap_check = arg
elif opt == "--continue":
options_storage.continue_mode = True
elif opt == "--restart-from":
if arg not in ['ec', 'as', 'mc', 'scc', 'tpp', 'last'] and not arg.startswith('k'):
support.error("wrong value for --restart-from option: " + arg +
" (should be 'ec', 'as', 'k<int>', 'mc', or 'last'", log)
options_storage.continue_mode = True
options_storage.restart_from = arg
elif opt == "--stop-after":
if not skip_stop_after:
if arg not in ['ec', 'as', 'mc', 'scc', 'tpp'] and not arg.startswith('k'):
support.error("wrong value for --stop-after option: " + arg +
" (should be 'ec', 'as', 'k<int>', or 'mc'", log)
options_storage.stop_after = arg
elif opt == '-t' or opt == "--threads":
options_storage.threads = int(arg)
elif opt == '-m' or opt == "--memory":
options_storage.memory = int(arg)
elif opt == "--phred-offset":
if arg == 'auto':
options_storage.qvoffset = arg
elif arg in ['33', '64']:
options_storage.qvoffset = int(arg)
else:
support.error('wrong PHRED quality offset value: ' + arg +
' (should be either 33, 64, or \'auto\')', log)
elif opt == "--save-gp":
options_storage.save_gp = True
elif opt == "--cov-cutoff":
if arg == 'auto' or arg == 'off':
options_storage.cov_cutoff = arg
elif support.is_float(arg) and float(arg) > 0.0:
options_storage.cov_cutoff = float(arg)
else:
support.error('wrong value for --cov-cutoff option: ' + arg +
' (should be a positive float number, or \'auto\', or \'off\')', log)
elif opt == "--hidden-cov-cutoff":
if support.is_float(arg) and float(arg) > 0.0:
options_storage.lcer_cutoff = float(arg)
else:
support.error('wrong value for --hidden-cov-cutoff option: ' + arg +
' (should be a positive float number)', log)
elif opt == "--read-cov-threshold":
if support.is_int(arg) and int(arg) >= 0:
options_storage.read_cov_threshold = int(arg)
else:
support.error('wrong value for --read-cov-threshold option: ' + arg +
' (should be a non-negative integer number)', log)
elif opt == '-i' or opt == "--iterations":
options_storage.iterations = int(arg)
elif opt == "--debug":
options_storage.developer_mode = True
elif opt == "--debug:false":
options_storage.developer_mode = False
#corrector
elif opt == "--mismatch-correction":
options_storage.mismatch_corrector = True
elif opt == "--mismatch-correction:false":
options_storage.mismatch_corrector = False
elif opt == "--careful":
options_storage.mismatch_corrector = True
options_storage.careful = True
elif opt == "--careful:false":
options_storage.mismatch_corrector = False
options_storage.careful = False
elif opt == '-v' or opt == "--version":
show_version()
elif opt == '-h' or opt == "--help":
show_usage(0)
elif opt == "--help-hidden":
show_usage(0, show_hidden=True)
elif opt == "--test":
options_storage.set_test_options()
#break
elif opt == "--truseq":
options_storage.enable_truseq_mode()
else:
raise ValueError
if options_storage.test_mode:
if options_storage.plasmid:
support.add_to_dataset('-1', os.path.join(spades_home, "test_dataset_plasmid/pl1.fq.gz"), dataset_data)
support.add_to_dataset('-2', os.path.join(spades_home, "test_dataset_plasmid/pl2.fq.gz"), dataset_data)
else:
support.add_to_dataset('-1', os.path.join(spades_home, "test_dataset/ecoli_1K_1.fq.gz"), dataset_data)
support.add_to_dataset('-2', os.path.join(spades_home, "test_dataset/ecoli_1K_2.fq.gz"), dataset_data)
if not options_storage.output_dir:
support.error("the output_dir is not set! It is a mandatory parameter (-o output_dir).", log)
if not os.path.isdir(options_storage.output_dir):
if options_storage.continue_mode:
support.error("the output_dir should exist for --continue and for --restart-from!", log)
os.makedirs(options_storage.output_dir)
if options_storage.restart_from:
if options_storage.continue_mode: # saving parameters specified with --restart-from
if not support.dataset_is_empty(dataset_data):
support.error("you cannot specify reads with --restart-from option!", log)
options_storage.save_restart_options(log)
else: # overriding previous run parameters
options_storage.load_restart_options()
elif options_storage.continue_mode: # it is just --continue, NOT --restart-from
if len(options) != 2: # one for output_dir (required) and another one for --continue itself
support.error("you cannot specify any option except -o with --continue option! "
"Please use '--restart-from last' if you need to change some "
"of the options from the initial run and continue from the last available checkpoint.", log)
if options_storage.meta:
if options_storage.careful or options_storage.mismatch_corrector or options_storage.cov_cutoff != "off":
support.error("you cannot specify --careful, --mismatch-correction or --cov-cutoff in metagenomic mode!", log)
if options_storage.rna:
if options_storage.careful:
support.error("you cannot specify --careful in RNA-Seq mode!", log)
# if options_storage.k_mers and options_storage.k_mers != 'auto' and len(options_storage.k_mers) > 1:
# support.error("you cannot specify multiple k-mer sizes in RNA-Seq mode!", log)
if options_storage.restart_from and options_storage.restart_from.startswith('k'):
support.error("you cannot restart rnaSPAdes from a certain k-mer size, use --restart-from as", log)
if [options_storage.meta, options_storage.large_genome, options_storage.truseq_mode,
options_storage.rna, options_storage.plasmid, options_storage.single_cell].count(True) > 1:
support.error("you cannot simultaneously use more than one mode out of "
"Metagenomic, Large genome, Illumina TruSeq, RNA-Seq, Plasmid, and Single-cell!", log)
if options_storage.continue_mode:
return None, None
existing_dataset_data = None
processed_dataset_fpath = os.path.join(options_storage.output_dir, "input_dataset.yaml")
if load_processed_dataset:
if os.path.isfile(processed_dataset_fpath):
try:
existing_dataset_data = pyyaml.load(open(processed_dataset_fpath, 'r'))
except pyyaml.YAMLError:
existing_dataset_data = None
if existing_dataset_data is not None:
dataset_data = existing_dataset_data
else:
if options_storage.dataset_yaml_filename:
try:
dataset_data = pyyaml.load(open(options_storage.dataset_yaml_filename, 'r'))
except pyyaml.YAMLError:
_, exc, _ = sys.exc_info()
support.error('exception caught while parsing YAML file (' + options_storage.dataset_yaml_filename + '):\n' + str(exc))
dataset_data = support.relative2abs_paths(dataset_data, os.path.dirname(options_storage.dataset_yaml_filename))
else:
dataset_data = support.correct_dataset(dataset_data)
dataset_data = support.relative2abs_paths(dataset_data, os.getcwd())
options_storage.dataset_yaml_filename = processed_dataset_fpath
support.check_dataset_reads(dataset_data, options_storage.only_assembler, log)
if not support.get_lib_ids_by_type(dataset_data, spades_logic.READS_TYPES_USED_IN_CONSTRUCTION):
support.error('you should specify at least one unpaired, paired-end, or high-quality mate-pairs library!')
if options_storage.rna:
if len(dataset_data) != len(support.get_lib_ids_by_type(dataset_data, spades_logic.READS_TYPES_USED_IN_RNA_SEQ)):
support.error('you cannot specify any data types except ' +
', '.join(spades_logic.READS_TYPES_USED_IN_RNA_SEQ) + ' in RNA-Seq mode!')
#if len(support.get_lib_ids_by_type(dataset_data, 'paired-end')) > 1:
# support.error('you cannot specify more than one paired-end library in RNA-Seq mode!')
if options_storage.meta and not options_storage.only_error_correction:
if len(support.get_lib_ids_by_type(dataset_data, "paired-end")) != 1 or \
len(dataset_data) - min(1, len(support.get_lib_ids_by_type(dataset_data, ["tslr", "pacbio", "nanopore"]))) > 1:
support.error('you cannot specify any data types except a single paired-end library '
'(optionally accompanied by a single library of '
'TSLR-contigs, or PacBio reads, or Nanopore reads) in metaSPAdes mode!')
if existing_dataset_data is None:
pyyaml.dump(dataset_data, open(options_storage.dataset_yaml_filename, 'w'),
default_flow_style=False, default_style='"', width=float("inf"))
options_storage.set_default_values()
### FILLING cfg
cfg["common"] = empty_config()
cfg["dataset"] = empty_config()
if not options_storage.only_assembler:
cfg["error_correction"] = empty_config()
if not options_storage.only_error_correction:
cfg["assembly"] = empty_config()
# common
cfg["common"].__dict__["output_dir"] = options_storage.output_dir
cfg["common"].__dict__["tmp_dir"] = options_storage.tmp_dir
cfg["common"].__dict__["max_threads"] = options_storage.threads
cfg["common"].__dict__["max_memory"] = options_storage.memory
cfg["common"].__dict__["developer_mode"] = options_storage.developer_mode
if options_storage.series_analysis:
cfg["common"].__dict__["series_analysis"] = options_storage.series_analysis
# dataset section
cfg["dataset"].__dict__["yaml_filename"] = options_storage.dataset_yaml_filename
if options_storage.developer_mode and options_storage.reference:
cfg["dataset"].__dict__["reference"] = options_storage.reference
# error correction
if (not options_storage.only_assembler) and (options_storage.iterations > 0):
cfg["error_correction"].__dict__["output_dir"] = os.path.join(cfg["common"].output_dir, "corrected")
cfg["error_correction"].__dict__["max_iterations"] = options_storage.iterations
cfg["error_correction"].__dict__["gzip_output"] = not options_storage.disable_gzip_output
if options_storage.qvoffset:
cfg["error_correction"].__dict__["qvoffset"] = options_storage.qvoffset
if options_storage.bh_heap_check:
cfg["error_correction"].__dict__["heap_check"] = options_storage.bh_heap_check
cfg["error_correction"].__dict__["iontorrent"] = options_storage.iontorrent
if options_storage.meta or options_storage.large_genome:
cfg["error_correction"].__dict__["count_filter_singletons"] = 1
if options_storage.read_buffer_size:
cfg["error_correction"].__dict__["read_buffer_size"] = options_storage.read_buffer_size
# assembly
if not options_storage.only_error_correction:
if options_storage.k_mers == 'auto' and options_storage.restart_from is None:
options_storage.k_mers = None
if options_storage.k_mers:
cfg["assembly"].__dict__["iterative_K"] = options_storage.k_mers
elif options_storage.rna:
cfg["assembly"].__dict__["iterative_K"] = 'auto'
else:
cfg["assembly"].__dict__["iterative_K"] = options_storage.K_MERS_SHORT
cfg["assembly"].__dict__["disable_rr"] = options_storage.disable_rr
cfg["assembly"].__dict__["cov_cutoff"] = options_storage.cov_cutoff
cfg["assembly"].__dict__["lcer_cutoff"] = options_storage.lcer_cutoff
cfg["assembly"].__dict__["save_gp"] = options_storage.save_gp
if options_storage.spades_heap_check:
cfg["assembly"].__dict__["heap_check"] = options_storage.spades_heap_check
if options_storage.read_buffer_size:
cfg["assembly"].__dict__["read_buffer_size"] = options_storage.read_buffer_size
cfg["assembly"].__dict__["correct_scaffolds"] = options_storage.correct_scaffolds
#corrector can work only if contigs exist (not only error correction)
if (not options_storage.only_error_correction) and options_storage.mismatch_corrector:
cfg["mismatch_corrector"] = empty_config()
cfg["mismatch_corrector"].__dict__["skip-masked"] = None
cfg["mismatch_corrector"].__dict__["bwa"] = os.path.join(bin_home, "spades-bwa")
cfg["mismatch_corrector"].__dict__["threads"] = options_storage.threads
cfg["mismatch_corrector"].__dict__["output-dir"] = options_storage.output_dir
cfg["run_truseq_postprocessing"] = options_storage.run_truseq_postprocessing
return cfg, dataset_data
def check_cfg_for_partial_run(cfg, type='restart-from'): # restart-from or stop-after
if type == 'restart-from':
check_point = options_storage.restart_from
action = 'restart from'
verb = 'was'
elif type == 'stop-after':
check_point = options_storage.stop_after
action = 'stop after'
verb = 'is'
else:
return
if check_point == 'ec' and ("error_correction" not in cfg):
support.error("failed to " + action + " 'read error correction' ('" + check_point + "') because this stage " + verb + " not specified!")
if check_point == 'mc' and ("mismatch_corrector" not in cfg):
support.error("failed to " + action + " 'mismatch correction' ('" + check_point + "') because this stage " + verb + " not specified!")
if check_point == 'as' or check_point.startswith('k'):
if "assembly" not in cfg:
support.error("failed to " + action + " 'assembling' ('" + check_point + "') because this stage " + verb + " not specified!")
if check_point.startswith('k'):
correct_k = False
k_to_check = options_storage.k_mers
if not k_to_check:
if options_storage.auto_K_allowed():
k_to_check = list(set(options_storage.K_MERS_SHORT + options_storage.K_MERS_150 + options_storage.K_MERS_250))
else:
k_to_check = options_storage.K_MERS_SHORT
for k in k_to_check:
if check_point == ("k%d" % k) or check_point.startswith("k%d:" % k):
correct_k = True
break
if not correct_k:
k_str = check_point[1:]
if k_str.find(":") != -1:
k_str = k_str[:k_str.find(":")]
support.error("failed to " + action + " K=%s because this K " % k_str + verb + " not specified!")
def rna_k_values(support, options_storage, dataset_data, log):
rna_rl = support.get_reads_length(dataset_data, log, ['merged reads'])
upper_k = int(rna_rl / 2) - 1
if upper_k % 2 == 0:
upper_k -= 1
lower_k = min(max(int(rna_rl / 3), options_storage.RNA_MIN_K), options_storage.RNA_MAX_LOWER_K)
if lower_k % 2 == 0:
lower_k -= 1
use_iterative = True
if upper_k <= lower_k:
use_iterative = False
if upper_k < options_storage.RNA_MIN_K:
support.warning("\n" + 'Auto K value (' + str(upper_k) + ') is too small, recommended to be at least %d.\n' % (options_storage.RNA_MIN_K))
if rna_rl <= options_storage.RNA_MIN_K:
support.warning('Read length is too small (%d), but keeping current K value anyway. Consider setting K manually.\n' % (rna_rl))
else:
upper_k = options_storage.RNA_MIN_K
log.info('Upper K value is set to %d.\n' % (upper_k))
if upper_k > options_storage.MAX_K:
log.info("\n" + 'Auto K value (' + str(upper_k) + ') is too large, all K values should not exceed %d. Setting k=%d.\n'
% (options_storage.MAX_K, options_storage.MAX_K))
upper_k = options_storage.MAX_K
if options_storage.fast or (not use_iterative):
return [upper_k]
return [lower_k, upper_k]
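# Worked example (assuming RNA_MIN_K and RNA_MAX_LOWER_K do not clamp these
# values): for 100 bp reads, upper_k = int(100 / 2) - 1 = 49 and
# lower_k = int(100 / 3) = 33, both already odd, so [33, 49] is returned;
# with --fast only [49] is used.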
def get_options_from_params(params_filename, running_script):
cmd_line = None
options = None
if not os.path.isfile(params_filename):
return cmd_line, options, "failed to parse command line of the previous run (%s not found)!" % params_filename
params = open(params_filename, 'r')
cmd_line = params.readline().strip()
spades_prev_version = None
for line in params:
if line.find('SPAdes version:') != -1:
spades_prev_version = line.split('SPAdes version:')[1]
break
params.close()
if spades_prev_version is None:
return cmd_line, options, "failed to parse SPAdes version of the previous run!"
if spades_prev_version.strip() != spades_version.strip():
return cmd_line, options, "SPAdes version of the previous run (%s) is not equal " \
"to the current version of SPAdes (%s)!" \
% (spades_prev_version.strip(), spades_version.strip())
if 'Command line: ' not in cmd_line or '\t' not in cmd_line:
return cmd_line, options, "failed to parse executable script of the previous run!"
options = cmd_line.split('\t')[1:]
prev_running_script = cmd_line.split('\t')[0][len('Command line: '):]
# we cannot restart/continue a spades.py run with metaspades.py/rnaspades.py/etc. and vice versa
if os.path.basename(prev_running_script) != os.path.basename(running_script):
return cmd_line, options, "executable script of the previous run (%s) is not equal " \
"to the current executable script (%s)!" \
% (os.path.basename(prev_running_script),
os.path.basename(running_script))
return cmd_line, options, ""
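# The first line of params.txt is expected to look like (paths illustrative):
#   Command line: /path/to/spades.py<TAB>-1<TAB>reads_1.fq<TAB>-o<TAB>out_dir
# i.e. the executable followed by tab-separated options, which the
# 'Command line: ' prefix and '\t' checks above rely on.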
def show_version():
options_storage.version(spades_version)
sys.exit(0)
def show_usage(code, show_hidden=False):
options_storage.usage(spades_version, show_hidden=show_hidden)
sys.exit(code)
def main(args):
os.environ["LC_ALL"] = "C"
if len(args) == 1:
show_usage(0)
log = logging.getLogger('spades')
log.setLevel(logging.DEBUG)
console = logging.StreamHandler(sys.stdout)
console.setFormatter(logging.Formatter('%(message)s'))
console.setLevel(logging.DEBUG)
log.addHandler(console)
support.check_binaries(bin_home, log)
# parse options and save all parameters to cfg
options = args
cfg, dataset_data = fill_cfg(options, log)
if options_storage.continue_mode:
cmd_line, options, err_msg = get_options_from_params(os.path.join(options_storage.output_dir, "params.txt"), args[0])
if err_msg:
support.error(err_msg + " Please restart from the beginning or specify another output directory.")
cfg, dataset_data = fill_cfg(options, log, secondary_filling=True)
if options_storage.restart_from:
check_cfg_for_partial_run(cfg, type='restart-from')
options_storage.continue_mode = True
if options_storage.stop_after:
check_cfg_for_partial_run(cfg, type='stop-after')
log_filename = os.path.join(cfg["common"].output_dir, "spades.log")
if options_storage.continue_mode:
log_handler = logging.FileHandler(log_filename, mode='a')
else:
log_handler = logging.FileHandler(log_filename, mode='w')
log.addHandler(log_handler)
if options_storage.continue_mode:
log.info("\n======= SPAdes pipeline continued. Log can be found here: " + log_filename + "\n")
log.info("Restored from " + cmd_line)
if options_storage.restart_from:
updated_params = ""
skip_next = False
for v in args[1:]:
if v == '-o' or v == '--restart-from':
skip_next = True
continue
if skip_next or v.startswith('--restart-from='): # you can specify '--restart-from=k33' but not '-o=out_dir'
skip_next = False
continue
updated_params += "\t" + v
updated_params = updated_params.strip()
log.info("with updated parameters: " + updated_params)
cmd_line += "\t" + updated_params
log.info("")
params_filename = os.path.join(cfg["common"].output_dir, "params.txt")
params_handler = logging.FileHandler(params_filename, mode='w')
log.addHandler(params_handler)
if options_storage.continue_mode:
log.info(cmd_line)
else:
command = "Command line: "
for v in args:
# substituting relative paths with absolute ones (read paths, output dir path, etc)
v, prefix = support.get_option_prefix(v)
if v in options_storage.dict_of_rel2abs.keys():
v = options_storage.dict_of_rel2abs[v]
if prefix:
command += prefix + ":"
command += v + "\t"
log.info(command)
# special case
# if "mismatch_corrector" in cfg and not support.get_lib_ids_by_type(dataset_data, 'paired-end'):
# support.warning('cannot perform mismatch correction without at least one paired-end library! Skipping this step.', log)
# del cfg["mismatch_corrector"]
print_used_values(cfg, log)
log.removeHandler(params_handler)
support.check_single_reads_in_options(options, log)
if not options_storage.continue_mode:
log.info("\n======= SPAdes pipeline started. Log can be found here: " + log_filename + "\n")
if options_storage.rna and cfg["assembly"].__dict__["iterative_K"] == 'auto':
k_values = options_storage.K_MERS_RNA
if not options_storage.iontorrent:
k_values = rna_k_values(support, options_storage, dataset_data, log)
cfg["assembly"].__dict__["iterative_K"] = k_values
log.info("K values to be used: " + str(k_values))
# splitting interlaced reads and processing Ns in additional contigs if needed
if support.dataset_has_interlaced_reads(dataset_data) or support.dataset_has_additional_contigs(dataset_data)\
or support.dataset_has_nxmate_reads(dataset_data):
dir_for_split_reads = os.path.join(options_storage.output_dir, 'split_input')
if support.dataset_has_interlaced_reads(dataset_data) or support.dataset_has_nxmate_reads(dataset_data):
if not os.path.isdir(dir_for_split_reads):
os.makedirs(dir_for_split_reads)
if support.dataset_has_interlaced_reads(dataset_data):
dataset_data = support.split_interlaced_reads(dataset_data, dir_for_split_reads, log)
if support.dataset_has_nxmate_reads(dataset_data):
dataset_data = support.process_nxmate_reads(dataset_data, dir_for_split_reads, log)
if support.dataset_has_additional_contigs(dataset_data):
dataset_data = support.process_Ns_in_additional_contigs(dataset_data, dir_for_split_reads, log)
options_storage.dataset_yaml_filename = os.path.join(options_storage.output_dir, "input_dataset.yaml")
pyyaml.dump(dataset_data, open(options_storage.dataset_yaml_filename, 'w'),
default_flow_style=False, default_style='"', width=float("inf"))
cfg["dataset"].yaml_filename = options_storage.dataset_yaml_filename
try:
# copying configs before all computations (to prevent them from changing at run time)
tmp_configs_dir = os.path.join(cfg["common"].output_dir, "configs")
if os.path.isdir(tmp_configs_dir) and not options_storage.continue_mode:
shutil.rmtree(tmp_configs_dir)
if not os.path.isdir(tmp_configs_dir):
if options_storage.configs_dir:
dir_util.copy_tree(options_storage.configs_dir, tmp_configs_dir, preserve_times=False, preserve_mode=False)
else:
dir_util.copy_tree(os.path.join(spades_home, "configs"), tmp_configs_dir, preserve_times=False, preserve_mode=False)
corrected_dataset_yaml_filename = ''
if "error_correction" in cfg:
STAGE_NAME = "Read error correction"
bh_cfg = merge_configs(cfg["error_correction"], cfg["common"])
corrected_dataset_yaml_filename = os.path.join(bh_cfg.output_dir, "corrected.yaml")
ec_is_needed = True
only_compressing_is_needed = False
if os.path.isfile(corrected_dataset_yaml_filename) and options_storage.continue_mode \
and not options_storage.restart_from == "ec":
if not bh_cfg.gzip_output or \
support.dataset_has_gzipped_reads(pyyaml.load(open(corrected_dataset_yaml_filename, 'r'))):
log.info("\n===== Skipping %s (already processed). \n" % STAGE_NAME)
ec_is_needed = False
else:
only_compressing_is_needed = True
if ec_is_needed:
if not only_compressing_is_needed:
support.continue_from_here(log)
if "HEAPCHECK" in os.environ:
del os.environ["HEAPCHECK"]
if "heap_check" in bh_cfg.__dict__:
os.environ["HEAPCHECK"] = bh_cfg.heap_check
if os.path.exists(bh_cfg.output_dir):
shutil.rmtree(bh_cfg.output_dir)
os.makedirs(bh_cfg.output_dir)
bh_cfg.__dict__["dataset_yaml_filename"] = cfg["dataset"].yaml_filename
log.info("\n===== %s started. \n" % STAGE_NAME)
hammer_logic.run_hammer(corrected_dataset_yaml_filename, tmp_configs_dir, bin_home, bh_cfg, dataset_data,
ext_python_modules_home, only_compressing_is_needed, log)
log.info("\n===== %s finished. \n" % STAGE_NAME)
if options_storage.stop_after == 'ec':
support.finish_here(log)
result_contigs_filename = os.path.join(cfg["common"].output_dir, options_storage.contigs_name)
result_scaffolds_filename = os.path.join(cfg["common"].output_dir, options_storage.scaffolds_name)
result_assembly_graph_filename = os.path.join(cfg["common"].output_dir, options_storage.assembly_graph_name)
result_assembly_graph_filename_gfa = os.path.join(cfg["common"].output_dir, options_storage.assembly_graph_name_gfa)
result_contigs_paths_filename = os.path.join(cfg["common"].output_dir, options_storage.contigs_paths)
result_scaffolds_paths_filename = os.path.join(cfg["common"].output_dir, options_storage.scaffolds_paths)
result_transcripts_filename = os.path.join(cfg["common"].output_dir, options_storage.transcripts_name)
result_transcripts_paths_filename = os.path.join(cfg["common"].output_dir, options_storage.transcripts_paths)
truseq_long_reads_file_base = os.path.join(cfg["common"].output_dir, "truseq_long_reads")
truseq_long_reads_file = truseq_long_reads_file_base + ".fasta"
misc_dir = os.path.join(cfg["common"].output_dir, "misc")
### if mismatch correction is enabled then result contigs are copied to misc directory
assembled_contigs_filename = os.path.join(misc_dir, "assembled_contigs.fasta")
assembled_scaffolds_filename = os.path.join(misc_dir, "assembled_scaffolds.fasta")
if "assembly" in cfg and not options_storage.run_completed:
STAGE_NAME = "Assembling"
spades_cfg = merge_configs(cfg["assembly"], cfg["common"])
spades_cfg.__dict__["result_contigs"] = result_contigs_filename
spades_cfg.__dict__["result_scaffolds"] = result_scaffolds_filename
spades_cfg.__dict__["result_graph"] = result_assembly_graph_filename
spades_cfg.__dict__["result_graph_gfa"] = result_assembly_graph_filename_gfa
spades_cfg.__dict__["result_contigs_paths"] = result_contigs_paths_filename
spades_cfg.__dict__["result_scaffolds_paths"] = result_scaffolds_paths_filename
spades_cfg.__dict__["result_transcripts"] = result_transcripts_filename
spades_cfg.__dict__["result_transcripts_paths"] = result_transcripts_paths_filename
if options_storage.continue_mode and (os.path.isfile(spades_cfg.result_contigs)
or ("mismatch_corrector" in cfg and
os.path.isfile(assembled_contigs_filename))
or (options_storage.truseq_mode and os.path.isfile(assembled_scaffolds_filename)))\
and not options_storage.restart_from == 'as' \
and not options_storage.restart_from == 'scc' \
and not (options_storage.restart_from and options_storage.restart_from.startswith('k')):
log.info("\n===== Skipping %s (already processed). \n" % STAGE_NAME)
# calculating latest_dir for the next stages
latest_dir = support.get_latest_dir(os.path.join(spades_cfg.output_dir, "K*"))
if not latest_dir:
support.error("failed to continue the previous run! Please restart from previous stages or from the beginning.", log)
else:
old_result_files = [result_contigs_filename, result_scaffolds_filename,
assembled_contigs_filename, assembled_scaffolds_filename]
for old_result_file in old_result_files:
if os.path.isfile(old_result_file):
os.remove(old_result_file)
if options_storage.restart_from == 'as':
support.continue_from_here(log)
if os.path.isfile(corrected_dataset_yaml_filename):
dataset_data = pyyaml.load(open(corrected_dataset_yaml_filename, 'r'))
dataset_data = support.relative2abs_paths(dataset_data, os.path.dirname(corrected_dataset_yaml_filename))
if spades_cfg.disable_rr:
spades_cfg.__dict__["rr_enable"] = False
else:
spades_cfg.__dict__["rr_enable"] = True
if "HEAPCHECK" in os.environ:
del os.environ["HEAPCHECK"]
if "heap_check" in spades_cfg.__dict__:
os.environ["HEAPCHECK"] = spades_cfg.heap_check
log.info("\n===== %s started.\n" % STAGE_NAME)
# creating dataset
dataset_filename = os.path.join(spades_cfg.output_dir, "dataset.info")
if not os.path.isfile(dataset_filename) or not options_storage.continue_mode:
dataset_file = open(dataset_filename, 'w')
import process_cfg
if os.path.isfile(corrected_dataset_yaml_filename):
dataset_file.write("reads" + '\t' + process_cfg.process_spaces(corrected_dataset_yaml_filename) + '\n')
else:
dataset_file.write("reads" + '\t' + process_cfg.process_spaces(cfg["dataset"].yaml_filename) + '\n')
if spades_cfg.developer_mode and "reference" in cfg["dataset"].__dict__:
dataset_file.write("reference_genome" + '\t')
dataset_file.write(process_cfg.process_spaces(cfg["dataset"].reference) + '\n')
dataset_file.close()
spades_cfg.__dict__["dataset"] = dataset_filename
used_K = spades_logic.run_spades(tmp_configs_dir, bin_home, spades_cfg, dataset_data, ext_python_modules_home, log)
if os.path.isdir(misc_dir) and not options_storage.continue_mode:
shutil.rmtree(misc_dir)
if not os.path.isdir(misc_dir):
os.makedirs(misc_dir)
if options_storage.continue_mode and options_storage.restart_from and options_storage.restart_from.startswith('k'):
k_str = options_storage.restart_from[1:]
if k_str.find(":") != -1:
k_str = k_str[:k_str.find(":")]
support.error("failed to continue from K=%s because this K was not processed in the original run!" % k_str, log)
log.info("\n===== %s finished. Used k-mer sizes: %s \n" % (STAGE_NAME, ', '.join(map(str, used_K))))
if not options_storage.run_completed:
if options_storage.stop_after == 'as' or options_storage.stop_after == 'scc' or (options_storage.stop_after and options_storage.stop_after.startswith('k')):
support.finish_here(log)
#postprocessing
if cfg["run_truseq_postprocessing"] and not options_storage.run_completed:
if options_storage.continue_mode and os.path.isfile(truseq_long_reads_file_base + ".fastq") and not options_storage.restart_from == 'tpp':
log.info("\n===== Skipping %s (already processed). \n" % "TruSeq postprocessing")
else:
support.continue_from_here(log)
if os.path.isfile(result_scaffolds_filename):
shutil.move(result_scaffolds_filename, assembled_scaffolds_filename)
reads_library = dataset_data[0]
alignment_bin = os.path.join(bin_home, "spades-bwa")
alignment_dir = os.path.join(cfg["common"].output_dir, "alignment")
sam_files = alignment.align_bwa(alignment_bin, assembled_scaffolds_filename, dataset_data, alignment_dir, log, options_storage.threads)
moleculo_postprocessing.moleculo_postprocessing(assembled_scaffolds_filename, truseq_long_reads_file_base, sam_files, log)
if options_storage.stop_after == 'tpp':
support.finish_here(log)
#corrector
if "mismatch_corrector" in cfg and not options_storage.run_completed and \
(os.path.isfile(result_contigs_filename) or
(options_storage.continue_mode and os.path.isfile(assembled_contigs_filename))):
STAGE_NAME = "Mismatch correction"
to_correct = dict()
to_correct["contigs"] = (result_contigs_filename, assembled_contigs_filename)
if os.path.isfile(result_scaffolds_filename) or (options_storage.continue_mode and
os.path.isfile(assembled_scaffolds_filename)):
to_correct["scaffolds"] = (result_scaffolds_filename, assembled_scaffolds_filename)
# moving assembled contigs (scaffolds) to misc dir
for assembly_type, (old, new) in to_correct.items():
if options_storage.continue_mode and os.path.isfile(new):
continue
if os.path.isfile(old):
shutil.move(old, new)
if options_storage.continue_mode and os.path.isfile(result_contigs_filename) and \
(os.path.isfile(result_scaffolds_filename) or not os.path.isfile(assembled_scaffolds_filename)) \
and not options_storage.restart_from == 'mc':
log.info("\n===== Skipping %s (already processed). \n" % STAGE_NAME)
else:
if options_storage.restart_from == 'mc':
support.continue_from_here(log)
log.info("\n===== %s started." % STAGE_NAME)
# detecting paired-end library with the largest insert size
cfg["mismatch_corrector"].__dict__["dataset"] = cfg["dataset"].yaml_filename
#TODO: add reads orientation
import corrector_logic
corrector_cfg = cfg["mismatch_corrector"]
# processing contigs and scaffolds (or only contigs)
for assembly_type, (corrected, assembled) in to_correct.items():
if options_storage.continue_mode and os.path.isfile(corrected):
log.info("\n== Skipping processing of " + assembly_type + " (already processed)\n")
continue
if not os.path.isfile(assembled) or os.path.getsize(assembled) == 0:
log.info("\n== Skipping processing of " + assembly_type + " (empty file)\n")
continue
support.continue_from_here(log)
log.info("\n== Processing of " + assembly_type + "\n")
tmp_dir_for_corrector = os.path.join(cfg["common"].output_dir, "mismatch_corrector", assembly_type)
cfg["mismatch_corrector"].__dict__["output_dir"] = tmp_dir_for_corrector
# correcting
corr_cfg = merge_configs(cfg["mismatch_corrector"], cfg["common"])
result_corrected_filename = os.path.join(tmp_dir_for_corrector, "corrected_contigs.fasta")
corrector_logic.run_corrector( tmp_configs_dir, bin_home, corr_cfg,
ext_python_modules_home, log, assembled, result_corrected_filename)
if os.path.isfile(result_corrected_filename):
shutil.copyfile(result_corrected_filename, corrected)
tmp_d = os.path.join(tmp_dir_for_corrector, "tmp")
if os.path.isdir(tmp_d) and not cfg["common"].developer_mode:
shutil.rmtree(tmp_d)
log.info("\n===== %s finished.\n" % STAGE_NAME)
if options_storage.stop_after == 'mc':
support.finish_here(log)
if not cfg["common"].developer_mode and os.path.isdir(tmp_configs_dir):
shutil.rmtree(tmp_configs_dir)
if not options_storage.run_completed:
#log.info("")
if "error_correction" in cfg and os.path.isdir(os.path.dirname(corrected_dataset_yaml_filename)):
log.info(" * Corrected reads are in " + support.process_spaces(os.path.dirname(corrected_dataset_yaml_filename) + "/"))
if "assembly" in cfg and os.path.isfile(result_contigs_filename):
message = " * Assembled contigs are in " + support.process_spaces(result_contigs_filename)
log.info(message)
if options_storage.rna and "assembly" in cfg:
if os.path.isfile(result_transcripts_filename):
message = " * Assembled transcripts are in " + support.process_spaces(result_transcripts_filename)
log.info(message)
if os.path.isfile(result_transcripts_paths_filename):
message = " * Paths in the assembly graph corresponding to the transcripts are in " + \
support.process_spaces(result_transcripts_paths_filename)
log.info(message)
for filtering_type in options_storage.filtering_types:
result_filtered_transcripts_filename = os.path.join(cfg["common"].output_dir,
filtering_type + "_filtered_" +
options_storage.transcripts_name)
if os.path.isfile(result_filtered_transcripts_filename):
message = " * " + filtering_type.capitalize() + " filtered transcripts are in " + \
support.process_spaces(result_filtered_transcripts_filename)
log.info(message)
elif "assembly" in cfg:
if os.path.isfile(result_scaffolds_filename):
message = " * Assembled scaffolds are in " + support.process_spaces(result_scaffolds_filename)
log.info(message)
if os.path.isfile(result_assembly_graph_filename):
message = " * Assembly graph is in " + support.process_spaces(result_assembly_graph_filename)
log.info(message)
if os.path.isfile(result_assembly_graph_filename_gfa):
message = " * Assembly graph in GFA format is in " + support.process_spaces(result_assembly_graph_filename_gfa)
log.info(message)
if os.path.isfile(result_contigs_paths_filename):
message = " * Paths in the assembly graph corresponding to the contigs are in " + \
support.process_spaces(result_contigs_paths_filename)
log.info(message)
if os.path.isfile(result_scaffolds_paths_filename):
message = " * Paths in the assembly graph corresponding to the scaffolds are in " + \
support.process_spaces(result_scaffolds_paths_filename)
log.info(message)
#log.info("")
#breaking scaffolds
if os.path.isfile(result_scaffolds_filename):
if not os.path.isdir(misc_dir):
os.makedirs(misc_dir)
result_broken_scaffolds = os.path.join(misc_dir, "broken_scaffolds.fasta")
if not os.path.isfile(result_broken_scaffolds) or not options_storage.continue_mode:
modified, broken_scaffolds = support.break_scaffolds(result_scaffolds_filename,
options_storage.THRESHOLD_FOR_BREAKING_SCAFFOLDS)
if modified:
support.write_fasta(result_broken_scaffolds, broken_scaffolds)
#log.info(" * Scaffolds broken by " + str(options_storage.THRESHOLD_FOR_BREAKING_SCAFFOLDS) +
# " Ns are in " + result_broken_scaffolds)
### printing WARNINGS SUMMARY
if not support.log_warnings(log):
log.info("\n======= SPAdes pipeline finished.") # otherwise it finished WITH WARNINGS
if options_storage.test_mode:
if options_storage.truseq_mode:
if not os.path.isfile(truseq_long_reads_file):
support.error("TEST FAILED: %s does not exist!" % truseq_long_reads_file)
elif options_storage.rna:
if not os.path.isfile(result_transcripts_filename):
support.error("TEST FAILED: %s does not exist!" % result_transcripts_filename)
else:
for result_filename in [result_contigs_filename, result_scaffolds_filename]:
if os.path.isfile(result_filename):
result_fasta = list(support.read_fasta(result_filename))
# correctness check: should be one contig of length 1000 bp
correct_number = 1
if options_storage.plasmid:
correct_length = 9667
else:
correct_length = 1000
if not len(result_fasta):
support.error("TEST FAILED: %s does not contain contigs!" % result_filename)
elif len(result_fasta) > correct_number:
support.error("TEST FAILED: %s contains more than %d contig (%d)!" %
(result_filename, correct_number, len(result_fasta)))
elif len(result_fasta[0][1]) != correct_length:
if len(result_fasta[0][1]) > correct_length:
relation = "more"
else:
relation = "less"
support.error("TEST FAILED: %s contains %s than %d bp (%d bp)!" %
(result_filename, relation, correct_length, len(result_fasta[0][1])))
else:
support.error("TEST FAILED: " + result_filename + " does not exist!")
log.info("\n========= TEST PASSED CORRECTLY.")
log.info("\nSPAdes log can be found here: " + log_filename)
log.info("")
log.info("Thank you for using SPAdes!")
log.removeHandler(log_handler)
except Exception:
exc_type, exc_value, _ = sys.exc_info()
if exc_type == SystemExit:
sys.exit(exc_value)
else:
if exc_type == OSError and exc_value.errno == errno.ENOEXEC: # Exec format error
support.error("It looks like you are using SPAdes binaries for another platform.\n" +
support.get_spades_binaries_info_message())
else:
log.exception(exc_value)
support.error("exception caught: %s" % exc_type, log)
except BaseException: # since python 2.5 system-exiting exceptions (e.g. KeyboardInterrupt) are derived from BaseException
exc_type, exc_value, _ = sys.exc_info()
if exc_type == SystemExit:
sys.exit(exc_value)
else:
log.exception(exc_value)
support.error("exception caught: %s" % exc_type, log)
if __name__ == '__main__':
main(sys.argv)
|
kohnle-lernmodule/palama
|
refs/heads/master
|
twisted/scripts/twistd.py
|
19
|
# Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
from twisted.python import log, syslog
from twisted.python.util import switchUID
from twisted.application import app, service
from twisted.scripts import mktap
from twisted import copyright
import os, errno, sys
class ServerOptions(app.ServerOptions):
synopsis = "Usage: twistd [options]"
optFlags = [['nodaemon','n', "don't daemonize"],
['quiet', 'q', "No-op for backwards compatability."],
['originalname', None, "Don't try to change the process name"],
['syslog', None, "Log to syslog, not to file"],
['euid', '',
"Set only effective user-id rather than real user-id. "
"(This option has no effect unless the server is running as "
"root, in which case it means not to shed all privileges "
"after binding ports, retaining the option to regain "
"privileges in cases such as spawning processes. "
"Use with caution.)"],
]
optParameters = [
['prefix', None,'twisted',
"use the given prefix when syslogging"],
['pidfile','','twistd.pid',
"Name of the pidfile"],
['chroot', None, None,
'Chroot to a supplied directory before running'],
['uid', 'u', None, "The uid to run as."],
['gid', 'g', None, "The gid to run as."],
]
zsh_altArgDescr = {"prefix":"Use the given prefix when syslogging (default: twisted)",
"pidfile":"Name of the pidfile (default: twistd.pid)",}
#zsh_multiUse = ["foo", "bar"]
#zsh_mutuallyExclusive = [("foo", "bar"), ("bar", "baz")]
zsh_actions = {"pidfile":'_files -g "*.pid"', "chroot":'_dirs'}
zsh_actionDescr = {"chroot":"chroot directory"}
def opt_version(self):
"""Print version information and exit.
"""
print 'twistd (the Twisted daemon) %s' % copyright.version
print copyright.copyright
sys.exit()
def postOptions(self):
app.ServerOptions.postOptions(self)
if self['pidfile']:
self['pidfile'] = os.path.abspath(self['pidfile'])
def checkPID(pidfile):
if not pidfile:
return
if os.path.exists(pidfile):
try:
pid = int(open(pidfile).read())
except ValueError:
sys.exit('Pidfile %s contains non-numeric value' % pidfile)
try:
os.kill(pid, 0)
except OSError, why:
if why[0] == errno.ESRCH:
# The pid doesn't exist.
log.msg('Removing stale pidfile %s' % pidfile, isError=True)
os.remove(pidfile)
else:
sys.exit("Can't check status of PID %s from pidfile %s: %s" %
(pid, pidfile, why[1]))
else:
sys.exit("""\
Another twistd server is running, PID %s\n
This could either be a previously started instance of your application or a
different application entirely. To start a new one, either run it in some other
directory, or use the --pidfile and --logfile parameters to avoid clashes.
""" % pid)
def removePID(pidfile):
if not pidfile:
return
try:
os.unlink(pidfile)
except OSError, e:
if e.errno == errno.EACCES or e.errno == errno.EPERM:
log.msg("Warning: No permission to delete pid file")
else:
log.msg("Failed to unlink PID file:")
log.deferr()
except:
log.msg("Failed to unlink PID file:")
log.deferr()
def startLogging(logfilename, sysLog, prefix, nodaemon):
if logfilename == '-':
if not nodaemon:
print 'daemons cannot log to stdout'
os._exit(1)
logFile = sys.stdout
elif sysLog:
syslog.startLogging(prefix)
elif nodaemon and not logfilename:
logFile = sys.stdout
else:
logFile = app.getLogFile(logfilename or 'twistd.log')
try:
import signal
except ImportError:
pass
else:
def rotateLog(signal, frame):
from twisted.internet import reactor
reactor.callFromThread(logFile.rotate)
signal.signal(signal.SIGUSR1, rotateLog)
if not sysLog:
log.startLogging(logFile)
sys.stdout.flush()
def daemonize():
# See http://www.erlenstar.demon.co.uk/unix/faq_toc.html#TOC16
if os.fork(): # launch child and...
os._exit(0) # kill off parent
os.setsid()
if os.fork(): # launch child and...
os._exit(0) # kill off parent again.
os.umask(077)
null=os.open('/dev/null', os.O_RDWR)
for i in range(3):
try:
os.dup2(null, i)
except OSError, e:
if e.errno != errno.EBADF:
raise
os.close(null)
def shedPrivileges(euid, uid, gid):
if uid is not None or gid is not None:
switchUID(uid, gid, euid)
extra = euid and 'e' or ''
log.msg('set %suid/%sgid %s/%s' % (extra, extra, uid, gid))
def launchWithName(name):
if name and name != sys.argv[0]:
exe = os.path.realpath(sys.executable)
log.msg('Changing process name to ' + name)
os.execv(exe, [name, sys.argv[0], '--originalname']+sys.argv[1:])
def setupEnvironment(config):
if config['chroot'] is not None:
os.chroot(config['chroot'])
if config['rundir'] == '.':
config['rundir'] = '/'
os.chdir(config['rundir'])
if not config['nodaemon']:
daemonize()
if config['pidfile']:
open(config['pidfile'],'wb').write(str(os.getpid()))
def startApplication(config, application):
process = service.IProcess(application, None)
if not config['originalname']:
launchWithName(process.processName)
setupEnvironment(config)
service.IService(application).privilegedStartService()
uid, gid = mktap.getid(config['uid'], config['gid'])
if uid is None:
uid = process.uid
if gid is None:
gid = process.gid
shedPrivileges(config['euid'], uid, gid)
app.startApplication(application, not config['no_save'])
def runApp(config):
checkPID(config['pidfile'])
passphrase = app.getPassphrase(config['encrypted'])
app.installReactor(config['reactor'])
config['nodaemon'] = config['nodaemon'] or config['debug']
oldstdout = sys.stdout
oldstderr = sys.stderr
startLogging(config['logfile'], config['syslog'], config['prefix'],
config['nodaemon'])
app.initialLog()
application = app.getApplication(config, passphrase)
startApplication(config, application)
app.runReactorWithLogging(config, oldstdout, oldstderr)
removePID(config['pidfile'])
app.reportProfile(config['report-profile'],
service.IProcess(application).processName)
log.msg("Server Shut Down.")
def run():
app.run(runApp, ServerOptions)
|
kramwens/order_bot
|
refs/heads/master
|
venv/lib/python2.7/site-packages/setuptools/msvc.py
|
89
|
"""
Improved support for Microsoft Visual C++ compilers.
Known supported compilers:
--------------------------
Microsoft Visual C++ 9.0:
Microsoft Visual C++ Compiler for Python 2.7 (x86, amd64);
Microsoft Windows SDK 7.0 (x86, x64, ia64);
Microsoft Windows SDK 6.1 (x86, x64, ia64)
Microsoft Visual C++ 10.0:
Microsoft Windows SDK 7.1 (x86, x64, ia64)
Microsoft Visual C++ 14.0:
Microsoft Visual C++ Build Tools 2015 (x86, x64, arm)
"""
import os
import sys
import platform
import itertools
import distutils.errors
from pkg_resources.extern.packaging.version import LegacyVersion
from setuptools.extern.six.moves import filterfalse
from .monkey import get_unpatched
if platform.system() == 'Windows':
from setuptools.extern.six.moves import winreg
safe_env = os.environ
else:
"""
Mock winreg and environ so the module can be imported
on this platform.
"""
class winreg:
HKEY_USERS = None
HKEY_CURRENT_USER = None
HKEY_LOCAL_MACHINE = None
HKEY_CLASSES_ROOT = None
safe_env = dict()
try:
from distutils.msvc9compiler import Reg
except ImportError:
pass
def msvc9_find_vcvarsall(version):
"""
Patched "distutils.msvc9compiler.find_vcvarsall" to use the standalone
compiler build for Python (VCForPython). Fall back to original behavior
when the standalone compiler is not available.
Redirect the path of "vcvarsall.bat".
Known supported compilers
-------------------------
Microsoft Visual C++ 9.0:
Microsoft Visual C++ Compiler for Python 2.7 (x86, amd64)
Parameters
----------
version: float
Required Microsoft Visual C++ version.
Return
------
vcvarsall.bat path: str
"""
VC_BASE = r'Software\%sMicrosoft\DevDiv\VCForPython\%0.1f'
key = VC_BASE % ('', version)
try:
# Per-user installs register the compiler path here
productdir = Reg.get_value(key, "installdir")
except KeyError:
try:
# All-user installs on a 64-bit system register here
key = VC_BASE % ('Wow6432Node\\', version)
productdir = Reg.get_value(key, "installdir")
except KeyError:
productdir = None
if productdir:
vcvarsall = os.path.join(productdir, "vcvarsall.bat")
if os.path.isfile(vcvarsall):
return vcvarsall
return get_unpatched(msvc9_find_vcvarsall)(version)
def msvc9_query_vcvarsall(ver, arch='x86', *args, **kwargs):
"""
Patched "distutils.msvc9compiler.query_vcvarsall" for support standalones
compilers.
Set environment without use of "vcvarsall.bat".
Known supported compilers
-------------------------
Microsoft Visual C++ 9.0:
Microsoft Visual C++ Compiler for Python 2.7 (x86, amd64);
Microsoft Windows SDK 7.0 (x86, x64, ia64);
Microsoft Windows SDK 6.1 (x86, x64, ia64)
Microsoft Visual C++ 10.0:
Microsoft Windows SDK 7.1 (x86, x64, ia64)
Parameters
----------
ver: float
Required Microsoft Visual C++ version.
arch: str
Target architecture.
Return
------
environment: dict
"""
# Try to get environment from vcvarsall.bat (Classical way)
try:
orig = get_unpatched(msvc9_query_vcvarsall)
return orig(ver, arch, *args, **kwargs)
except distutils.errors.DistutilsPlatformError:
# Pass error if Vcvarsall.bat is missing
pass
except ValueError:
# Pass error if environment not set after executing vcvarsall.bat
pass
# If error, try to set environment directly
try:
return EnvironmentInfo(arch, ver).return_env()
except distutils.errors.DistutilsPlatformError as exc:
_augment_exception(exc, ver, arch)
raise
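# Illustrative note (not from the original source): on a machine with a supported
# compiler or SDK installed, a call like msvc9_query_vcvarsall(9.0, 'x86') is expected
# to return a dict of environment variables such as 'include', 'lib', 'libpath' and 'path'.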
def msvc14_get_vc_env(plat_spec):
"""
Patched "distutils._msvccompiler._get_vc_env" for support standalones
compilers.
Set environment without use of "vcvarsall.bat".
Known supported compilers
-------------------------
Microsoft Visual C++ 14.0:
Microsoft Visual C++ Build Tools 2015 (x86, x64, arm)
Parameters
----------
plat_spec: str
Target architecture.
Return
------
environment: dict
"""
# Try to get environment from vcvarsall.bat (Classical way)
try:
return get_unpatched(msvc14_get_vc_env)(plat_spec)
except distutils.errors.DistutilsPlatformError:
# Pass error if Vcvarsall.bat is missing
pass
# If error, try to set environment directly
try:
return EnvironmentInfo(plat_spec, vc_min_ver=14.0).return_env()
except distutils.errors.DistutilsPlatformError as exc:
_augment_exception(exc, 14.0)
raise
def msvc14_gen_lib_options(*args, **kwargs):
"""
Patched "distutils._msvccompiler.gen_lib_options" for fix
compatibility between "numpy.distutils" and "distutils._msvccompiler"
(for Numpy < 1.11.2)
"""
if "numpy.distutils" in sys.modules:
import numpy as np
if LegacyVersion(np.__version__) < LegacyVersion('1.11.2'):
return np.distutils.ccompiler.gen_lib_options(*args, **kwargs)
return get_unpatched(msvc14_gen_lib_options)(*args, **kwargs)
def _augment_exception(exc, version, arch=''):
"""
Add details to the exception message to help guide the user
as to what action will resolve it.
"""
# Error if MSVC++ directory not found or environment not set
message = exc.args[0]
if "vcvarsall" in message.lower() or "visual c" in message.lower():
# Special error message if MSVC++ not installed
tmpl = 'Microsoft Visual C++ {version:0.1f} is required.'
message = tmpl.format(**locals())
msdownload = 'www.microsoft.com/download/details.aspx?id=%d'
if version == 9.0:
if arch.lower().find('ia64') > -1:
# For VC++ 9.0, if IA64 support is needed, redirect user
# to Windows SDK 7.0
message += ' Get it with "Microsoft Windows SDK 7.0": '
message += msdownload % 3138
else:
# For VC++ 9.0 redirect user to Vc++ for Python 2.7 :
# This redirection link is maintained by Microsoft.
# Contact vspython@microsoft.com if it needs updating.
message += ' Get it from http://aka.ms/vcpython27'
elif version == 10.0:
# For VC++ 10.0 Redirect user to Windows SDK 7.1
message += ' Get it with "Microsoft Windows SDK 7.1": '
message += msdownload % 8279
elif version >= 14.0:
# For VC++ 14.0 Redirect user to Visual C++ Build Tools
message += (' Get it with "Microsoft Visual C++ Build Tools": '
r'http://landinghub.visualstudio.com/'
'visual-cpp-build-tools')
exc.args = (message, )
class PlatformInfo:
"""
Current and target architecture information.
Parameters
----------
arch: str
Target architecture.
"""
current_cpu = safe_env.get('processor_architecture', '').lower()
def __init__(self, arch):
self.arch = arch.lower().replace('x64', 'amd64')
@property
def target_cpu(self):
return self.arch[self.arch.find('_') + 1:]
def target_is_x86(self):
return self.target_cpu == 'x86'
def current_is_x86(self):
return self.current_cpu == 'x86'
def current_dir(self, hidex86=False, x64=False):
"""
Current platform specific subfolder.
Parameters
----------
hidex86: bool
return '' and not '\x86' if architecture is x86.
x64: bool
return '\x64' and not '\amd64' if architecture is amd64.
Return
------
subfolder: str
'\current', or '' (see hidex86 parameter)
"""
return (
'' if (self.current_cpu == 'x86' and hidex86) else
r'\x64' if (self.current_cpu == 'amd64' and x64) else
r'\%s' % self.current_cpu
)
def target_dir(self, hidex86=False, x64=False):
"""
Target platform specific subfolder.
Parameters
----------
hidex86: bool
return '' and not '\x86' if architecture is x86.
x64: bool
return '\x64' and not '\amd64' if architecture is amd64.
Return
------
subfolder: str
'\target', or '' (see hidex86 parameter)
"""
return (
'' if (self.target_cpu == 'x86' and hidex86) else
r'\x64' if (self.target_cpu == 'amd64' and x64) else
r'\%s' % self.target_cpu
)
def cross_dir(self, forcex86=False):
"""
Cross platform specific subfolder.
Parameters
----------
forcex86: bool
Use 'x86' as current architecture even if current architecture is
not x86.
Return
------
subfolder: str
'' if target architecture is current architecture,
'\current_target' if not.
"""
current = 'x86' if forcex86 else self.current_cpu
return (
'' if self.target_cpu == current else
self.target_dir().replace('\\', '\\%s_' % current)
)
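# Illustrative example (hypothetical values, not from the original source): with
# PlatformInfo('x86_amd64') on a host whose PROCESSOR_ARCHITECTURE is 'x86',
# target_cpu == 'amd64', current_dir() == r'\x86', target_dir() == r'\amd64'
# and cross_dir() == r'\x86_amd64'.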
class RegistryInfo:
"""
Microsoft Visual Studio related registry information.
Parameters
----------
platform_info: PlatformInfo
"PlatformInfo" instance.
"""
HKEYS = (winreg.HKEY_USERS,
winreg.HKEY_CURRENT_USER,
winreg.HKEY_LOCAL_MACHINE,
winreg.HKEY_CLASSES_ROOT)
def __init__(self, platform_info):
self.pi = platform_info
@property
def visualstudio(self):
"""
Microsoft Visual Studio root registry key.
"""
return 'VisualStudio'
@property
def sxs(self):
"""
Microsoft Visual Studio SxS registry key.
"""
return os.path.join(self.visualstudio, 'SxS')
@property
def vc(self):
"""
Microsoft Visual C++ VC7 registry key.
"""
return os.path.join(self.sxs, 'VC7')
@property
def vs(self):
"""
Microsoft Visual Studio VS7 registry key.
"""
return os.path.join(self.sxs, 'VS7')
@property
def vc_for_python(self):
"""
Microsoft Visual C++ for Python registry key.
"""
return r'DevDiv\VCForPython'
@property
def microsoft_sdk(self):
"""
Microsoft SDK registry key.
"""
return 'Microsoft SDKs'
@property
def windows_sdk(self):
"""
Microsoft Windows/Platform SDK registry key.
"""
return os.path.join(self.microsoft_sdk, 'Windows')
@property
def netfx_sdk(self):
"""
Microsoft .NET Framework SDK registry key.
"""
return os.path.join(self.microsoft_sdk, 'NETFXSDK')
@property
def windows_kits_roots(self):
"""
Microsoft Windows Kits Roots registry key.
"""
return r'Windows Kits\Installed Roots'
def microsoft(self, key, x86=False):
"""
Return key in Microsoft software registry.
Parameters
----------
key: str
Registry key path to look in.
x86: str
Force x86 software registry.
Return
------
str: value
"""
node64 = '' if self.pi.current_is_x86() or x86 else r'\Wow6432Node'
return os.path.join('Software', node64, 'Microsoft', key)
def lookup(self, key, name):
"""
Look for values in registry in Microsoft software registry.
Parameters
----------
key: str
Registry key path to look in.
name: str
Value name to find.
Return
------
str: value
"""
KEY_READ = winreg.KEY_READ
openkey = winreg.OpenKey
ms = self.microsoft
for hkey in self.HKEYS:
try:
bkey = openkey(hkey, ms(key), 0, KEY_READ)
except (OSError, IOError):
if not self.pi.current_is_x86():
try:
bkey = openkey(hkey, ms(key, True), 0, KEY_READ)
except (OSError, IOError):
continue
else:
continue
try:
return winreg.QueryValueEx(bkey, name)[0]
except (OSError, IOError):
pass
class SystemInfo:
"""
Microsoft Windows and Visual Studio related system information.
Parameters
----------
registry_info: RegistryInfo
"RegistryInfo" instance.
vc_ver: float
Required Microsoft Visual C++ version.
"""
# Variables and properties in this class use original CamelCase variable
# names from Microsoft source files for easier comparison.
WinDir = safe_env.get('WinDir', '')
ProgramFiles = safe_env.get('ProgramFiles', '')
ProgramFilesx86 = safe_env.get('ProgramFiles(x86)', ProgramFiles)
def __init__(self, registry_info, vc_ver=None):
self.ri = registry_info
self.pi = self.ri.pi
if vc_ver:
self.vc_ver = vc_ver
else:
try:
self.vc_ver = self.find_available_vc_vers()[-1]
except IndexError:
err = 'No Microsoft Visual C++ version found'
raise distutils.errors.DistutilsPlatformError(err)
def find_available_vc_vers(self):
"""
Find all available Microsoft Visual C++ versions.
"""
vckeys = (self.ri.vc, self.ri.vc_for_python)
vc_vers = []
for hkey in self.ri.HKEYS:
for key in vckeys:
try:
bkey = winreg.OpenKey(hkey, key, 0, winreg.KEY_READ)
except (OSError, IOError):
continue
subkeys, values, _ = winreg.QueryInfoKey(bkey)
for i in range(values):
try:
ver = float(winreg.EnumValue(bkey, i)[0])
if ver not in vc_vers:
vc_vers.append(ver)
except ValueError:
pass
for i in range(subkeys):
try:
ver = float(winreg.EnumKey(bkey, i))
if ver not in vc_vers:
vc_vers.append(ver)
except ValueError:
pass
return sorted(vc_vers)
@property
def VSInstallDir(self):
"""
Microsoft Visual Studio directory.
"""
# Default path
name = 'Microsoft Visual Studio %0.1f' % self.vc_ver
default = os.path.join(self.ProgramFilesx86, name)
# Try to get path from registry, if fail use default path
return self.ri.lookup(self.ri.vs, '%0.1f' % self.vc_ver) or default
@property
def VCInstallDir(self):
"""
Microsoft Visual C++ directory.
"""
# Default path
default = r'Microsoft Visual Studio %0.1f\VC' % self.vc_ver
guess_vc = os.path.join(self.ProgramFilesx86, default)
# Try to get "VC++ for Python" path from registry as default path
reg_path = os.path.join(self.ri.vc_for_python, '%0.1f' % self.vc_ver)
python_vc = self.ri.lookup(reg_path, 'installdir')
default_vc = os.path.join(python_vc, 'VC') if python_vc else guess_vc
# Try to get path from registry, if fail use default path
path = self.ri.lookup(self.ri.vc, '%0.1f' % self.vc_ver) or default_vc
if not os.path.isdir(path):
msg = 'Microsoft Visual C++ directory not found'
raise distutils.errors.DistutilsPlatformError(msg)
return path
@property
def WindowsSdkVersion(self):
"""
Microsoft Windows SDK versions.
"""
# Set Windows SDK versions for specified MSVC++ version
if self.vc_ver <= 9.0:
return ('7.0', '6.1', '6.0a')
elif self.vc_ver == 10.0:
return ('7.1', '7.0a')
elif self.vc_ver == 11.0:
return ('8.0', '8.0a')
elif self.vc_ver == 12.0:
return ('8.1', '8.1a')
elif self.vc_ver >= 14.0:
return ('10.0', '8.1')
@property
def WindowsSdkDir(self):
"""
Microsoft Windows SDK directory.
"""
sdkdir = ''
for ver in self.WindowsSdkVersion:
# Try to get it from registry
loc = os.path.join(self.ri.windows_sdk, 'v%s' % ver)
sdkdir = self.ri.lookup(loc, 'installationfolder')
if sdkdir:
break
if not sdkdir or not os.path.isdir(sdkdir):
# Try to get "VC++ for Python" version from registry
path = os.path.join(self.ri.vc_for_python, '%0.1f' % self.vc_ver)
install_base = self.ri.lookup(path, 'installdir')
if install_base:
sdkdir = os.path.join(install_base, 'WinSDK')
if not sdkdir or not os.path.isdir(sdkdir):
# If fail, use default new path
for ver in self.WindowsSdkVersion:
intver = ver[:ver.rfind('.')]
path = r'Microsoft SDKs\Windows Kits\%s' % (intver)
d = os.path.join(self.ProgramFiles, path)
if os.path.isdir(d):
sdkdir = d
if not sdkdir or not os.path.isdir(sdkdir):
# If fail, use default old path
for ver in self.WindowsSdkVersion:
path = r'Microsoft SDKs\Windows\v%s' % ver
d = os.path.join(self.ProgramFiles, path)
if os.path.isdir(d):
sdkdir = d
if not sdkdir:
# If fail, use Platform SDK
sdkdir = os.path.join(self.VCInstallDir, 'PlatformSDK')
return sdkdir
@property
def WindowsSDKExecutablePath(self):
"""
Microsoft Windows SDK executable directory.
"""
# Find WinSDK NetFx Tools registry dir name
if self.vc_ver <= 11.0:
netfxver = 35
arch = ''
else:
netfxver = 40
hidex86 = True if self.vc_ver <= 12.0 else False
arch = self.pi.current_dir(x64=True, hidex86=hidex86)
fx = 'WinSDK-NetFx%dTools%s' % (netfxver, arch.replace('\\', '-'))
# List all possible registry paths
regpaths = []
if self.vc_ver >= 14.0:
for ver in self.NetFxSdkVersion:
regpaths += [os.path.join(self.ri.netfx_sdk, ver, fx)]
for ver in self.WindowsSdkVersion:
regpaths += [os.path.join(self.ri.windows_sdk, 'v%sA' % ver, fx)]
# Return installation folder from the most recent path
for path in regpaths:
execpath = self.ri.lookup(path, 'installationfolder')
if execpath:
break
return execpath
@property
def FSharpInstallDir(self):
"""
Microsoft Visual F# directory.
"""
path = r'%0.1f\Setup\F#' % self.vc_ver
path = os.path.join(self.ri.visualstudio, path)
return self.ri.lookup(path, 'productdir') or ''
@property
def UniversalCRTSdkDir(self):
"""
Microsoft Universal CRT SDK directory.
"""
# Set Kit Roots versions for specified MSVC++ version
if self.vc_ver >= 14.0:
vers = ('10', '81')
else:
vers = ()
# Find path of the most recent Kit
for ver in vers:
sdkdir = self.ri.lookup(self.ri.windows_kits_roots,
'kitsroot%s' % ver)
if sdkdir:
break
return sdkdir or ''
@property
def NetFxSdkVersion(self):
"""
Microsoft .NET Framework SDK versions.
"""
# Set FxSdk versions for specified MSVC++ version
if self.vc_ver >= 14.0:
return ('4.6.1', '4.6')
else:
return ()
@property
def NetFxSdkDir(self):
"""
Microsoft .NET Framework SDK directory.
"""
for ver in self.NetFxSdkVersion:
loc = os.path.join(self.ri.netfx_sdk, ver)
sdkdir = self.ri.lookup(loc, 'kitsinstallationfolder')
if sdkdir:
break
return sdkdir or ''
@property
def FrameworkDir32(self):
"""
Microsoft .NET Framework 32bit directory.
"""
# Default path
guess_fw = os.path.join(self.WinDir, r'Microsoft.NET\Framework')
# Try to get path from registry, if fail use default path
return self.ri.lookup(self.ri.vc, 'frameworkdir32') or guess_fw
@property
def FrameworkDir64(self):
"""
Microsoft .NET Framework 64bit directory.
"""
# Default path
guess_fw = os.path.join(self.WinDir, r'Microsoft.NET\Framework64')
# Try to get path from registry, if fail use default path
return self.ri.lookup(self.ri.vc, 'frameworkdir64') or guess_fw
@property
def FrameworkVersion32(self):
"""
Microsoft .NET Framework 32bit versions.
"""
return self._find_dot_net_versions(32)
@property
def FrameworkVersion64(self):
"""
Microsoft .NET Framework 64bit versions.
"""
return self._find_dot_net_versions(64)
def _find_dot_net_versions(self, bits=32):
"""
Find Microsoft .NET Framework versions.
Parameters
----------
bits: int
Platform number of bits: 32 or 64.
"""
# Find actual .NET version
ver = self.ri.lookup(self.ri.vc, 'frameworkver%d' % bits) or ''
# Set .NET versions for specified MSVC++ version
if self.vc_ver >= 12.0:
frameworkver = (ver, 'v4.0')
elif self.vc_ver >= 10.0:
frameworkver = ('v4.0.30319' if ver.lower()[:2] != 'v4' else ver,
'v3.5')
elif self.vc_ver == 9.0:
frameworkver = ('v3.5', 'v2.0.50727')
elif self.vc_ver == 8.0:
frameworkver = ('v3.0', 'v2.0.50727')
return frameworkver
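# For instance (hypothetical walk-through of the branches above): with vc_ver == 9.0
# this resolves to ('v3.5', 'v2.0.50727'), regardless of the registry value read above.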
class EnvironmentInfo:
"""
Return environment variables for specified Microsoft Visual C++ version
and platform : Lib, Include, Path and libpath.
This function is compatible with Microsoft Visual C++ 9.0 to 14.0.
Script created by analysing Microsoft environment configuration files like
"vcvars[...].bat", "SetEnv.Cmd", "vcbuildtools.bat", ...
Parameters
----------
arch: str
Target architecture.
vc_ver: float
Required Microsoft Visual C++ version. If not set, autodetect the last
version.
vc_min_ver: float
Minimum Microsoft Visual C++ version.
"""
# Variables and properties in this class use original CamelCase variable
# names from Microsoft source files for easier comparison.
def __init__(self, arch, vc_ver=None, vc_min_ver=None):
self.pi = PlatformInfo(arch)
self.ri = RegistryInfo(self.pi)
self.si = SystemInfo(self.ri, vc_ver)
if vc_min_ver:
if self.vc_ver < vc_min_ver:
err = 'No suitable Microsoft Visual C++ version found'
raise distutils.errors.DistutilsPlatformError(err)
@property
def vc_ver(self):
"""
Microsoft Visual C++ version.
"""
return self.si.vc_ver
@property
def VSTools(self):
"""
Microsoft Visual Studio Tools
"""
paths = [r'Common7\IDE', r'Common7\Tools']
if self.vc_ver >= 14.0:
arch_subdir = self.pi.current_dir(hidex86=True, x64=True)
paths += [r'Common7\IDE\CommonExtensions\Microsoft\TestWindow']
paths += [r'Team Tools\Performance Tools']
paths += [r'Team Tools\Performance Tools%s' % arch_subdir]
return [os.path.join(self.si.VSInstallDir, path) for path in paths]
@property
def VCIncludes(self):
"""
Microsoft Visual C++ & Microsoft Foundation Class Includes
"""
return [os.path.join(self.si.VCInstallDir, 'Include'),
os.path.join(self.si.VCInstallDir, r'ATLMFC\Include')]
@property
def VCLibraries(self):
"""
Microsoft Visual C++ & Microsoft Foundation Class Libraries
"""
arch_subdir = self.pi.target_dir(hidex86=True)
paths = ['Lib%s' % arch_subdir, r'ATLMFC\Lib%s' % arch_subdir]
if self.vc_ver >= 14.0:
paths += [r'Lib\store%s' % arch_subdir]
return [os.path.join(self.si.VCInstallDir, path) for path in paths]
@property
def VCStoreRefs(self):
"""
Microsoft Visual C++ store references Libraries
"""
if self.vc_ver < 14.0:
return []
return [os.path.join(self.si.VCInstallDir, r'Lib\store\references')]
@property
def VCTools(self):
"""
Microsoft Visual C++ Tools
"""
si = self.si
tools = [os.path.join(si.VCInstallDir, 'VCPackages')]
forcex86 = True if self.vc_ver <= 10.0 else False
arch_subdir = self.pi.cross_dir(forcex86)
if arch_subdir:
tools += [os.path.join(si.VCInstallDir, 'Bin%s' % arch_subdir)]
if self.vc_ver >= 14.0:
path = 'Bin%s' % self.pi.current_dir(hidex86=True)
tools += [os.path.join(si.VCInstallDir, path)]
else:
tools += [os.path.join(si.VCInstallDir, 'Bin')]
return tools
@property
def OSLibraries(self):
"""
Microsoft Windows SDK Libraries
"""
if self.vc_ver <= 10.0:
arch_subdir = self.pi.target_dir(hidex86=True, x64=True)
return [os.path.join(self.si.WindowsSdkDir, 'Lib%s' % arch_subdir)]
else:
arch_subdir = self.pi.target_dir(x64=True)
lib = os.path.join(self.si.WindowsSdkDir, 'lib')
libver = self._get_content_dirname(lib)
return [os.path.join(lib, '%sum%s' % (libver, arch_subdir))]
@property
def OSIncludes(self):
"""
Microsoft Windows SDK Include
"""
include = os.path.join(self.si.WindowsSdkDir, 'include')
if self.vc_ver <= 10.0:
return [include, os.path.join(include, 'gl')]
else:
if self.vc_ver >= 14.0:
sdkver = self._get_content_dirname(include)
else:
sdkver = ''
return [os.path.join(include, '%sshared' % sdkver),
os.path.join(include, '%sum' % sdkver),
os.path.join(include, '%swinrt' % sdkver)]
@property
def OSLibpath(self):
"""
Microsoft Windows SDK Libraries Paths
"""
ref = os.path.join(self.si.WindowsSdkDir, 'References')
libpath = []
if self.vc_ver <= 9.0:
libpath += self.OSLibraries
if self.vc_ver >= 11.0:
libpath += [os.path.join(ref, r'CommonConfiguration\Neutral')]
if self.vc_ver >= 14.0:
libpath += [
ref,
os.path.join(self.si.WindowsSdkDir, 'UnionMetadata'),
os.path.join(
ref,
'Windows.Foundation.UniversalApiContract',
'1.0.0.0',
),
os.path.join(
ref,
'Windows.Foundation.FoundationContract',
'1.0.0.0',
),
os.path.join(
ref,
'Windows.Networking.Connectivity.WwanContract',
'1.0.0.0',
),
os.path.join(
self.si.WindowsSdkDir,
'ExtensionSDKs',
'Microsoft.VCLibs',
'%0.1f' % self.vc_ver,
'References',
'CommonConfiguration',
'neutral',
),
]
return libpath
@property
def SdkTools(self):
"""
Microsoft Windows SDK Tools
"""
bin_dir = 'Bin' if self.vc_ver <= 11.0 else r'Bin\x86'
tools = [os.path.join(self.si.WindowsSdkDir, bin_dir)]
if not self.pi.current_is_x86():
arch_subdir = self.pi.current_dir(x64=True)
path = 'Bin%s' % arch_subdir
tools += [os.path.join(self.si.WindowsSdkDir, path)]
if self.vc_ver == 10.0 or self.vc_ver == 11.0:
if self.pi.target_is_x86():
arch_subdir = ''
else:
arch_subdir = self.pi.current_dir(hidex86=True, x64=True)
path = r'Bin\NETFX 4.0 Tools%s' % arch_subdir
tools += [os.path.join(self.si.WindowsSdkDir, path)]
if self.si.WindowsSDKExecutablePath:
tools += [self.si.WindowsSDKExecutablePath]
return tools
@property
def SdkSetup(self):
"""
Microsoft Windows SDK Setup
"""
if self.vc_ver > 9.0:
return []
return [os.path.join(self.si.WindowsSdkDir, 'Setup')]
@property
def FxTools(self):
"""
Microsoft .NET Framework Tools
"""
pi = self.pi
si = self.si
if self.vc_ver <= 10.0:
include32 = True
include64 = not pi.target_is_x86() and not pi.current_is_x86()
else:
include32 = pi.target_is_x86() or pi.current_is_x86()
include64 = pi.current_cpu == 'amd64' or pi.target_cpu == 'amd64'
tools = []
if include32:
tools += [os.path.join(si.FrameworkDir32, ver)
for ver in si.FrameworkVersion32]
if include64:
tools += [os.path.join(si.FrameworkDir64, ver)
for ver in si.FrameworkVersion64]
return tools
@property
def NetFxSDKLibraries(self):
"""
Microsoft .Net Framework SDK Libraries
"""
if self.vc_ver < 14.0 or not self.si.NetFxSdkDir:
return []
arch_subdir = self.pi.target_dir(x64=True)
return [os.path.join(self.si.NetFxSdkDir, r'lib\um%s' % arch_subdir)]
@property
def NetFxSDKIncludes(self):
"""
Microsoft .Net Framework SDK Includes
"""
if self.vc_ver < 14.0 or not self.si.NetFxSdkDir:
return []
return [os.path.join(self.si.NetFxSdkDir, r'include\um')]
@property
def VsTDb(self):
"""
Microsoft Visual Studio Team System Database
"""
return [os.path.join(self.si.VSInstallDir, r'VSTSDB\Deploy')]
@property
def MSBuild(self):
"""
Microsoft Build Engine
"""
if self.vc_ver < 12.0:
return []
arch_subdir = self.pi.current_dir(hidex86=True)
path = r'MSBuild\%0.1f\bin%s' % (self.vc_ver, arch_subdir)
return [os.path.join(self.si.ProgramFilesx86, path)]
@property
def HTMLHelpWorkshop(self):
"""
Microsoft HTML Help Workshop
"""
if self.vc_ver < 11.0:
return []
return [os.path.join(self.si.ProgramFilesx86, 'HTML Help Workshop')]
@property
def UCRTLibraries(self):
"""
Microsoft Universal CRT Libraries
"""
if self.vc_ver < 14.0:
return []
arch_subdir = self.pi.target_dir(x64=True)
lib = os.path.join(self.si.UniversalCRTSdkDir, 'lib')
ucrtver = self._get_content_dirname(lib)
return [os.path.join(lib, '%sucrt%s' % (ucrtver, arch_subdir))]
@property
def UCRTIncludes(self):
"""
Microsoft Universal CRT Include
"""
if self.vc_ver < 14.0:
return []
include = os.path.join(self.si.UniversalCRTSdkDir, 'include')
ucrtver = self._get_content_dirname(include)
return [os.path.join(include, '%sucrt' % ucrtver)]
@property
def FSharp(self):
"""
Microsoft Visual F#
"""
if self.vc_ver < 11.0 or self.vc_ver > 12.0:
return []
return self.si.FSharpInstallDir
@property
def VCRuntimeRedist(self):
"""
Microsoft Visual C++ runtime redistributable DLL
"""
arch_subdir = self.pi.target_dir(x64=True)
vcruntime = 'redist%s\\Microsoft.VC%d0.CRT\\vcruntime%d0.dll'
vcruntime = vcruntime % (arch_subdir, self.vc_ver, self.vc_ver)
return os.path.join(self.si.VCInstallDir, vcruntime)
def return_env(self, exists=True):
"""
Return environment dict.
Parameters
----------
exists: bool
If True, only return existing paths.
"""
env = dict(
include=self._build_paths('include',
[self.VCIncludes,
self.OSIncludes,
self.UCRTIncludes,
self.NetFxSDKIncludes],
exists),
lib=self._build_paths('lib',
[self.VCLibraries,
self.OSLibraries,
self.FxTools,
self.UCRTLibraries,
self.NetFxSDKLibraries],
exists),
libpath=self._build_paths('libpath',
[self.VCLibraries,
self.FxTools,
self.VCStoreRefs,
self.OSLibpath],
exists),
path=self._build_paths('path',
[self.VCTools,
self.VSTools,
self.VsTDb,
self.SdkTools,
self.SdkSetup,
self.FxTools,
self.MSBuild,
self.HTMLHelpWorkshop,
self.FSharp],
exists),
)
if self.vc_ver >= 14 and os.path.isfile(self.VCRuntimeRedist):
env['py_vcruntime_redist'] = self.VCRuntimeRedist
return env
def _build_paths(self, name, spec_path_lists, exists):
"""
Given an environment variable name and specified paths,
return a pathsep-separated string of paths containing
unique, extant, directories from those paths and from
the environment variable. Raise an error if no paths
are resolved.
"""
# flatten spec_path_lists
spec_paths = itertools.chain.from_iterable(spec_path_lists)
env_paths = safe_env.get(name, '').split(os.pathsep)
paths = itertools.chain(spec_paths, env_paths)
extant_paths = list(filter(os.path.isdir, paths)) if exists else paths
if not extant_paths:
msg = "%s environment variable is empty" % name.upper()
raise distutils.errors.DistutilsPlatformError(msg)
unique_paths = self._unique_everseen(extant_paths)
return os.pathsep.join(unique_paths)
# from Python docs
def _unique_everseen(self, iterable, key=None):
"""
List unique elements, preserving order.
Remember all elements ever seen.
_unique_everseen('AAAABBBCCDAABBB') --> A B C D
_unique_everseen('ABBCcAD', str.lower) --> A B C D
"""
seen = set()
seen_add = seen.add
if key is None:
for element in filterfalse(seen.__contains__, iterable):
seen_add(element)
yield element
else:
for element in iterable:
k = key(element)
if k not in seen:
seen_add(k)
yield element
def _get_content_dirname(self, path):
"""
Return name of the first dir in path or '' if no dir found.
Parameters
----------
path: str
Path in which to search for the dir.
Return
------
foldername: str
"name\" or ""
"""
try:
name = os.listdir(path)
if name:
return '%s\\' % name[0]
return ''
except (OSError, IOError):
return ''
|
Microsoft/PTVS
|
refs/heads/master
|
Python/Tests/TestData/ReplSysPath/Program.py
|
12133432
| |
synergeticsedx/deployment-wipro
|
refs/heads/oxa/master.fic
|
lms/djangoapps/instructor/tests/views/__init__.py
|
12133432
| |
QUANTAXIS/QUANTAXIS
|
refs/heads/master
|
QUANTAXIS_Test/__init__.py
|
12133432
| |
biicode/common
|
refs/heads/develop
|
test/edition/parsing/nodejs/__init__.py
|
12133432
| |
boldprogressives/django-apihangar
|
refs/heads/master
|
apihangar/__init__.py
|
12133432
| |
ArchiFleKs/magnum
|
refs/heads/master
|
magnum/tests/unit/db/sqlalchemy/__init__.py
|
12133432
| |
fdlm/simplehmm-python
|
refs/heads/master
|
hmm.py
|
1
|
from math import log
from itertools import izip
class HMM:
def __init__(self, pi, A, B):
self.pi = pi
self.A = A
self.B = B
def normalise(l):
norm_const = sum(l)
return map(lambda x: x / norm_const, l), norm_const
def find_max(l):
m = max(l)
i = l.index(m)
return m, i
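# Forward algorithm: returns the scaled forward variables (one normalised list of
# state probabilities per observation) and the log-likelihood of the sequence.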
def forward(model, observations):
state_idxs = range(len(model.pi))
log_prob = 0.
alphas = [[model.pi[i] * model.B[i][observations[0]] for i in state_idxs]]
alphas[0], nc = normalise(alphas[0])
log_prob += log(nc)
for obs in observations[1:]:
alphas += [[sum([alphas[-1][j] * model.A[j][i] for j in state_idxs]) * model.B[i][obs] for i in state_idxs]]
alphas[-1], nc = normalise(alphas[-1])
log_prob += log(nc)
return alphas, log_prob
def backward(model, observations):
state_idxs = range(len(model.pi))
betas = [[1] * len(model.pi)]
for obs in observations[::-1]:
beta = [sum([betas[0][j] * model.B[j][obs] * model.A[i][j] for j in state_idxs]) for i in state_idxs]
betas.insert(0, normalise(beta)[0])
return betas
def forward_backward(model, observations):
alphas, logprob = forward(model, observations)
betas = backward(model, observations)
gammas = [normalise([a * b for a, b in izip(alpha, beta)])[0] for alpha, beta in izip(alphas, betas[1:])]
return gammas, logprob
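# Viterbi algorithm: returns the most likely hidden state sequence for the observations.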
def viterbi(model, observations):
state_idxs = range(len(model.pi))
deltas = [[]]
psis = []
deltas[0] = forward(model, observations[:1])[0][0]
for obs in observations[1:]:
trans, from_state = izip(*[find_max([deltas[-1][j] * model.A[j][i] for j in state_idxs]) for i in state_idxs])
deltas += [normalise([trans[i] * model.B[i][obs] for i in state_idxs])[0]]
psis += [from_state]
path = [find_max(deltas[-1])[1]]
for psi in psis[::-1]:
path.insert(0, psi[path[0]])
return path
if __name__ == "__main__":
A = [[0.5, 0.3, 0.2],[0.2, 0.6, 0.2],[0.1, 0.2, 0.7]]
B = [[0.1, 0.9], [0.4, 0.6], [0.9, 0.1]]
pi = [0.3, 0.5, 0.2]
obs = [1, 0, 0, 0, 1, 1, 0, 1, 1]
model = HMM(pi, A, B)
print forward(model, obs)
print forward_backward(model, obs)
print viterbi(model, obs)
|
AizazSharif/aizazsharif.github.io
|
refs/heads/master
|
markdown_generator/talks.py
|
199
|
# coding: utf-8
# # Talks markdown generator for academicpages
#
# Takes a TSV of talks with metadata and converts them for use with [academicpages.github.io](academicpages.github.io). This is an interactive Jupyter notebook ([see more info here](http://jupyter-notebook-beginner-guide.readthedocs.io/en/latest/what_is_jupyter.html)). The core python code is also in `talks.py`. Run either from the `markdown_generator` folder after replacing `talks.tsv` with one containing your data.
#
# TODO: Make this work with BibTex and other databases, rather than Stuart's non-standard TSV format and citation style.
# In[1]:
import pandas as pd
import os
# ## Data format
#
# The TSV needs to have the following columns: title, type, url_slug, venue, date, location, talk_url, description, with a header at the top. Many of these fields can be blank, but the columns must be in the TSV.
#
# - Fields that cannot be blank: `title`, `url_slug`, `date`. All else can be blank. `type` defaults to "Talk"
# - `date` must be formatted as YYYY-MM-DD.
# - `url_slug` will be the descriptive part of the .md file and the permalink URL for the page about the paper.
# - The .md file will be `YYYY-MM-DD-[url_slug].md` and the permalink will be `https://[yourdomain]/talks/YYYY-MM-DD-[url_slug]`
# - The combination of `url_slug` and `date` must be unique, as it will be the basis for your filenames
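# - A hypothetical example row (columns separated by tabs):
#   `My Example Talk\tTalk\texample-talk\tExample Conference\t2017-03-01\tBerlin, Germany\t\tA short description`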
#
# ## Import TSV
#
# Pandas makes this easy with the read_csv function. We are using a TSV, so we specify the separator as a tab, or `\t`.
#
# I found it important to put this data in a tab-separated values format, because there are a lot of commas in this kind of data and comma-separated values can get messed up. However, you can modify the import statement, as pandas also has read_excel(), read_json(), and others.
# In[3]:
talks = pd.read_csv("talks.tsv", sep="\t", header=0)
talks
# ## Escape special characters
#
# YAML is very picky about how it takes a valid string, so we are replacing single and double quotes (and ampersands) with their HTML encoded equivalents. This makes them less readable in raw format, but they are parsed and rendered nicely.
# In[4]:
html_escape_table = {
"&": "&",
'"': """,
"'": "'"
}
def html_escape(text):
if type(text) is str:
return "".join(html_escape_table.get(c,c) for c in text)
else:
return "False"
# ## Creating the markdown files
#
# This is where the heavy lifting is done. This loops through all the rows in the TSV dataframe, then starts to concatenate a big string (```md```) that contains the markdown for each type. It does the YAML metadata first, then does the description for the individual page.
# In[5]:
loc_dict = {}
for row, item in talks.iterrows():
md_filename = str(item.date) + "-" + item.url_slug + ".md"
html_filename = str(item.date) + "-" + item.url_slug
year = item.date[:4]
md = "---\ntitle: \"" + item.title + '"\n'
md += "collection: talks" + "\n"
if len(str(item.type)) > 3:
md += 'type: "' + item.type + '"\n'
else:
md += 'type: "Talk"\n'
md += "permalink: /talks/" + html_filename + "\n"
if len(str(item.venue)) > 3:
md += 'venue: "' + item.venue + '"\n'
if len(str(item.date)) > 3:
md += "date: " + str(item.date) + "\n"
if len(str(item.location)) > 3:
md += 'location: "' + str(item.location) + '"\n'
md += "---\n"
if len(str(item.talk_url)) > 3:
md += "\n[More information here](" + item.talk_url + ")\n"
if len(str(item.description)) > 3:
md += "\n" + html_escape(item.description) + "\n"
md_filename = os.path.basename(md_filename)
#print(md)
with open("../_talks/" + md_filename, 'w') as f:
f.write(md)
# These files are in the talks directory, one directory below where we're working from.
|
zerobatu/edx-platform
|
refs/heads/master
|
cms/djangoapps/contentstore/management/commands/check_course.py
|
160
|
from django.core.management.base import BaseCommand, CommandError
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.xml_importer import check_module_metadata_editability
from opaque_keys.edx.keys import CourseKey
from opaque_keys import InvalidKeyError
from opaque_keys.edx.locations import SlashSeparatedCourseKey
class Command(BaseCommand):
help = '''Enumerates through the course and find common errors'''
def handle(self, *args, **options):
if len(args) != 1:
raise CommandError("check_course requires one argument: <course_id>")
try:
course_key = CourseKey.from_string(args[0])
except InvalidKeyError:
course_key = SlashSeparatedCourseKey.from_deprecated_string(args[0])
store = modulestore()
course = store.get_course(course_key, depth=3)
err_cnt = 0
def _xlint_metadata(module):
err_cnt = check_module_metadata_editability(module)
for child in module.get_children():
err_cnt = err_cnt + _xlint_metadata(child)
return err_cnt
err_cnt = err_cnt + _xlint_metadata(course)
# we've had a bug where the xml_attributes field can be rewritten as a string rather than a dict
def _check_xml_attributes_field(module):
err_cnt = 0
if hasattr(module, 'xml_attributes') and isinstance(module.xml_attributes, basestring):
print 'module = {0} has xml_attributes as a string. It should be a dict'.format(module.location)
err_cnt = err_cnt + 1
for child in module.get_children():
err_cnt = err_cnt + _check_xml_attributes_field(child)
return err_cnt
err_cnt = err_cnt + _check_xml_attributes_field(course)
# check for dangling discussion items, this can cause errors in the forums
def _get_discussion_items(module):
discussion_items = []
if module.location.category == 'discussion':
discussion_items = discussion_items + [module.location]
for child in module.get_children():
discussion_items = discussion_items + _get_discussion_items(child)
return discussion_items
discussion_items = _get_discussion_items(course)
# now query all discussion items via get_items() and compare with the tree-traversal
queried_discussion_items = store.get_items(course_key=course_key, qualifiers={'category': 'discussion'})
for item in queried_discussion_items:
if item.location not in discussion_items:
print 'Found dangling discussion module = {0}'.format(item.location)
|
nyddle/hyde
|
refs/heads/master
|
hyde/users/forms.py
|
89
|
# -*- coding: utf-8 -*-
from django import forms
from .models import User
class UserForm(forms.ModelForm):
class Meta:
# Set this form to use the User model.
model = User
# Constrain the UserForm to just these fields.
fields = ("first_name", "last_name")
|
yask123/django
|
refs/heads/master
|
tests/defer/models.py
|
282
|
"""
Tests for defer() and only().
"""
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
class Secondary(models.Model):
first = models.CharField(max_length=50)
second = models.CharField(max_length=50)
@python_2_unicode_compatible
class Primary(models.Model):
name = models.CharField(max_length=50)
value = models.CharField(max_length=50)
related = models.ForeignKey(Secondary, models.CASCADE)
def __str__(self):
return self.name
class Child(Primary):
pass
class BigChild(Primary):
other = models.CharField(max_length=50)
class ChildProxy(Child):
class Meta:
proxy = True
class RefreshPrimaryProxy(Primary):
class Meta:
proxy = True
def refresh_from_db(self, using=None, fields=None, **kwargs):
# Reloads all deferred fields if any of the fields is deferred.
if fields is not None:
fields = set(fields)
deferred_fields = self.get_deferred_fields()
if fields.intersection(deferred_fields):
fields = fields.union(deferred_fields)
super(RefreshPrimaryProxy, self).refresh_from_db(using, fields, **kwargs)
|
nikoonia/gem5v
|
refs/heads/master
|
ext/ply/test/lex_dup2.py
|
174
|
# lex_dup2.py
#
# Duplicated rule specifiers
import sys
if ".." not in sys.path: sys.path.insert(0,"..")
import ply.lex as lex
tokens = [
"PLUS",
"MINUS",
"NUMBER",
]
t_PLUS = r'\+'
t_MINUS = r'-'
def t_NUMBER(t):
r'\d+'
pass
def t_NUMBER(t):
r'\d+'
pass
def t_error(t):
pass
lex.lex()
|
sliz1/servo
|
refs/heads/master
|
tests/wpt/web-platform-tests/websockets/handlers/protocol_array_wsh.py
|
265
|
#!/usr/bin/python
from mod_pywebsocket import msgutil, util
def web_socket_do_extra_handshake(request):
line = request.headers_in.get('Sec-WebSocket-Protocol')
request.ws_protocol = line.split(',', 1)[0]
#pass
def web_socket_transfer_data(request):
while True:
msgutil.send_message(request, request.ws_protocol)
return
|
bluec0re/pentest_utils
|
refs/heads/master
|
pocs/cors/test/cgi-bin/utils.py
|
1
|
#!/usr/bin/env python
# vim: set ts=8 sw=4 tw=0 fileencoding=utf-8 filetype=python expandtab:
import cgi
import os
def send_response(body, headers=None):
if not headers:
headers = {}
if 'Content-type' not in headers:
headers['Content-type'] = 'text/html'
for key, value in headers.items():
# a header value may be a single string or a list of strings
if not isinstance(value, list):
value = [value]
for v in value:
print("%s: %s" % (key, v))
print("")
print(body)
def get_cookie(name):
if 'HTTP_COOKIE' not in os.environ:
return None
cookies = [cookie.strip().split('=', 1) for cookie in os.environ['HTTP_COOKIE'].split(";")]
for cookie in cookies:
if cookie[0] == name:
return cookie[1]
return None
class Headers(dict):
def __setitem__(self, name, value):
if name in self:
if isinstance(self[name], list):
value = self[name] + [value]
else:
# keep the previously stored value when accumulating duplicates
value = [self[name], value]
super(Headers, self).__setitem__(name, value)
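# Illustrative usage (hypothetical, not part of the original script):
#   headers = Headers()
#   headers['Set-Cookie'] = 'a=1'
#   headers['Set-Cookie'] = 'b=2'   # duplicate keys accumulate into a list
#   send_response('<html></html>', headers)
# would emit both Set-Cookie headers before the body.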
|
guewen/rma
|
refs/heads/master
|
__unported__/crm_claim_ext/wizard/returned_lines_from_invoice.py
|
14
|
# -*- coding: utf-8 -*-
#########################################################################
# #
# #
#########################################################################
# #
# Copyright (C) 2009-2011 Akretion, Emmanuel Samyn #
# #
#This program is free software: you can redistribute it and/or modify #
#it under the terms of the GNU General Public License as published by #
#the Free Software Foundation, either version 3 of the License, or #
#(at your option) any later version. #
# #
#This program is distributed in the hope that it will be useful, #
#but WITHOUT ANY WARRANTY; without even the implied warranty of #
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
#GNU General Public License for more details. #
# #
#You should have received a copy of the GNU General Public License #
#along with this program. If not, see <http://www.gnu.org/licenses/>. #
#########################################################################
from osv import fields, osv
import pooler
#import time
#from datetime import datetime
#from dateutil.relativedelta import relativedelta
#===== WIZ STEP 1 : Invoice selection
class returned_lines_from_invoice_invoice(osv.osv_memory):
_name='returned_lines_from_invoice_invoice.wizard'
_description='Wizard to create product return lines from invoice'
_columns = {
'invoice_id': fields.many2one('account.invoice', 'Invoice', required=True),
'partner_id': fields.many2one('res.partner', 'Partner'),
}
# Get partner from the case if set
def _get_default_partner_id(self, cr, uid, context):
return self.pool.get('crm.claim').read(cr, uid, context['active_id'], ['partner_id'])['partner_id']
_defaults = {
'partner_id': _get_default_partner_id,
}
# If "Cancel" button pressed
def action_cancel(self,cr,uid,ids,conect=None):
return {'type': 'ir.actions.act_window_close',}
# If "Return all" button pressed
def action_return_all(self, cr, uid, ids, context):
# Get invoice id
inv_id = 0
for wiz_obj in self.browse(cr,uid,ids):
inv_id = wiz_obj.invoice_id.id
# Get invoice line ids from invoice id
invoice_line_pool = self.pool.get('account.invoice.line')
invoice_lines_ids = invoice_line_pool.search(cr, uid, [('invoice_id', '=', inv_id)])
# Get invoice lines from invoice line ids
for invoice_line in invoice_line_pool.browse(cr,uid,invoice_lines_ids):
claim_line_pool = self.pool.get('claim.line')
line_id = claim_line_pool.create(cr, uid, {
'claim_origine' : "none",
'invoice_id' : invoice_line.invoice_id.id,
'product_id' : invoice_line.product_id.id,
'product_returned_quantity' : invoice_line.quantity,
'unit_sale_price' : invoice_line.price_unit,
#'prodlot_id' : invoice_line.,
'claim_id' : context['active_id'],
'selected' : False,
'state' : 'draft',
})
for line in claim_line_pool.browse(cr,uid,[line_id],context):
line.set_warranty()
return {'type': 'ir.actions.act_window_close',}
# If "Select lines" button pressed
def action_select_lines(self, cr, uid, ids, context):
# Add invoice_id to context
for wiz_obj in self.browse(cr,uid,ids):
context['invoice_id'] = wiz_obj.invoice_id.id
return {
'context': context,
'view_type': 'form',
'view_mode': 'form',
'res_model': 'returned_lines_from_invoice_line.wizard',
'view_id': False,
'type': 'ir.actions.act_window',
'target': 'new',
}
returned_lines_from_invoice_invoice()
#===== WIZ STEP 2 : line selection
class returned_lines_from_invoice_lines(osv.osv_memory):
_name='returned_lines_from_invoice_line.wizard'
_description='Wizard to create product return lines from invoice'
_columns = {
'claim_line_ids' : fields.many2many('temp.claim.line', string='claim lines'),
}
# Get possible returns from invoice
def _get_possible_returns_from_invoice(self, cr, uid, context):
# Get invoice lines from invoice
invoice_lines_ids = self.pool.get('account.invoice.line').search(cr, uid, [('invoice_id', '=', context['invoice_id'])])
M2M = []
# Create return lines from invoice lines
for invoice_line in self.pool.get('account.invoice.line').browse(cr,uid,invoice_lines_ids):
M2M.append(self.pool.get('temp.claim.line').create(cr, uid, {
'claim_origine' : "none",
'invoice_id' : invoice_line.invoice_id.id,
'invoice_line_id' : invoice_line.id,
'product_id' : invoice_line.product_id.id,
'product_returned_quantity' : invoice_line.quantity,
#'prodlot_id' : invoice_line.,
'price_unit': invoice_line.price_unit,
}))
return M2M
_defaults = {
'claim_line_ids': _get_possible_returns_from_invoice,
}
# If "Cancel" button pressed
    def action_cancel(self, cr, uid, ids, context=None):
return {'type': 'ir.actions.act_window_close',}
# If "Create" button pressed, for all temp return line create return line
def action_create_returns(self, cr, uid, ids, context=None):
for wiz_obj in self.browse(cr,uid,ids):
for line in wiz_obj.claim_line_ids:
claim_line_pool = self.pool.get('claim.line')
line_id = claim_line_pool.create(cr, uid, {
'claim_origine' : line.claim_origine,
'invoice_id' : line.invoice_id.id,
'product_id' : line.product_id.id,
'product_returned_quantity' : line.product_returned_quantity,
'unit_sale_price' : line.price_unit,
#'prodlot_id' : invoice_line.,
'claim_id' : context['active_id'],
'selected' : False,
'state' : 'draft',
})
for line in claim_line_pool.browse(cr,uid,[line_id],context):
line.set_warranty()
return {
'type': 'ir.actions.act_window_close',
}
returned_lines_from_invoice_lines()
#===== Temp returned line
class temp_claim_line(osv.osv_memory):
"""
Class to handle a product return line (corresponding to one invoice line)
"""
_name = "temp.claim.line"
_description = "List of product to return"
_columns = {
'claim_origine': fields.selection([('none','Not specified'),
('legal','Legal retractation'),
('cancellation','Order cancellation'),
('damaged','Damaged delivered product'),
('error','Shipping error'),
('exchange','Exchange request'),
('lost','Lost during transport'),
('other','Other')], 'Claim Subject', required=True, help="To describe the line product problem"),
'invoice_id': fields.many2one('account.invoice', 'Invoice'),
'invoice_line_id' : fields.many2one('account.invoice.line', 'Invoice line'),
'product_id': fields.many2one('product.product', 'Product'),
'product_returned_quantity' : fields.float('Returned quantity', digits=(12,2), help="Quantity of product returned"),
'prodlot_id': fields.many2one('stock.production.lot', 'Serial / Lot Number'),
'price_unit': fields.float('Unit sale price', digits=(12,2),),
}
temp_claim_line()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
wanglongqi/sympy
|
refs/heads/master
|
sympy/galgebra/manifold.py
|
51
|
# sympy/galgebra/manifold.py
"""
manifold.py defines the Manifold class, which allows one to create a
vector manifold (a manifold defined by a vector field of coordinates in
an embedding vector space) and to calculate the tangent vectors and the
derivatives of the tangent vectors.
Once the manifold is created, multivector fields can be constructed in the
tangent space, and all the geometric algebra products and derivatives
of the multivector fields can be calculated.
Note that all calculations are done in the embedding space. Future
versions of the code will allow manifolds defined purely in terms of
a metric.
"""
from __future__ import print_function
from itertools import combinations
from os import system
import copy
from sympy import trigsimp, simplify
from sympy.core.compatibility import range
from sympy.galgebra.ga import MV
from sympy.galgebra.debug import oprint
from sympy.galgebra.ncutil import linear_expand
from sympy.galgebra.printing import find_executable
def fct_to_str(fct_names):
import sys
current_file = open(sys.argv[0], 'r')
file_str = current_file.read()
current_file.close()
if isinstance(fct_names, str):
return fct_names
fcts_str = ''
for fct_name in fct_names:
start_def = file_str.find('\ndef ' + fct_name)
end_def = file_str.find('\ndef ', start_def + 5)
start_class = file_str.find('\nclass ', start_def + 5)
end_def = min(end_def, start_class)
fcts_str += file_str[start_def:end_def]
return fcts_str
def VectorComponents(X, basis):
(coefs, bases) = linear_expand(X.obj)
cdict = {}
for (coef, base) in zip(coefs, bases):
cdict[str(base)] = coef
comp = []
for base in basis:
if base in cdict:
comp.append(cdict[base])
else:
comp.append(0)
return comp
def FillTemplate(self, template):
Nd = 0
var = []
id_old = 0
while True:
id_new = template.find('$', id_old + 1)
if id_new == -1:
break
Nd += 1
if Nd % 2 == 0:
var.append(template[id_old + 1:id_new])
id_old = id_new
var.sort(reverse=True)
for v in var:
template = template.replace('$' + v + '$', str(eval('self.' + v)))
return template
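# Note on FillTemplate (descriptive, added for clarity): every $name$ pair in the
# template is replaced by str(eval('self.' + name)), so '$u_range$' is filled from
# self.u_range.  Names are substituted in reverse-sorted order, which places longer
# names before any shorter names that prefix them.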
class Manifold:
def __init__(self, x, coords, debug=False, I=None):
"""
coords: list of coordinate variables
        x: vector function of coordinate variables (parametric surface)
"""
self.I = I
self.x = x
self.coords = coords
self.basis = []
self.basis_str = []
self.embedded_basis = []
for u in coords:
tv = x.diff(u)
self.basis.append(tv)
(coefs, bases) = linear_expand(tv.obj)
tc = {}
for (coef, base) in zip(coefs, bases):
str_base = str(base)
tc[str_base] = coef
if str_base not in self.embedded_basis:
self.embedded_basis.append(str_base)
self.basis_str.append(tc)
self.gij = []
for base1 in self.basis:
tmp = []
for base2 in self.basis:
tmp.append(simplify(trigsimp((base1 | base2).scalar())))
self.gij.append(tmp)
for tv in self.basis_str:
for base in self.embedded_basis:
if base not in tv:
tv[base] = 0
self.dim = len(self.basis)
indexes = tuple(range(self.dim))
self.index = [()]
for i in indexes:
self.index.append(tuple(combinations(indexes, i + 1)))
self.index = tuple(self.index)
self.MFbasis = [[MV.ONE], self.basis]
for igrade in self.index[2:]:
grade = []
for iblade in igrade:
blade = MV(1, 'scalar')
for ibasis in iblade:
blade ^= self.basis[ibasis]
blade = blade.trigsimp(deep=True, recursive=True)
grade.append(blade)
self.MFbasis.append(grade)
self.E = self.MFbasis[-1][0]
self.E_sq = trigsimp((self.E * self.E).scalar(), deep=True, recursive=True)
duals = copy.copy(self.MFbasis[-2])
duals.reverse()
sgn = 1
self.rbasis = []
for dual in duals:
recpv = (sgn * dual * self.E).trigsimp(deep=True, recursive=True)
self.rbasis.append(recpv)
sgn = -sgn
self.dbasis = []
for base in self.basis:
dbase = []
for coord in self.coords:
d = base.diff(coord).trigsimp(deep=True, recursive=True)
dbase.append(d)
self.dbasis.append(dbase)
self.surface = {}
(coefs, bases) = linear_expand(self.x.obj)
for (coef, base) in zip(coefs, bases):
self.surface[str(base)] = coef
self.grad = MV()
self.grad.is_grad = True
self.grad.blade_rep = True
self.grad.igrade = 1
self.grad.rcpr_bases_MV = []
for rbase in self.rbasis:
self.grad.rcpr_bases_MV.append(rbase / self.E_sq)
self.grad.rcpr_bases_MV = tuple(self.grad.rcpr_bases_MV)
self.grad.coords = self.coords
self.grad.norm = self.E_sq
self.grad.connection = {}
if debug:
oprint('x', self.x,
'coords', self.coords,
'basis vectors', self.basis,
'index', self.index,
'basis blades', self.MFbasis,
'E', self.E,
'E**2', self.E_sq,
'*basis', duals,
'rbasis', self.rbasis,
'basis derivatives', self.dbasis,
'surface', self.surface,
'basis strings', self.basis_str,
'embedding basis', self.embedded_basis,
'metric tensor', self.gij)
def Basis(self):
return tuple(self.basis)
    def Grad(self, F):  # Intrinsic Derivative
dF = 0
for (rbase, coord) in zip(self.rbasis, self.coords):
dF += rbase * F.diff(coord)
dF = dF.simplify()
dF = dF / self.E_sq
return dF
def D(self, F): # Covariant Derivative
dF = self.Grad(F)
return self.Proj(dF)
def S(self, a): # Shape Tensor
return
def Proj(self, F):
PF = (F < self.E) * self.E
PF = PF.simplify()
PF = PF.trigsimp(deep=True, recursive=True)
return (PF / self.E_sq).simplify()
def Reject(self, F):
return (F - self.Proj(F)).simplify()
def DD(self, v, f, opstr=False):
mf_comp = []
for e in self.rbasis:
mf_comp.append((v | e).scalar() / self.E_sq)
result = MV()
op = ''
for (coord, comp) in zip(self.coords, mf_comp):
result += comp * (f.diff(coord))
if opstr:
op += '(' + str(comp) + ')D{' + str(coord) + '}+'
if opstr:
return str(result), op[:-1]
return result
def Plot2DSurface(self, u_range, v_range, surf=True, grid=True, tan=1.0, scalar_field=None, skip=[1, 1], fct_def=None):
plot_template = \
"""
from numpy import mgrid,shape,swapaxes,zeros,log,exp,sin,cos,tan
$fct_def$
eps = 1.0e-6
u_r = $u_range$
v_r = $v_range$
$coords$ = mgrid[u_r[0]:u_r[1]+eps:(u_r[1]-u_r[0])/float(u_r[2]-1),\\
v_r[0]:v_r[1]+eps:(v_r[1]-v_r[0])/float(v_r[2]-1)]
X = $surface$
scal_tan = $tan$
x = X['ex']
y = X['ey']
z = X['ez']
du = $basis_str[0]$
dv = $basis_str[1]$
Zero = zeros(shape(x))
if scal_tan > 0.0:
du_x = Zero+du['ex']
du_y = Zero+du['ey']
du_z = Zero+du['ez']
dv_x = Zero+dv['ex']
dv_y = Zero+dv['ey']
dv_z = Zero+dv['ez']
f = $scalar_field$
n = $n$
skip = $skip$
su = skip[0]
sv = skip[1]
if f[0] != None:
dn_x = f[0]*n[0]
dn_y = f[0]*n[1]
dn_z = f[0]*n[2]
from mayavi.mlab import plot3d,quiver3d,mesh,figure
figure(bgcolor=(1.0,1.0,1.0))
if $surf$:
mesh(x,y,z,colormap="gist_earth")
if $grid$:
for i in range(shape(u)[0]):
plot3d(x[i,],y[i,],z[i,],line_width=1.0,color=(0.0,0.0,0.0),tube_radius=None)
xr = swapaxes(x,0,1)
yr = swapaxes(y,0,1)
zr = swapaxes(z,0,1)
for i in range(shape(u)[1]):
plot3d(xr[i,],yr[i,],zr[i,],line_width=1.0,color=(0.0,0.0,0.0),tube_radius=None)
if scal_tan > 0.0:
quiver3d(x[::su,::sv],y[::su,::sv],z[::su,::sv],\\
du_x[::su,::sv],du_y[::su,::sv],du_z[::su,::sv],scale_factor=scal_tan,\\
line_width=1.0,color=(0.0,0.0,0.0),scale_mode='vector',mode='arrow',resolution=16)
quiver3d(x[::su,::sv],y[::su,::sv],z[::su,::sv],\\
dv_x[::su,::sv],dv_y[::su,::sv],dv_z[::su,::sv],scale_factor=scal_tan,\\
line_width=1.0,color=(0.0,0.0,0.0),scale_mode='vector',mode='arrow',resolution=16)
if f[0] != None:
quiver3d(x[::su,::sv],y[::su,::sv],z[::su,::sv],\\
dn_x[::su,::sv],dn_y[::su,::sv],dn_z[::su,::sv],\\
line_width=1.0,color=(0.0,0.0,0.0),scale_mode='none',mode='cone',\\
resolution=16,opacity=0.5)
"""
if len(self.coords) != 2:
return
self.skip = skip
self.surf = surf
self.grid = grid
self.tan = tan
if fct_def is None:
self.fct_def = ' '
else:
self.fct_def = fct_to_str(fct_def)
self.u_range = u_range
self.v_range = v_range
self.scalar_field = [scalar_field]
print(self.I, '\n', self.basis[0], '\n', self.basis[1])
self.normal = -self.I * (self.basis[0] ^ self.basis[1])
self.n = VectorComponents(self.normal, ['ex', 'ey', 'ez'])
msurf = open('manifold_surf.py', 'w')
plot_template = FillTemplate(self, plot_template)
msurf.write(plot_template)
msurf.close()
mayavi2 = find_executable('mayavi2')
if mayavi2 is None:
return
system(mayavi2 + ' manifold_surf.py &')
return
|
rodgerd/cobbler
|
refs/heads/master
|
cobbler/action_replicate.py
|
2
|
"""
Replicate from a cobbler master.
Copyright 2007-2009, Red Hat, Inc
Michael DeHaan <mdehaan@redhat.com>
Scott Henson <shenson@redhat.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301 USA
"""
import os
import os.path
import xmlrpclib
import api as cobbler_api
import utils
from utils import _
from cexceptions import *
import clogger
import fnmatch
OBJ_TYPES = [ "distro", "profile", "system", "repo", "image" ]
class Replicate:
def __init__(self,config,logger=None):
"""
Constructor
"""
self.config = config
self.settings = config.settings()
self.api = config.api
self.remote = None
self.uri = None
if logger is None:
logger = clogger.Logger()
self.logger = logger
def rsync_it(self,from_path,to_path):
from_path = "%s::%s" % (self.host, from_path)
cmd = "rsync -avzH %s %s" % (from_path, to_path)
rc = utils.subprocess_call(self.logger, cmd, shell=True)
if rc !=0:
self.logger.info("rsync failed")
# -------------------------------------------------------
def remove_objects_not_on_master(self, obj_type):
locals = utils.loh_to_hoh(self.local_data[obj_type],"uid")
remotes = utils.loh_to_hoh(self.remote_data[obj_type],"uid")
for (luid, ldata) in locals.iteritems():
if not remotes.has_key(luid):
try:
self.logger.info("removing %s %s" % (obj_type, ldata["name"]))
self.api.remove_item(obj_type, ldata["name"], recursive=True, logger=self.logger)
except Exception, e:
utils.log_exc(self.logger)
# -------------------------------------------------------
def add_objects_not_on_local(self, obj_type):
locals = utils.loh_to_hoh(self.local_data[obj_type], "uid")
remotes = utils.loh_sort_by_key(self.remote_data[obj_type],"depth")
remotes2 = utils.loh_to_hoh(self.remote_data[obj_type],"depth")
for rdata in remotes:
# do not add the system if it is not on the transfer list
if not self.must_include[obj_type].has_key(rdata["name"]):
continue
if not locals.has_key(rdata["uid"]):
creator = getattr(self.api, "new_%s" % obj_type)
newobj = creator()
newobj.from_datastruct(rdata)
try:
self.logger.info("adding %s %s" % (obj_type, rdata["name"]))
self.api.add_item(obj_type, newobj)
except Exception, e:
utils.log_exc(self.logger)
# -------------------------------------------------------
def replace_objects_newer_on_remote(self, obj_type):
locals = utils.loh_to_hoh(self.local_data[obj_type],"uid")
remotes = utils.loh_to_hoh(self.remote_data[obj_type],"uid")
for (ruid, rdata) in remotes.iteritems():
# do not add the system if it is not on the transfer list
if not self.must_include[obj_type].has_key(rdata["name"]):
continue
if locals.has_key(ruid):
ldata = locals[ruid]
if ldata["mtime"] < rdata["mtime"]:
if ldata["name"] != rdata["name"]:
self.logger.info("removing %s %s" % (obj_type, ldata["name"]))
self.api.remove_item(obj_type, ldata["name"], recursive=True, logger=self.logger)
creator = getattr(self.api, "new_%s" % obj_type)
newobj = creator()
newobj.from_datastruct(rdata)
try:
self.logger.info("updating %s %s" % (obj_type, rdata["name"]))
self.api.add_item(obj_type, newobj)
except Exception, e:
utils.log_exc(self.logger)
# -------------------------------------------------------
def replicate_data(self):
self.local_data = {}
self.remote_data = {}
self.logger.info("Querying Both Servers")
for what in OBJ_TYPES:
self.remote_data[what] = self.remote.get_items(what)
self.local_data[what] = self.local.get_items(what)
self.generate_include_map()
# FIXME: this should be optional as we might want to maintain local system records
# and just keep profiles/distros common
if self.prune:
self.logger.info("Removing Objects Not Stored On Master")
            obj_types = list(OBJ_TYPES)  # copy; do not mutate the module-level OBJ_TYPES list
if len(self.system_patterns) == 0:
obj_types.remove("system")
for what in obj_types:
self.remove_objects_not_on_master(what)
else:
self.logger.info("*NOT* Removing Objects Not Stored On Master")
if not self.omit_data:
self.logger.info("Rsyncing distros")
for distro in self.must_include["distro"].keys():
if self.must_include["distro"][distro] == 1:
distro = self.remote.get_item('distro',distro)
if distro["breed"] == 'redhat':
dest = distro["kernel"]
top = None
while top != 'images' and top != '':
dest, top = os.path.split(dest)
if not dest == os.path.sep and len(dest) > 1:
parentdir = os.path.split(dest)[0]
if not os.path.isdir(parentdir):
os.makedirs(parentdir)
self.rsync_it("distro-%s"%distro["name"], dest)
self.logger.info("Rsyncing repos")
for repo in self.must_include["repo"].keys():
if self.must_include["repo"][repo] == 1:
self.rsync_it("repo-%s"%repo, os.path.join(self.settings.webdir,"repo_mirror",repo))
self.logger.info("Rsyncing distro repo configs")
self.rsync_it("cobbler-distros/config", os.path.join(self.settings.webdir,"ks_mirror"))
self.logger.info("Rsyncing kickstart templates & snippets")
self.rsync_it("cobbler-kickstarts","/var/lib/cobbler/kickstarts")
self.rsync_it("cobbler-snippets","/var/lib/cobbler/snippets")
self.logger.info("Rsyncing triggers")
self.rsync_it("cobbler-triggers","/var/lib/cobbler/triggers")
else:
self.logger.info("*NOT* Rsyncing Data")
self.logger.info("Removing Objects Not Stored On Local")
for what in OBJ_TYPES:
self.add_objects_not_on_local(what)
self.logger.info("Updating Objects Newer On Remote")
for what in OBJ_TYPES:
self.replace_objects_newer_on_remote(what)
def link_distros(self):
for distro in self.api.distros():
self.logger.debug("Linking Distro %s" % distro.name)
utils.link_distro(self.settings, distro)
def generate_include_map(self):
self.remote_names = {}
self.remote_dict = {}
for ot in OBJ_TYPES:
self.remote_names[ot] = utils.loh_to_hoh(self.remote_data[ot],"name").keys()
self.remote_dict[ot] = utils.loh_to_hoh(self.remote_data[ot],"name")
self.logger.debug("remote names struct is %s" % self.remote_names)
self.must_include = {
"distro" : {},
"profile" : {},
"system" : {},
"image" : {},
"repo" : {}
}
# include all profiles that are matched by a pattern
for obj_type in OBJ_TYPES:
patvar = getattr(self, "%s_patterns" % obj_type)
self.logger.debug("* Finding Explicit %s Matches" % obj_type)
for pat in patvar:
for remote in self.remote_names[obj_type]:
self.logger.debug("?: seeing if %s looks like %s" % (remote,pat))
if fnmatch.fnmatch(remote, pat):
self.must_include[obj_type][remote] = 1
# include all profiles that systems require
# whether they are explicitly included or not
self.logger.debug("* Adding Profiles Required By Systems")
for sys in self.must_include["system"].keys():
pro = self.remote_dict["system"][sys].get("profile","")
self.logger.debug("?: requires profile: %s" % pro)
if pro != "":
self.must_include["profile"][pro] = 1
# include all profiles that subprofiles require
# whether they are explicitly included or not
# very deep nesting is possible
self.logger.debug("* Adding Profiles Required By SubProfiles")
while True:
loop_exit = True
for pro in self.must_include["profile"].keys():
parent = self.remote_dict["profile"][pro].get("parent","")
if parent != "":
if not self.must_include["profile"].has_key(parent):
self.must_include["profile"][parent] = 1
loop_exit = False
if loop_exit:
break
# require all distros that any profiles in the generated list requires
# whether they are explicitly included or not
self.logger.debug("* Adding Distros Required By Profiles")
for p in self.must_include["profile"].keys():
distro = self.remote_dict["profile"][p].get("distro","")
if not distro == "<<inherit>>" and not distro == "~":
self.logger.info("Adding repo %s for profile %s."%(p, distro))
self.must_include["distro"][distro] = 1
# require any repos that any profiles in the generated list requires
# whether they are explicitly included or not
self.logger.debug("* Adding Repos Required By Profiles")
for p in self.must_include["profile"].keys():
repos = self.remote_dict["profile"][p].get("repos",[])
for r in repos:
self.must_include["repo"][r] = 1
# include all images that systems require
# whether they are explicitly included or not
self.logger.debug("* Adding Images Required By Systems")
for sys in self.must_include["system"].keys():
img = self.remote_dict["system"][sys].get("image","")
self.logger.debug("?: requires profile: %s" % pro)
if img != "":
self.must_include["image"][img] = 1
# FIXME: remove debug
for ot in OBJ_TYPES:
self.logger.debug("transfer list for %s is %s" % (ot, self.must_include[ot].keys()))
# -------------------------------------------------------
def run(self, cobbler_master=None, distro_patterns=None, profile_patterns=None, system_patterns=None, repo_patterns=None, image_patterns=None, prune=False, omit_data=False):
"""
Get remote profiles and distros and sync them locally
"""
self.distro_patterns = distro_patterns.split()
self.profile_patterns = profile_patterns.split()
self.system_patterns = system_patterns.split()
self.repo_patterns = repo_patterns.split()
self.image_patterns = image_patterns.split()
self.omit_data = omit_data
self.prune = prune
self.logger.info("cobbler_master = %s" % cobbler_master)
self.logger.info("profile_patterns = %s" % self.profile_patterns)
self.logger.info("system_patterns = %s" % self.system_patterns)
self.logger.info("omit_data = %s" % self.omit_data)
if cobbler_master is not None:
self.logger.info("using CLI defined master")
self.host = cobbler_master
self.uri = 'http://%s/cobbler_api' % cobbler_master
elif len(self.settings.cobbler_master) > 0:
self.logger.info("using info from master")
self.host = self.settings.cobbler_master
self.uri = 'http://%s/cobbler_api' % self.settings.cobbler_master
else:
utils.die('No cobbler master specified, try --master.')
self.logger.info("XMLRPC endpoint: %s" % self.uri)
self.logger.debug("test ALPHA")
self.remote = xmlrpclib.Server(self.uri)
self.logger.debug("test BETA")
self.remote.ping()
self.local = xmlrpclib.Server("http://127.0.0.1/cobbler_api")
self.local.ping()
self.replicate_data()
self.link_distros()
self.logger.info("Syncing")
self.api.sync()
self.logger.info("Done")
return True
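# Illustrative sketch (not part of cobbler): a typical invocation, assuming `config`
# is an initialized cobbler config object.  The pattern arguments are whitespace
# separated glob lists; an empty string selects nothing of that object type.
#
#     replicator = Replicate(config)
#     replicator.run(cobbler_master="master.example.org",
#                    distro_patterns="*", profile_patterns="*",
#                    system_patterns="", repo_patterns="*",
#                    image_patterns="", prune=False, omit_data=True)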
|
fldc/CouchPotatoServer
|
refs/heads/custom
|
libs/enzyme/ogm.py
|
180
|
# -*- coding: utf-8 -*-
# enzyme - Video metadata parser
# Copyright 2011-2012 Antoine Bertin <diaoulael@gmail.com>
# Copyright 2003-2006 Thomas Schueppel <stain@acm.org>
# Copyright 2003-2006 Dirk Meyer <dischi@freevo.org>
#
# This file is part of enzyme.
#
# enzyme is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# enzyme is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with enzyme. If not, see <http://www.gnu.org/licenses/>.
__all__ = ['Parser']
import struct
import re
import stat
import os
import logging
from exceptions import ParseError
import core
# get logging object
log = logging.getLogger(__name__)
PACKET_TYPE_HEADER = 0x01
PACKED_TYPE_METADATA = 0x03
PACKED_TYPE_SETUP = 0x05
PACKET_TYPE_BITS = 0x07
PACKET_IS_SYNCPOINT = 0x08
#VORBIS_VIDEO_PACKET_INFO = 'video'
STREAM_HEADER_VIDEO = '<4sIQQIIHII'
STREAM_HEADER_AUDIO = '<4sIQQIIHHHI'
VORBISCOMMENT = { 'TITLE': 'title',
'ALBUM': 'album',
'ARTIST': 'artist',
'COMMENT': 'comment',
'ENCODER': 'encoder',
'TRACKNUMBER': 'trackno',
'LANGUAGE': 'language',
'GENRE': 'genre',
}
# FIXME: check VORBISCOMMENT date and convert to timestamp
# Deactivated tag: 'DATE': 'date',
MAXITERATIONS = 30
class Ogm(core.AVContainer):
table_mapping = { 'VORBISCOMMENT' : VORBISCOMMENT }
def __init__(self, file):
core.AVContainer.__init__(self)
self.samplerate = 1
self.all_streams = [] # used to add meta data to streams
self.all_header = []
for i in range(MAXITERATIONS):
granule, nextlen = self._parseOGGS(file)
            if granule is None:
if i == 0:
# oops, bad file
raise ParseError()
break
elif granule > 0:
# ok, file started
break
# seek to the end of the stream, to avoid scanning the whole file
if (os.stat(file.name)[stat.ST_SIZE] > 50000):
file.seek(os.stat(file.name)[stat.ST_SIZE] - 49000)
# read the rest of the file into a buffer
h = file.read()
# find last OggS to get length info
if len(h) > 200:
idx = h.find('OggS')
pos = -49000 + idx
if idx:
file.seek(os.stat(file.name)[stat.ST_SIZE] + pos)
while 1:
granule, nextlen = self._parseOGGS(file)
if not nextlen:
break
# Copy metadata to the streams
if len(self.all_header) == len(self.all_streams):
for i in range(len(self.all_header)):
# get meta info
for key in self.all_streams[i].keys():
if self.all_header[i].has_key(key):
self.all_streams[i][key] = self.all_header[i][key]
del self.all_header[i][key]
if self.all_header[i].has_key(key.upper()):
asi = self.all_header[i][key.upper()]
self.all_streams[i][key] = asi
del self.all_header[i][key.upper()]
# Chapter parser
if self.all_header[i].has_key('CHAPTER01') and \
not self.chapters:
while 1:
s = 'CHAPTER%02d' % (len(self.chapters) + 1)
if self.all_header[i].has_key(s) and \
self.all_header[i].has_key(s + 'NAME'):
pos = self.all_header[i][s]
try:
pos = int(pos)
except ValueError:
new_pos = 0
for v in pos.split(':'):
new_pos = new_pos * 60 + float(v)
pos = int(new_pos)
c = self.all_header[i][s + 'NAME']
c = core.Chapter(c, pos)
del self.all_header[i][s + 'NAME']
del self.all_header[i][s]
self.chapters.append(c)
else:
break
# If there are no video streams in this ogg container, it
# must be an audio file. Raise an exception to cause the
# factory to fall back to audio.ogg.
if len(self.video) == 0:
raise ParseError
# Copy Metadata from tables into the main set of attributes
for header in self.all_header:
self._appendtable('VORBISCOMMENT', header)
def _parseOGGS(self, file):
h = file.read(27)
if len(h) == 0:
# Regular File end
return None, None
elif len(h) < 27:
log.debug(u'%d Bytes of Garbage found after End.' % len(h))
return None, None
if h[:4] != "OggS":
log.debug(u'Invalid Ogg')
raise ParseError()
version = ord(h[4])
if version != 0:
log.debug(u'Unsupported OGG/OGM Version %d' % version)
return None, None
head = struct.unpack('<BQIIIB', h[5:])
headertype, granulepos, serial, pageseqno, checksum, \
pageSegCount = head
self.mime = 'application/ogm'
self.type = 'OGG Media'
tab = file.read(pageSegCount)
nextlen = 0
for i in range(len(tab)):
nextlen += ord(tab[i])
else:
h = file.read(1)
packettype = ord(h[0]) & PACKET_TYPE_BITS
if packettype == PACKET_TYPE_HEADER:
h += file.read(nextlen - 1)
self._parseHeader(h, granulepos)
elif packettype == PACKED_TYPE_METADATA:
h += file.read(nextlen - 1)
self._parseMeta(h)
else:
file.seek(nextlen - 1, 1)
if len(self.all_streams) > serial:
stream = self.all_streams[serial]
if hasattr(stream, 'samplerate') and \
stream.samplerate:
stream.length = granulepos / stream.samplerate
elif hasattr(stream, 'bitrate') and \
stream.bitrate:
stream.length = granulepos / stream.bitrate
return granulepos, nextlen + 27 + pageSegCount
def _parseMeta(self, h):
flags = ord(h[0])
headerlen = len(h)
if headerlen >= 7 and h[1:7] == 'vorbis':
header = {}
nextlen, self.encoder = self._extractHeaderString(h[7:])
numItems = struct.unpack('<I', h[7 + nextlen:7 + nextlen + 4])[0]
start = 7 + 4 + nextlen
for _ in range(numItems):
(nextlen, s) = self._extractHeaderString(h[start:])
start += nextlen
if s:
a = re.split('=', s)
header[(a[0]).upper()] = a[1]
# Put Header fields into info fields
self.type = 'OGG Vorbis'
self.subtype = ''
self.all_header.append(header)
def _parseHeader(self, header, granule):
headerlen = len(header)
flags = ord(header[0])
if headerlen >= 30 and header[1:7] == 'vorbis':
ai = core.AudioStream()
ai.version, ai.channels, ai.samplerate, bitrate_max, ai.bitrate, \
bitrate_min, blocksize, framing = \
struct.unpack('<IBIiiiBB', header[7:7 + 23])
ai.codec = 'Vorbis'
#ai.granule = granule
#ai.length = granule / ai.samplerate
self.audio.append(ai)
self.all_streams.append(ai)
elif headerlen >= 7 and header[1:7] == 'theora':
# Theora Header
# XXX Finish Me
vi = core.VideoStream()
vi.codec = 'theora'
self.video.append(vi)
self.all_streams.append(vi)
elif headerlen >= 142 and \
header[1:36] == 'Direct Show Samples embedded in Ogg':
# Old Directshow format
# XXX Finish Me
vi = core.VideoStream()
vi.codec = 'dshow'
self.video.append(vi)
self.all_streams.append(vi)
elif flags & PACKET_TYPE_BITS == PACKET_TYPE_HEADER and \
headerlen >= struct.calcsize(STREAM_HEADER_VIDEO) + 1:
# New Directshow Format
htype = header[1:9]
if htype[:5] == 'video':
sh = header[9:struct.calcsize(STREAM_HEADER_VIDEO) + 9]
streamheader = struct.unpack(STREAM_HEADER_VIDEO, sh)
vi = core.VideoStream()
(type, ssize, timeunit, samplerate, vi.length, buffersize, \
vi.bitrate, vi.width, vi.height) = streamheader
vi.width /= 65536
vi.height /= 65536
# XXX length, bitrate are very wrong
vi.codec = type
vi.fps = 10000000 / timeunit
self.video.append(vi)
self.all_streams.append(vi)
elif htype[:5] == 'audio':
sha = header[9:struct.calcsize(STREAM_HEADER_AUDIO) + 9]
streamheader = struct.unpack(STREAM_HEADER_AUDIO, sha)
ai = core.AudioStream()
(type, ssize, timeunit, ai.samplerate, ai.length, buffersize, \
ai.bitrate, ai.channels, bloc, ai.bitrate) = streamheader
self.samplerate = ai.samplerate
log.debug(u'Samplerate %d' % self.samplerate)
self.audio.append(ai)
self.all_streams.append(ai)
elif htype[:4] == 'text':
subtitle = core.Subtitle()
# FIXME: add more info
self.subtitles.append(subtitle)
self.all_streams.append(subtitle)
else:
log.debug(u'Unknown Header')
def _extractHeaderString(self, header):
        length = struct.unpack('<I', header[:4])[0]
        try:
            return (length + 4, unicode(header[4:4 + length], 'utf-8'))
        except (KeyError, IndexError, UnicodeDecodeError):
            return (length + 4, None)
Parser = Ogm
|
hyrole/scrapy
|
refs/heads/master
|
scrapy/contrib/logstats.py
|
144
|
import warnings
from scrapy.exceptions import ScrapyDeprecationWarning
warnings.warn("Module `scrapy.contrib.logstats` is deprecated, "
"use `scrapy.extensions.logstats` instead",
ScrapyDeprecationWarning, stacklevel=2)
from scrapy.extensions.logstats import *
|
danielpronych/python-twitter
|
refs/heads/master
|
simplejson/scanner.py
|
928
|
"""JSON token scanner
"""
import re
try:
from simplejson._speedups import make_scanner as c_make_scanner
except ImportError:
c_make_scanner = None
__all__ = ['make_scanner']
NUMBER_RE = re.compile(
r'(-?(?:0|[1-9]\d*))(\.\d+)?([eE][-+]?\d+)?',
(re.VERBOSE | re.MULTILINE | re.DOTALL))
def py_make_scanner(context):
parse_object = context.parse_object
parse_array = context.parse_array
parse_string = context.parse_string
match_number = NUMBER_RE.match
encoding = context.encoding
strict = context.strict
parse_float = context.parse_float
parse_int = context.parse_int
parse_constant = context.parse_constant
object_hook = context.object_hook
def _scan_once(string, idx):
try:
nextchar = string[idx]
except IndexError:
raise StopIteration
if nextchar == '"':
return parse_string(string, idx + 1, encoding, strict)
elif nextchar == '{':
return parse_object((string, idx + 1), encoding, strict, _scan_once, object_hook)
elif nextchar == '[':
return parse_array((string, idx + 1), _scan_once)
elif nextchar == 'n' and string[idx:idx + 4] == 'null':
return None, idx + 4
elif nextchar == 't' and string[idx:idx + 4] == 'true':
return True, idx + 4
elif nextchar == 'f' and string[idx:idx + 5] == 'false':
return False, idx + 5
m = match_number(string, idx)
if m is not None:
integer, frac, exp = m.groups()
if frac or exp:
res = parse_float(integer + (frac or '') + (exp or ''))
else:
res = parse_int(integer)
return res, m.end()
elif nextchar == 'N' and string[idx:idx + 3] == 'NaN':
return parse_constant('NaN'), idx + 3
elif nextchar == 'I' and string[idx:idx + 8] == 'Infinity':
return parse_constant('Infinity'), idx + 8
elif nextchar == '-' and string[idx:idx + 9] == '-Infinity':
return parse_constant('-Infinity'), idx + 9
else:
raise StopIteration
return _scan_once
make_scanner = c_make_scanner or py_make_scanner
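# Illustrative note (not part of simplejson): the callable returned by
# make_scanner(context) maps (string, idx) to (parsed_value, end_idx).  With a
# fully configured decoder context -- as built internally by
# simplejson.decoder.JSONDecoder -- scanning '[1, 2]' from index 0 would return
# the list [1, 2] together with the index just past the closing bracket.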
|
Hybrid-Cloud/badam
|
refs/heads/master
|
patches_tool/aws_patch/aws_deps/libcloud/storage/__init__.py
|
1
|
"""
Module for working with Storage
"""
|
akshatharaj/django
|
refs/heads/master
|
tests/get_object_or_404/tests.py
|
296
|
from __future__ import unicode_literals
from django.http import Http404
from django.shortcuts import get_list_or_404, get_object_or_404
from django.test import TestCase
from .models import Article, Author
class GetObjectOr404Tests(TestCase):
def test_get_object_or_404(self):
a1 = Author.objects.create(name="Brave Sir Robin")
a2 = Author.objects.create(name="Patsy")
# No Articles yet, so we should get a Http404 error.
self.assertRaises(Http404, get_object_or_404, Article, title="Foo")
article = Article.objects.create(title="Run away!")
article.authors = [a1, a2]
# get_object_or_404 can be passed a Model to query.
self.assertEqual(
get_object_or_404(Article, title__contains="Run"),
article
)
# We can also use the Article manager through an Author object.
self.assertEqual(
get_object_or_404(a1.article_set, title__contains="Run"),
article
)
# No articles containing "Camelot". This should raise a Http404 error.
self.assertRaises(
Http404,
get_object_or_404, a1.article_set, title__contains="Camelot"
)
# Custom managers can be used too.
self.assertEqual(
get_object_or_404(Article.by_a_sir, title="Run away!"),
article
)
# QuerySets can be used too.
self.assertEqual(
get_object_or_404(Article.objects.all(), title__contains="Run"),
article
)
# Just as when using a get() lookup, you will get an error if more than
# one object is returned.
self.assertRaises(
Author.MultipleObjectsReturned,
get_object_or_404, Author.objects.all()
)
# Using an empty QuerySet raises a Http404 error.
self.assertRaises(
Http404,
get_object_or_404, Article.objects.none(), title__contains="Run"
)
# get_list_or_404 can be used to get lists of objects
self.assertEqual(
get_list_or_404(a1.article_set, title__icontains="Run"),
[article]
)
        # Http404 is raised if the list is empty.
self.assertRaises(
Http404,
get_list_or_404, a1.article_set, title__icontains="Shrubbery"
)
# Custom managers can be used too.
self.assertEqual(
get_list_or_404(Article.by_a_sir, title__icontains="Run"),
[article]
)
# QuerySets can be used too.
self.assertEqual(
get_list_or_404(Article.objects.all(), title__icontains="Run"),
[article]
)
def test_bad_class(self):
# Given an argument klass that is not a Model, Manager, or Queryset
# raises a helpful ValueError message
self.assertRaisesMessage(
ValueError,
"Object is of type 'str', but must be a Django Model, Manager, "
"or QuerySet",
get_object_or_404, str("Article"), title__icontains="Run"
)
class CustomClass(object):
pass
self.assertRaisesMessage(
ValueError,
"Object is of type 'CustomClass', but must be a Django Model, "
"Manager, or QuerySet",
get_object_or_404, CustomClass, title__icontains="Run"
)
# Works for lists too
self.assertRaisesMessage(
ValueError,
"Object is of type 'list', but must be a Django Model, Manager, "
"or QuerySet",
get_list_or_404, [Article], title__icontains="Run"
)
|
MyRookie/SentimentAnalyse
|
refs/heads/master
|
venv/lib/python2.7/site-packages/pip/utils/hashes.py
|
517
|
from __future__ import absolute_import
import hashlib
from pip.exceptions import HashMismatch, HashMissing, InstallationError
from pip.utils import read_chunks
from pip._vendor.six import iteritems, iterkeys, itervalues
# The recommended hash algo of the moment. Change this whenever the state of
# the art changes; it won't hurt backward compatibility.
FAVORITE_HASH = 'sha256'
# Names of hashlib algorithms allowed by the --hash option and ``pip hash``
# Currently, those are the ones at least as collision-resistant as sha256.
STRONG_HASHES = ['sha256', 'sha384', 'sha512']
class Hashes(object):
"""A wrapper that builds multiple hashes at once and checks them against
known-good values
"""
def __init__(self, hashes=None):
"""
:param hashes: A dict of algorithm names pointing to lists of allowed
hex digests
"""
self._allowed = {} if hashes is None else hashes
def check_against_chunks(self, chunks):
"""Check good hashes against ones built from iterable of chunks of
data.
Raise HashMismatch if none match.
"""
gots = {}
for hash_name in iterkeys(self._allowed):
try:
gots[hash_name] = hashlib.new(hash_name)
except (ValueError, TypeError):
raise InstallationError('Unknown hash name: %s' % hash_name)
for chunk in chunks:
for hash in itervalues(gots):
hash.update(chunk)
for hash_name, got in iteritems(gots):
if got.hexdigest() in self._allowed[hash_name]:
return
self._raise(gots)
def _raise(self, gots):
raise HashMismatch(self._allowed, gots)
def check_against_file(self, file):
"""Check good hashes against a file-like object
Raise HashMismatch if none match.
"""
return self.check_against_chunks(read_chunks(file))
def check_against_path(self, path):
with open(path, 'rb') as file:
return self.check_against_file(file)
def __nonzero__(self):
"""Return whether I know any known-good hashes."""
return bool(self._allowed)
def __bool__(self):
return self.__nonzero__()
class MissingHashes(Hashes):
"""A workalike for Hashes used when we're missing a hash for a requirement
It computes the actual hash of the requirement and raises a HashMissing
exception showing it to the user.
"""
def __init__(self):
"""Don't offer the ``hashes`` kwarg."""
# Pass our favorite hash in to generate a "gotten hash". With the
        # empty list, it will never match, so an error will always be raised.
super(MissingHashes, self).__init__(hashes={FAVORITE_HASH: []})
def _raise(self, gots):
raise HashMissing(gots[FAVORITE_HASH].hexdigest())
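# Illustrative sketch (not part of pip): verifying a downloaded archive against a
# known-good digest.  The file name and digest below are placeholders.
#
#     hashes = Hashes({'sha256': ['<expected-hex-digest>']})
#     try:
#         hashes.check_against_path('package.tar.gz')
#     except HashMismatch:
#         ...  # reject the download and report the mismatch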
|
leafclick/intellij-community
|
refs/heads/master
|
plugins/hg4idea/testData/bin/mercurial/statichttprepo.py
|
91
|
# statichttprepo.py - simple http repository class for mercurial
#
# This provides read-only repo access to repositories exported via static http
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from i18n import _
import changelog, byterange, url, error
import localrepo, manifest, util, scmutil, store
import urllib, urllib2, errno, os
class httprangereader(object):
def __init__(self, url, opener):
# we assume opener has HTTPRangeHandler
self.url = url
self.pos = 0
self.opener = opener
self.name = url
def seek(self, pos):
self.pos = pos
def read(self, bytes=None):
req = urllib2.Request(self.url)
end = ''
if bytes:
end = self.pos + bytes - 1
if self.pos or end:
req.add_header('Range', 'bytes=%d-%s' % (self.pos, end))
try:
f = self.opener.open(req)
data = f.read()
# Python 2.6+ defines a getcode() function, and 2.4 and
# 2.5 appear to always have an undocumented code attribute
# set. If we can't read either of those, fall back to 206
# and hope for the best.
code = getattr(f, 'getcode', lambda : getattr(f, 'code', 206))()
except urllib2.HTTPError, inst:
num = inst.code == 404 and errno.ENOENT or None
raise IOError(num, inst)
except urllib2.URLError, inst:
raise IOError(None, inst.reason[1])
if code == 200:
# HTTPRangeHandler does nothing if remote does not support
# Range headers and returns the full entity. Let's slice it.
if bytes:
data = data[self.pos:self.pos + bytes]
else:
data = data[self.pos:]
elif bytes:
data = data[:bytes]
self.pos += len(data)
return data
def __iter__(self):
return iter(self.read().splitlines(1))
def close(self):
pass
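# Descriptive note (added for clarity): httprangereader emulates a seekable file
# over HTTP by issuing Range requests.  For example, with pos=10 a read(5) sends
# the header 'Range: bytes=10-14'; if the server ignores Range and replies 200,
# the full body is sliced locally to the requested window.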
def build_opener(ui, authinfo):
# urllib cannot handle URLs with embedded user or passwd
urlopener = url.opener(ui, authinfo)
urlopener.add_handler(byterange.HTTPRangeHandler())
class statichttpvfs(scmutil.abstractvfs):
def __init__(self, base):
self.base = base
def __call__(self, path, mode="r", atomictemp=None):
if mode not in ('r', 'rb'):
raise IOError('Permission denied')
f = "/".join((self.base, urllib.quote(path)))
return httprangereader(f, urlopener)
def join(self, path):
if path:
return os.path.join(self.base, path)
else:
return self.base
return statichttpvfs
class statichttppeer(localrepo.localpeer):
def local(self):
return None
def canpush(self):
return False
class statichttprepository(localrepo.localrepository):
def __init__(self, ui, path):
self._url = path
self.ui = ui
self.root = path
u = util.url(path.rstrip('/') + "/.hg")
self.path, authinfo = u.authinfo()
opener = build_opener(ui, authinfo)
self.opener = opener(self.path)
self.vfs = self.opener
self._phasedefaults = []
try:
requirements = scmutil.readrequires(self.opener, self.supported)
except IOError, inst:
if inst.errno != errno.ENOENT:
raise
requirements = set()
# check if it is a non-empty old-style repository
try:
fp = self.opener("00changelog.i")
fp.read(1)
fp.close()
except IOError, inst:
if inst.errno != errno.ENOENT:
raise
# we do not care about empty old-style repositories here
msg = _("'%s' does not appear to be an hg repository") % path
raise error.RepoError(msg)
# setup store
self.store = store.store(requirements, self.path, opener)
self.spath = self.store.path
self.sopener = self.store.opener
self.svfs = self.sopener
self.sjoin = self.store.join
self._filecache = {}
self.requirements = requirements
self.manifest = manifest.manifest(self.sopener)
self.changelog = changelog.changelog(self.sopener)
self._tags = None
self.nodetagscache = None
self._branchcaches = {}
self.encodepats = None
self.decodepats = None
def _restrictcapabilities(self, caps):
return caps.difference(["pushkey"])
def url(self):
return self._url
def local(self):
return False
def peer(self):
return statichttppeer(self)
def lock(self, wait=True):
raise util.Abort(_('cannot lock static-http repository'))
def instance(ui, path, create):
if create:
raise util.Abort(_('cannot create new static-http repository'))
return statichttprepository(ui, path[7:])
|
aemal/westcat
|
refs/heads/master
|
amcat/scripts/article_upload/fileupload.py
|
2
|
###########################################################################
# (C) Vrije Universiteit, Amsterdam (the Netherlands) #
# #
# This file is part of AmCAT - The Amsterdam Content Analysis Toolkit #
# #
# AmCAT is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Affero General Public License as published by the #
# Free Software Foundation, either version 3 of the License, or (at your #
# option) any later version. #
# #
# AmCAT is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public #
# License for more details. #
# #
# You should have received a copy of the GNU Affero General Public #
# License along with AmCAT. If not, see <http://www.gnu.org/licenses/>. #
###########################################################################
"""
Helper form for file upload forms that handles decoding and zip files
"""
import logging; log = logging.getLogger(__name__)
from django import forms
import os.path
import shutil
import zipfile
import chardet
import csv
import collections
from contextlib import contextmanager
from django.core.files import File
import tempfile
import shutil
@contextmanager
def TemporaryFolder(*args, **kargs):
tempdir = tempfile.mkdtemp(*args, **kargs)
try:
yield tempdir
finally:
shutil.rmtree(tempdir)
@contextmanager
def ZipFileContents(zip_file, *args, **kargs):
with TemporaryFolder(*args, **kargs) as tempdir:
with zipfile.ZipFile(zip_file) as zf:
files = []
for name in zf.namelist():
if name.endswith("/"): continue # skip folders
# using zipfile.extract(name, tempdir) gives an error if name contains non-ascii characters
# this may be related to http://bugs.python.org/issue17656, but we are using 2.7.3
                # strangely enough, the issue does not occur in 'runserver' mode, but file handling might be different there
fn = os.path.basename(name.encode("ascii", "ignore"))
# use mkstemp instead of temporary folder because we don't want it to be deleted
# it will be deleted on __exit__ anyway since the whole tempdir will be deleted
_handle, fn = tempfile.mkstemp(suffix="_"+fn, dir=tempdir)
f = open(fn, 'w')
shutil.copyfileobj(zf.open(name), f)
f.close()
files.append(File(open(fn), name=name))
yield files
DecodedFile = collections.namedtuple("File", ["name", "file", "bytes", "encoding", "text"])
ENCODINGS = ["Autodetect", "ISO-8859-15", "UTF-8", "Latin-1"]
class RawFileUploadForm(forms.Form):
"""Helper form to handle uploading a file"""
file = forms.FileField(help_text="Uploading very large files can take a long time. If you encounter timeout problems, consider uploading smaller files")
def get_entries(self):
return [self.files['file']]
class FileUploadForm(RawFileUploadForm):
"""Helper form to handle uploading a file with encoding"""
encoding = forms.ChoiceField(choices=enumerate(ENCODINGS),
initial=0, required=False,
help_text="Try to change this value when character issues arise.", )
def decode(self, bytes):
"""
Decode the given bytes using the encoding specified in the form.
If encoding is Autodetect, use (1) utf-8, (2) chardet, (3) latin-1.
Returns a tuple (encoding, text) where encoding is the actual encoding used.
"""
enc = ENCODINGS[int(self.cleaned_data['encoding'] or 0)]
if enc != 'Autodetect':
return enc, bytes.decode(enc)
try:
return "utf-8", bytes.decode('utf-8')
except UnicodeDecodeError:
pass
enc = chardet.detect(bytes)["encoding"]
if enc:
try:
return enc, bytes.decode(enc)
except UnicodeDecodeError:
pass
return 'latin-1', bytes.decode('latin-1')
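    # Decoding order for 'Autodetect' (descriptive note): utf-8 is tried first,
    # then the encoding guessed by chardet, and finally latin-1, which accepts
    # any byte sequence and therefore never fails.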
def decode_file(self, f):
bytes = f.read()
enc, text = self.decode(bytes)
return DecodedFile(f.name, f, bytes, enc, text)
def get_uploaded_text(self):
"""Returns a DecodedFile object representing the file"""
return self.decode_file(self.files['file'])
def get_entries(self):
return [self.get_uploaded_text()]
DIALECTS = [("autodetect", "Autodetect"),
("excel", "CSV, comma-separated"),
("excel-semicolon", "CSV, semicolon-separated (Europe)"),
]
class excel_semicolon(csv.excel):
delimiter = ';'
csv.register_dialect("excel-semicolon", excel_semicolon)
def namedtuple_csv_reader(csv_file, encoding='utf-8', **kargs):
"""
Wraps around a csv.reader object to yield namedtuples for the rows.
Expects the first line to be the header.
    @param encoding: This encoding will be used to decode all values. If None, raw bytes are yielded.
                     If it is an empty string or 'Autodetect', chardet is used to guess the encoding.
@param kargs: Will be passed to csv.reader, e.g. dialect
"""
if encoding.lower() in ('', 'autodetect'):
encoding = chardet.detect(csv_file.read(1024))["encoding"]
log.info("Guessed encoding: {encoding}".format(**locals()))
csv_file.seek(0)
r = csv.reader(csv_file, **kargs)
return namedtuples_from_reader(r, encoding=encoding)
def _xlsx_as_csv(file):
"""
Supply a csv reader-like interface to an xlsx file
"""
from openpyxl import load_workbook
wb = load_workbook(file)
ws = wb.get_sheet_by_name(wb.get_sheet_names()[0])
for row in ws.rows:
row = [c.value for c in row]
yield row
def namedtuple_xlsx_reader(xlsx_file):
"""
    Uses openpyxl to read an xlsx file and provide a named-tuple interface to it
"""
reader = _xlsx_as_csv(xlsx_file)
return namedtuples_from_reader(reader)
def namedtuples_from_reader(reader, encoding=None):
"""
    Returns a sequence of namedtuples from a (csv-like) reader, which should yield the header followed by value rows.
"""
header = reader.next()
class Row(collections.namedtuple("Row", header, rename=True)):
column_names=header
def __getitem__(self, key):
if not isinstance(key, int):
# look up key in self.header
key = self.column_names.index(key)
return super(Row, self).__getitem__(key)
def items(self):
return zip(self.column_names, self)
for values in reader:
if encoding is not None:
values = [x.decode(encoding) for x in values]
if len(values) < len(header):
values += [None] * (len(header) - len(values))
yield Row(*values)
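# Illustrative sketch (column names are hypothetical): rows yielded by
# namedtuple_csv_reader support attribute, name and index access, so assuming
# 'title' is the first column the three lookups below are equivalent.
#
#     with open('articles.csv') as f:
#         for row in namedtuple_csv_reader(f, encoding='utf-8'):
#             print(row.title, row['title'], row[0])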
class CSVUploadForm(FileUploadForm):
dialect = forms.ChoiceField(choices=DIALECTS, initial="autodetect", required=False,
help_text="Select the kind of CSV file")
def get_entries(self):
return self.get_reader(reader_class=namedtuple_csv_reader)
def get_reader(self, reader_class=namedtuple_csv_reader):
f = self.files['file']
if f.name.endswith(".xlsx"):
if reader_class != namedtuple_csv_reader:
raise Exception("Cannot handle xlsx files with non-default reader, sorry!")
return namedtuple_xlsx_reader(f)
d = self.cleaned_data['dialect']
if not d: d = "autodetect"
if d == 'autodetect':
dialect = csv.Sniffer().sniff(f.readline())
f.seek(0)
if dialect.delimiter not in "\t,;":
dialect = csv.get_dialect('excel')
else:
dialect = csv.get_dialect(d)
enc = self.cleaned_data['encoding']
encoding = {'encoding' : ENCODINGS[int(enc)]} if enc and reader_class == namedtuple_csv_reader else {}
return reader_class(f, dialect=dialect, **encoding)
class ZipFileUploadForm(FileUploadForm):
file = forms.FileField(help_text="You can also upload a zip file containing the desired files. Uploading very large files can take a long time. If you encounter timeout problems, consider uploading smaller files")
def get_uploaded_texts(self):
"""
Returns a list of DecodedFile objects representing the zipped files,
or just a [DecodedFile] if the uploaded file was not a .zip file.
"""
f = self.files['file']
extension = os.path.splitext(f.name)[1]
if extension == ".zip":
with ZipFileContents(f) as files:
return [self.decode_file(f) for f in files]
else:
return [self.decode_file(f)]
def get_entries(self):
return self.get_uploaded_texts()
###########################################################################
# U N I T T E S T S #
###########################################################################
from amcat.tools import amcattest
class TestFileUpload(amcattest.AmCATTestCase):
def _get_entries(self, bytes, dialect="autodetect", encoding=0):
with tempfile.NamedTemporaryFile() as f:
f.write(bytes)
f.flush()
s = CSVUploadForm(dict(encoding=encoding, dialect=dialect),
dict(file=File(open(f.name))))
if not s.is_valid():
self.assertTrue(False, s.errors)
return [dict(r.items()) for r in s.get_entries()]
def _to_dict(self, rows):
return [dict(r.items()) for r in rows]
def test_csv(self):
self.assertEqual(self._get_entries("a,b\n1,2", dialect="excel"),
[dict(a='1',b='2')])
self.assertEqual(self._get_entries("a;b\n1;2", dialect="excel-semicolon"),
[dict(a='1',b='2')])
# does autodetect work?
self.assertEqual(self._get_entries("a,b\n1,2"),
[dict(a='1',b='2')])
self.assertEqual(self._get_entries("a;b\n1;2"),
[dict(a='1',b='2')])
def test_csv_reader(self):
csv = ["a,b,c", "1,2,\xe9"]
line, = namedtuple_csv_reader(csv, encoding='latin-1')
self.assertEqual(tuple(line), ("1","2",u"\xe9"))
self.assertEqual(line[0], "1")
self.assertEqual(line.a, "1")
self.assertEqual(line["a"], "1")
csv = ["a\tb", "1", "\t2"]
l1, l2 = namedtuple_csv_reader(csv, dialect='excel-tab')
self.assertEqual(l1, ('1', None))
self.assertEqual(l2, ('', '2'))
|
ludmilamarian/invenio
|
refs/heads/master
|
invenio/legacy/bibedit/__init__.py
|
2
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Legacy BibEdit."""
import warnings
from invenio.utils.deprecation import RemovedInInvenio23Warning
warnings.warn("Legacy BibEdit will be removed in 2.3. Please check "
"'invenio.modules.editor' module.",
RemovedInInvenio23Warning)
|
gh0std4ncer/thug
|
refs/heads/master
|
src/ActiveX/modules/AnswerWorks.py
|
8
|
# Vantage Linguistics AnswerWorks ActiveX Controls
# CVE-2007-6387
import logging
log = logging.getLogger("Thug")
def GetHistory(self, arg):
if len(arg) > 215:
log.ThugLogging.log_exploit_event(self._window.url,
"AnswerWorks ActiveX",
"Overflow in GetHistory",
cve = 'CVE-2007-6387')
def GetSeedQuery(self, arg):
if len(arg) > 215:
log.ThugLogging.log_exploit_event(self._window.url,
"AnswerWorks ActiveX",
"Overflow in GetSeedQuery",
cve = 'CVE-2007-6387')
def SetSeedQuery(self, arg):
if len(arg) > 215:
log.ThugLogging.log_exploit_event(self._window.url,
"AnswerWorks ActiveX",
"SetSeedQuery",
cve = 'CVE-2007-6387')
|
Stranger6667/postmarker
|
refs/heads/master
|
src/postmarker/models/base.py
|
1
|
from json import loads
from ..utils import sizes
class Model:
"""Abstract data model for Postmark entities."""
_data = None
def __init__(self, manager=None, **kwargs):
self._manager = manager
self._update(kwargs)
def __str__(self):
return "{}: {}".format(self.__class__.__name__, self._data.get("ID"))
def __repr__(self):
return "<%s>" % self
def _update(self, kwargs):
if self._data:
self._data.update(kwargs)
else:
self._data = kwargs
self.__dict__.update(kwargs)
@classmethod
def from_json(cls, json, manager=None):
data = loads(json)
return cls(manager=manager, **data)
def as_dict(self):
return self._data.copy()
class ModelManager:
"""Proxies calls to main API client. Encapsulates logic of certain part of API - bounces, emails, etc."""
name = None
model = None
token_type = "server"
count_key = "count"
offset_key = "offset"
max_chunk_size = 500
def __init__(self, client):
self.client = client
def __str__(self):
return self.__class__.__name__
def __repr__(self):
return "<%s>" % self
def _init_instance(self, data):
return self.model(manager=self, **data) # pylint: disable=not-callable
def _init_many(self, data):
return [self._init_instance(part) for part in data]
def call(self, *args, **kwargs):
kwargs["token_type"] = self.token_type
return self.client.call(*args, **kwargs)
def call_many(self, *args, **kwargs):
return list(self._call_many(*args, **kwargs))
def _call_many(self, *args, **kwargs):
count = kwargs.pop(self.count_key)
offset = kwargs.pop(self.offset_key)
loaded_items_count = 0
for _count, _offset in sizes(count, offset, self.max_chunk_size):
response = self.call(*args, **self.update_kwargs(kwargs, _count, _offset))
loaded_items_count += _count
yield response
            # We expect to load `TotalCount` - offset items in total.
            # That number will be less than or equal to the number of already loaded items;
            # it can be strictly less when the latest response contains fewer items than the requested `count`.
if response["TotalCount"] - offset <= loaded_items_count:
break
def expand_responses(self, responses, key):
items = [self._init_many(response[key]) for response in responses]
return sum(items, [])
def update_kwargs(self, kwargs, count, offset):
"""Helper to support handy dictionaries merging on all Python versions."""
kwargs.update({self.count_key: count, self.offset_key: offset})
return kwargs
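    # Illustrative note: call_many pages through the API in chunks of at most
    # max_chunk_size (500).  Assuming the `sizes` helper yields (count, offset)
    # pairs, a request for count=1200, offset=0 would be issued as chunks of
    # 500, 500 and 200, stopping early once TotalCount - offset items are loaded.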
class SubModelManager(ModelManager):
"""Works with multiple model managers.
Example:
>>> postmark = PostmarkClient(server_token='TEST')
>>> postmark.messages.outbound.all()
[]
"""
_managers = ()
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._setup_managers()
def _setup_managers(self):
for manager_class in self._managers:
instance = manager_class(self.client)
setattr(self, instance.name, instance)
class MessageModel(Model):
@property
def message(self):
return self._manager.client.messages.outbound.get(self.MessageID)
|
boooka/GeoPowerOff
|
refs/heads/master
|
venv/lib/python2.7/site-packages/django/conf/project_template/project_name/__init__.py
|
12133432
| |
stefanfoulis/django-filer-test
|
refs/heads/master
|
filer/fields/__init__.py
|
12133432
| |
iafan/zing
|
refs/heads/master
|
pootle/apps/pootle_app/views/index/__init__.py
|
12133432
| |
watersalesman/aura-botnet
|
refs/heads/master
|
aura-server/convey/tests.py
|
1
|
import datetime
from django.test import TestCase
from django.utils import timezone
from convey.models import Bot, Command, File
import json
def create_bot(
version='0.1.2',
hash_type='sha256sum',
hash_sum='testsum123',
operating_sys='Test',
ip_addr='8.8.8.8',
user='tester',
group=-5
):
return Bot.objects.create(
version=version,
hash_type=hash_type,
hash_sum=hash_sum,
operating_sys=operating_sys,
ip_addr=ip_addr,
user=user,
group=group
)
def create_command(
start_days=-5,
end_days=5,
shell="default",
cmd_txt='Test',
group_assigned=-2,
hash_assigned=None
):
start_time = timezone.now() + datetime.timedelta(days=start_days)
end_time = timezone.now() + datetime.timedelta(days=end_days)
return Command.objects.create(
start_time=start_time,
end_time=end_time,
shell=shell,
cmd_txt=cmd_txt,
group_assigned=group_assigned,
hash_assigned=hash_assigned
)
class RegisterViewTest(TestCase):
def test_linux_standard_registration(self):
user = 'user'
params = {
'version': '0.1.2',
'hash_type':'sha256sum',
'hash_sum':'alskdjf;lji2laskdjfi',
'operating_sys':'Linux',
'user': user
}
response = self.client.post('/convey/register/', params)
self.assertEqual(response.status_code, 200)
bot = Bot.objects.filter(user=user)[0]
self.assertNotEqual(bot.group, -5)
def test_linux_root_registration(self):
user = 'root'
params = {
'version': '0.1.2',
'hash_type':'sha256sum',
'hash_sum':'alskdjf;lji2laskdjfi',
'operating_sys':'Linux',
'user':user
}
response = self.client.post('/convey/register/', params)
self.assertEqual(response.status_code, 200)
bot = Bot.objects.filter(user=user)[0]
self.assertNotEqual(bot.group, -5)
def test_windows10_standard_registration(self):
user = 'user'
params = {
'version': '0.1.2',
'hash_type':'sha256sum',
'hash_sum':'alskdjf;lji2laskdjfi',
'operating_sys':'windows 10',
'user':user
}
response = self.client.post('/convey/register/', params)
self.assertEqual(response.status_code, 200)
bot = Bot.objects.filter(user=user)[0]
self.assertNotEqual(bot.group, -5)
def test_windows10_admin_registration(self):
user = 'user(admin)'
params = {
'version': '0.1.2',
'hash_type':'sha256sum',
'hash_sum':'alskdjf;lji2laskdjfi',
'operating_sys':'windows 10',
'user': user
}
response = self.client.post('/convey/register/', params)
self.assertEqual(response.status_code, 200)
bot = Bot.objects.filter(user=user)[0]
self.assertNotEqual(bot.group, -5)
def test_no_version_registration(self):
user = 'user(admin)'
params = {
'hash_type':'sha256sum',
'hash_sum':'alskdjf;lji2laskdjfi',
'operating_sys':'windows 10',
'user': user
}
response = self.client.post('/convey/register/', params)
self.assertEqual(response.status_code, 200)
bot = Bot.objects.filter(user=user)[0]
self.assertEqual(bot.version, None)
class CmdViewTests(TestCase):
def test_unauthorized_bot_post(self):
cmd = create_command()
response = self.client.post(
'/convey/cmd/',
{'hash_sum':'fake'}
)
self.assertEqual(response.status_code, 404)
def test_authorized_bot_post(self):
bot = create_bot()
cmd = create_command()
response = self.client.post(
'/convey/cmd/',
{'hash_sum': bot.hash_sum}
)
self.assertEqual(response.status_code, 200)
def test_authorized_root_bot_post(self):
bot = create_bot(group=0)
cmd = create_command(group_assigned=-2)
response = self.client.post(
'/convey/cmd/',
{'hash_sum': bot.hash_sum}
)
self.assertEqual(response.status_code, 200)
def test_early_cmd_request(self):
bot = create_bot()
create_command(5, 10)
response = self.client.post(
'/convey/cmd/',
{'hash_sum': bot.hash_sum}
)
self.assertEqual(response.status_code, 404)
def test_late_cmd_request(self):
bot = create_bot()
create_command(-10, -5)
response = self.client.post(
'/convey/cmd/',
{'hash_sum': bot.hash_sum}
)
self.assertEqual(response.status_code, 404)
def test_command_prioritizing_all(self):
create_command(-1, 1, group_assigned=-1, cmd_txt='ALL Command')
create_command(-1, 1, group_assigned=-2, cmd_txt='Default')
create_command(-1, 1, group_assigned=5, cmd_txt='Group 5')
bot = create_bot(group=5)
response = self.client.post(
'/convey/cmd/',
{'hash_sum': bot.hash_sum}
)
self.assertContains(response, 'ALL Command')
def test_command_prioritizing_individual(self):
create_command(-1, 1, hash_assigned='test', cmd_txt='Individual')
create_command(-1, 1, group_assigned=-2, cmd_txt='Default')
create_command(-1, 1, group_assigned=5, cmd_txt='Group 5')
bot = create_bot(hash_sum='test', group=5)
response = self.client.post(
'/convey/cmd/',
{'hash_sum': bot.hash_sum}
)
self.assertContains(response, 'Individual')
def test_command_group_prioritizing_group_assigned(self):
create_command(-1, 1, group_assigned=-2, cmd_txt='Default')
create_command(-1, 1, group_assigned=5, cmd_txt='Group 5')
bot = create_bot(group=5)
response = self.client.post(
'/convey/cmd/',
{'hash_sum': bot.hash_sum}
)
self.assertContains(response, 'Group 5')
def test_oneshot_command(self):
create_command(group_assigned=-2, cmd_txt='Default')
bot = create_bot(group=5)
self.client.post(
'/convey/cmd/',
{'hash_sum': bot.hash_sum}
)
response = self.client.post(
'/convey/cmd/',
{'hash_sum': bot.hash_sum}
)
self.assertEqual(response.status_code, 404)
def test_version_is_updated(self):
user = "test";
bot = create_bot()
cmd = create_command()
new_version = "UpdatedVersion"
response = self.client.post(
'/convey/cmd/',
{
'version': new_version,
'hash_sum': bot.hash_sum,
}
)
self.assertEqual(response.status_code, 200)
bot = Bot.objects.all()[0]
self.assertEqual(bot.version, new_version)
def test_json_if_version_not_none(self):
bot = create_bot()
cmd = create_command()
response = self.client.post(
'/convey/cmd/',
{
'version': bot.version,
'hash_sum': bot.hash_sum,
}
)
command_json = json.loads(response.content.decode('utf-8'))
self.assertEqual(command_json['command_text'], cmd.cmd_txt)
self.assertEqual(command_json['shell'], cmd.shell)
def test_json_if_version_is_none(self):
bot = create_bot()
cmd = create_command()
response = self.client.post(
'/convey/cmd/',
{
'hash_sum': bot.hash_sum,
}
)
self.assertEqual(response.content.decode('utf-8'), cmd.cmd_txt)
def test_json_has_empty_file_deps(self):
bot = create_bot()
cmd = create_command()
response = self.client.post(
'/convey/cmd/',
{
'version': bot.version,
'hash_sum': bot.hash_sum,
}
)
command_json = json.loads(response.content.decode('utf-8'))
self.assertEqual(command_json['files'], [])
def test_json_has_file_deps(self):
bot = create_bot()
cmd = create_command()
cmd.file_set.bulk_create([
File(name="file1",file_type="local",path="/etc/test",command_id=cmd.id),
File(name="file2",file_type="network", path="https://google.com",command_id=cmd.id),
File(name="file3",file_type="local", path="/etc/test2",command_id=cmd.id),
])
response = self.client.post(
'/convey/cmd/',
{
'version': bot.version,
'hash_sum': bot.hash_sum,
}
)
command_json = json.loads(response.content.decode('utf-8'))
self.assertNotEqual(command_json['files'], [])
self.assertEqual(len(command_json['files']), 3)
dep_data = [
{'name': 'file1', 'type': 'local', 'path': '/etc/test'},
{'name': 'file2', 'type': 'network', 'path': 'https://google.com'},
{'name': 'file3', 'type': 'local', 'path': '/etc/test2'},
]
self.assertEqual(dep_data, command_json['files'])
|
ishay2b/tensorflow
|
refs/heads/segnet
|
tensorflow/tools/api/tests/api_compatibility_test.py
|
34
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==============================================================================
"""TensorFlow API compatibility tests.
This test ensures all changes to the public API of TensorFlow are intended.
If this test fails, it means a change has been made to the public API. Backwards
incompatible changes are not allowed. You can run the test with the
"--update_goldens" flag set to "True" to update the goldens when making changes to
the public TF python API.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import re
import sys
import unittest
import tensorflow as tf
from google.protobuf import text_format
from tensorflow.python.lib.io import file_io
from tensorflow.python.platform import resource_loader
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
from tensorflow.tools.api.lib import api_objects_pb2
from tensorflow.tools.api.lib import python_object_to_proto_visitor
from tensorflow.tools.common import public_api
from tensorflow.tools.common import traverse
# FLAGS defined at the bottom:
FLAGS = None
# DEFINE_boolean, update_goldens, default False:
_UPDATE_GOLDENS_HELP = """
Update stored golden files if API is updated. WARNING: All API changes
have to be authorized by TensorFlow leads.
"""
# DEFINE_boolean, verbose_diffs, default False:
_VERBOSE_DIFFS_HELP = """
If set to true, print line by line diffs on all libraries. If set to
false, only print which libraries have differences.
"""
_API_GOLDEN_FOLDER = 'tensorflow/tools/api/golden'
_TEST_README_FILE = 'tensorflow/tools/api/tests/README.txt'
_UPDATE_WARNING_FILE = 'tensorflow/tools/api/tests/API_UPDATE_WARNING.txt'
def _KeyToFilePath(key):
"""From a given key, construct a filepath."""
def _ReplaceCapsWithDash(matchobj):
match = matchobj.group(0)
return '-%s' % (match.lower())
case_insensitive_key = re.sub('([A-Z]{1})', _ReplaceCapsWithDash, key)
return os.path.join(_API_GOLDEN_FOLDER, '%s.pbtxt' % case_insensitive_key)
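# For example (a sketch; the key below is illustrative):
#   _KeyToFilePath('tensorflow.AggregationMethod')
#   -> 'tensorflow/tools/api/golden/tensorflow.-aggregation-method.pbtxt'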
def _FileNameToKey(filename):
"""From a given filename, construct a key we use for api objects."""
def _ReplaceDashWithCaps(matchobj):
match = matchobj.group(0)
return match[1].upper()
base_filename = os.path.basename(filename)
base_filename_without_ext = os.path.splitext(base_filename)[0]
api_object_key = re.sub(
'((-[a-z]){1})', _ReplaceDashWithCaps, base_filename_without_ext)
return api_object_key
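# The inverse mapping of _KeyToFilePath, e.g. (sketch):
#   _FileNameToKey('tensorflow/tools/api/golden/tensorflow.-aggregation-method.pbtxt')
#   -> 'tensorflow.AggregationMethod'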
class ApiCompatibilityTest(test.TestCase):
def __init__(self, *args, **kwargs):
super(ApiCompatibilityTest, self).__init__(*args, **kwargs)
golden_update_warning_filename = os.path.join(
resource_loader.get_root_dir_with_all_resources(),
_UPDATE_WARNING_FILE)
self._update_golden_warning = file_io.read_file_to_string(
golden_update_warning_filename)
test_readme_filename = os.path.join(
resource_loader.get_root_dir_with_all_resources(),
_TEST_README_FILE)
self._test_readme_message = file_io.read_file_to_string(
test_readme_filename)
def _AssertProtoDictEquals(self,
expected_dict,
actual_dict,
verbose=False,
update_goldens=False):
"""Diff given dicts of protobufs and report differences a readable way.
Args:
expected_dict: a dict of TFAPIObject protos constructed from golden
files.
      actual_dict: a dict of TFAPIObject protos constructed by reading from the
TF package linked to the test.
verbose: Whether to log the full diffs, or simply report which files were
different.
update_goldens: Whether to update goldens when there are diffs found.
"""
diffs = []
verbose_diffs = []
expected_keys = set(expected_dict.keys())
actual_keys = set(actual_dict.keys())
only_in_expected = expected_keys - actual_keys
only_in_actual = actual_keys - expected_keys
all_keys = expected_keys | actual_keys
# This will be populated below.
updated_keys = []
for key in all_keys:
diff_message = ''
verbose_diff_message = ''
# First check if the key is not found in one or the other.
if key in only_in_expected:
diff_message = 'Object %s expected but not found (removed).' % key
verbose_diff_message = diff_message
elif key in only_in_actual:
diff_message = 'New object %s found (added).' % key
verbose_diff_message = diff_message
else:
# Now we can run an actual proto diff.
try:
self.assertProtoEquals(expected_dict[key], actual_dict[key])
except AssertionError as e:
updated_keys.append(key)
diff_message = 'Change detected in python object: %s.' % key
verbose_diff_message = str(e)
# All difference cases covered above. If any difference found, add to the
# list.
if diff_message:
diffs.append(diff_message)
verbose_diffs.append(verbose_diff_message)
# If diffs are found, handle them based on flags.
if diffs:
diff_count = len(diffs)
logging.error(self._test_readme_message)
logging.error('%d differences found between API and golden.', diff_count)
messages = verbose_diffs if verbose else diffs
for i in range(diff_count):
logging.error('Issue %d\t: %s', i + 1, messages[i])
if update_goldens:
# Write files if requested.
logging.warning(self._update_golden_warning)
# If the keys are only in expected, some objects are deleted.
# Remove files.
for key in only_in_expected:
filepath = _KeyToFilePath(key)
file_io.delete_file(filepath)
# If the files are only in actual (current library), these are new
# modules. Write them to files. Also record all updates in files.
for key in only_in_actual | set(updated_keys):
filepath = _KeyToFilePath(key)
file_io.write_string_to_file(
filepath, text_format.MessageToString(actual_dict[key]))
else:
# Fail if we cannot fix the test by updating goldens.
self.fail('%d differences found between API and golden.' % diff_count)
else:
logging.info('No differences found between API and golden.')
@unittest.skipUnless(
sys.version_info.major == 2 and os.uname()[0] == 'Linux',
      'API compatibility test goldens are generated using python2 on Linux.')
def testAPIBackwardsCompatibility(self):
# Extract all API stuff.
visitor = python_object_to_proto_visitor.PythonObjectToProtoVisitor()
public_api_visitor = public_api.PublicAPIVisitor(visitor)
public_api_visitor.do_not_descend_map['tf'].append('contrib')
traverse.traverse(tf, public_api_visitor)
proto_dict = visitor.GetProtos()
# Read all golden files.
expression = os.path.join(
resource_loader.get_root_dir_with_all_resources(),
_KeyToFilePath('*'))
golden_file_list = file_io.get_matching_files(expression)
def _ReadFileToProto(filename):
"""Read a filename, create a protobuf from its contents."""
ret_val = api_objects_pb2.TFAPIObject()
text_format.Merge(file_io.read_file_to_string(filename), ret_val)
return ret_val
golden_proto_dict = {
_FileNameToKey(filename): _ReadFileToProto(filename)
for filename in golden_file_list
}
# Diff them. Do not fail if called with update.
# If the test is run to update goldens, only report diffs but do not fail.
self._AssertProtoDictEquals(
golden_proto_dict,
proto_dict,
verbose=FLAGS.verbose_diffs,
update_goldens=FLAGS.update_goldens)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--update_goldens', type=bool, default=False, help=_UPDATE_GOLDENS_HELP)
parser.add_argument(
'--verbose_diffs', type=bool, default=False, help=_VERBOSE_DIFFS_HELP)
FLAGS, unparsed = parser.parse_known_args()
# Now update argv, so that unittest library does not get confused.
sys.argv = [sys.argv[0]] + unparsed
test.main()
|
mccheung/kbengine
|
refs/heads/master
|
kbe/res/scripts/common/Lib/lib2to3/fixes/fix_future.py
|
529
|
"""Remove __future__ imports
from __future__ import foo is replaced with an empty line.
"""
# Author: Christian Heimes
# Local imports
from .. import fixer_base
from ..fixer_util import BlankLine
class FixFuture(fixer_base.BaseFix):
BM_compatible = True
PATTERN = """import_from< 'from' module_name="__future__" 'import' any >"""
# This should be run last -- some things check for the import
run_order = 10
def transform(self, node, results):
new = BlankLine()
new.prefix = node.prefix
return new
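# A minimal before/after sketch of the transformation (the input line is assumed):
#   before:  from __future__ import print_function
#   after:   (an empty line; leading comments/whitespace survive via node.prefix)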
|
clan2000/data-science-from-scratch
|
refs/heads/master
|
code-python3/most_common_words.py
|
16
|
# most_common_words.py
import sys
from collections import Counter
if __name__ == "__main__":
# pass in number of words as first argument
try:
num_words = int(sys.argv[1])
except:
print("usage: most_common_words.py num_words")
sys.exit(1) # non-zero exit code indicates error
counter = Counter(word.lower()
for line in sys.stdin
for word in line.strip().split()
if word)
for word, count in counter.most_common(num_words):
sys.stdout.write(str(count))
sys.stdout.write("\t")
sys.stdout.write(word)
sys.stdout.write("\n")
|
simongoffin/website_version
|
refs/heads/Multi_fonctionnel
|
addons/hr/__init__.py
|
382
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import hr
import res_config
import res_users
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
babble/babble
|
refs/heads/master
|
include/jython/Lib/test/bugs/pr239.py
|
31
|
# A test for PR#239, escaping a quote inside a triple quoted string.
s = r"""
\""" 1.triple-quote
\""" 2.triple-quote
"""
|
ccomb/OpenUpgrade
|
refs/heads/master
|
addons/sale/__openerp__.py
|
52
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Sales Management',
'version': '1.0',
'category': 'Sales Management',
'sequence': 14,
'summary': 'Quotations, Sales Orders, Invoicing',
'description': """
Manage sales quotations and orders
==================================
This application allows you to manage your sales goals in an effective and efficient manner by keeping track of all sales orders and history.
It handles the full sales workflow:
* **Quotation** -> **Sales order** -> **Invoice**
Preferences (only with Warehouse Management installed)
------------------------------------------------------
If you also installed the Warehouse Management, you can deal with the following preferences:
* Shipping: Choice of delivery at once or partial delivery
* Invoicing: choose how invoices will be paid
* Incoterms: International Commercial terms
You can choose flexible invoicing methods:
* *On Demand*: Invoices are created manually from Sales Orders when needed
* *On Delivery Order*: Invoices are generated from picking (delivery)
* *Before Delivery*: A Draft invoice is created and must be paid before delivery
The Dashboard for the Sales Manager will include
------------------------------------------------
* My Quotations
* Monthly Turnover (Graph)
""",
'author': 'OpenERP SA',
'website': 'http://www.openerp.com',
'images': ['images/Sale_order_line_to_invoice.jpeg','images/sale_order.jpeg','images/sales_analysis.jpeg'],
'depends': ['sales_team','account_voucher', 'procurement', 'report'],
'data': [
'wizard/sale_make_invoice_advance.xml',
'wizard/sale_line_invoice.xml',
'wizard/sale_make_invoice.xml',
'security/sale_security.xml',
'security/ir.model.access.csv',
'sale_workflow.xml',
'sale_sequence.xml',
'sale_report.xml',
'sale_data.xml',
'sale_view.xml',
'sales_team_view.xml',
'res_partner_view.xml',
'report/sale_report_view.xml',
'edi/sale_order_action_data.xml',
'res_config_view.xml',
'views/report_saleorder.xml',
],
'demo': ['sale_demo.xml'],
'test': [
'test/create_sale_users.yml',
'test/sale_order_demo.yml',
'test/manual_order_policy.yml',
'test/cancel_order.yml',
'test/delete_order.yml',
'test/edi_sale_order.yml',
],
'installable': True,
'auto_install': False,
'application': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
eeshangarg/oh-mainline
|
refs/heads/master
|
vendor/packages/scrapy/scrapy/tests/test_spidermanager/test_spiders/spider2.py
|
28
|
from scrapy.spider import BaseSpider
class Spider2(BaseSpider):
name = "spider2"
allowed_domains = ["scrapy2.org", "scrapy3.org"]
|
danpetrikin/django-tracking
|
refs/heads/master
|
demo/__init__.py
|
12133432
| |
bhupennewalkar1337/erpnext
|
refs/heads/develop
|
erpnext/docs/assets/img/selling/__init__.py
|
12133432
| |
beckastar/django
|
refs/heads/master
|
tests/m2m_through/__init__.py
|
12133432
| |
2hdddg/pyvidstream
|
refs/heads/master
|
vidutil/__init__.py
|
12133432
| |
overcastcloud/trollius
|
refs/heads/master
|
tests/test_proactor_events.py
|
1
|
"""Tests for proactor_events.py"""
import socket
import unittest
from trollius import test_utils
from trollius.proactor_events import BaseProactorEventLoop
from trollius.proactor_events import _ProactorDuplexPipeTransport
from trollius.proactor_events import _ProactorSocketTransport
from trollius.proactor_events import _ProactorWritePipeTransport
from trollius.py33_exceptions import ConnectionAbortedError, ConnectionResetError
from trollius.test_utils import mock
import trollius as asyncio
def close_transport(transport):
# Don't call transport.close() because the event loop and the IOCP proactor
# are mocked
if transport._sock is None:
return
transport._sock.close()
transport._sock = None
class ProactorSocketTransportTests(test_utils.TestCase):
def setUp(self):
self.loop = self.new_test_loop()
self.addCleanup(self.loop.close)
self.proactor = mock.Mock()
self.loop._proactor = self.proactor
self.protocol = test_utils.make_test_protocol(asyncio.Protocol)
self.sock = mock.Mock(socket.socket)
def socket_transport(self, waiter=None):
transport = _ProactorSocketTransport(self.loop, self.sock,
self.protocol, waiter=waiter)
self.addCleanup(close_transport, transport)
return transport
def test_ctor(self):
fut = asyncio.Future(loop=self.loop)
tr = self.socket_transport(waiter=fut)
test_utils.run_briefly(self.loop)
self.assertIsNone(fut.result())
self.protocol.connection_made(tr)
self.proactor.recv.assert_called_with(self.sock, 4096)
def test_loop_reading(self):
tr = self.socket_transport()
tr._loop_reading()
self.loop._proactor.recv.assert_called_with(self.sock, 4096)
self.assertFalse(self.protocol.data_received.called)
self.assertFalse(self.protocol.eof_received.called)
def test_loop_reading_data(self):
res = asyncio.Future(loop=self.loop)
res.set_result(b'data')
tr = self.socket_transport()
tr._read_fut = res
tr._loop_reading(res)
self.loop._proactor.recv.assert_called_with(self.sock, 4096)
self.protocol.data_received.assert_called_with(b'data')
def test_loop_reading_no_data(self):
res = asyncio.Future(loop=self.loop)
res.set_result(b'')
tr = self.socket_transport()
self.assertRaises(AssertionError, tr._loop_reading, res)
tr.close = mock.Mock()
tr._read_fut = res
tr._loop_reading(res)
self.assertFalse(self.loop._proactor.recv.called)
self.assertTrue(self.protocol.eof_received.called)
self.assertTrue(tr.close.called)
def test_loop_reading_aborted(self):
err = self.loop._proactor.recv.side_effect = ConnectionAbortedError()
tr = self.socket_transport()
tr._fatal_error = mock.Mock()
tr._loop_reading()
tr._fatal_error.assert_called_with(
err,
'Fatal read error on pipe transport')
def test_loop_reading_aborted_closing(self):
self.loop._proactor.recv.side_effect = ConnectionAbortedError()
tr = self.socket_transport()
tr._closing = True
tr._fatal_error = mock.Mock()
tr._loop_reading()
self.assertFalse(tr._fatal_error.called)
def test_loop_reading_aborted_is_fatal(self):
self.loop._proactor.recv.side_effect = ConnectionAbortedError()
tr = self.socket_transport()
tr._closing = False
tr._fatal_error = mock.Mock()
tr._loop_reading()
self.assertTrue(tr._fatal_error.called)
def test_loop_reading_conn_reset_lost(self):
err = self.loop._proactor.recv.side_effect = ConnectionResetError()
tr = self.socket_transport()
tr._closing = False
tr._fatal_error = mock.Mock()
tr._force_close = mock.Mock()
tr._loop_reading()
self.assertFalse(tr._fatal_error.called)
tr._force_close.assert_called_with(err)
def test_loop_reading_exception(self):
err = self.loop._proactor.recv.side_effect = (OSError())
tr = self.socket_transport()
tr._fatal_error = mock.Mock()
tr._loop_reading()
tr._fatal_error.assert_called_with(
err,
'Fatal read error on pipe transport')
def test_write(self):
tr = self.socket_transport()
tr._loop_writing = mock.Mock()
tr.write(b'data')
self.assertEqual(tr._buffer, None)
tr._loop_writing.assert_called_with(data=b'data')
def test_write_no_data(self):
tr = self.socket_transport()
tr.write(b'')
self.assertFalse(tr._buffer)
def test_write_more(self):
tr = self.socket_transport()
tr._write_fut = mock.Mock()
tr._loop_writing = mock.Mock()
tr.write(b'data')
self.assertEqual(tr._buffer, b'data')
self.assertFalse(tr._loop_writing.called)
def test_loop_writing(self):
tr = self.socket_transport()
tr._buffer = bytearray(b'data')
tr._loop_writing()
self.loop._proactor.send.assert_called_with(self.sock, b'data')
self.loop._proactor.send.return_value.add_done_callback.\
assert_called_with(tr._loop_writing)
@mock.patch('trollius.proactor_events.logger')
def test_loop_writing_err(self, m_log):
err = self.loop._proactor.send.side_effect = OSError()
tr = self.socket_transport()
tr._fatal_error = mock.Mock()
tr._buffer = [b'da', b'ta']
tr._loop_writing()
tr._fatal_error.assert_called_with(
err,
'Fatal write error on pipe transport')
tr._conn_lost = 1
tr.write(b'data')
tr.write(b'data')
tr.write(b'data')
tr.write(b'data')
tr.write(b'data')
self.assertEqual(tr._buffer, None)
m_log.warning.assert_called_with('socket.send() raised exception.')
def test_loop_writing_stop(self):
fut = asyncio.Future(loop=self.loop)
fut.set_result(b'data')
tr = self.socket_transport()
tr._write_fut = fut
tr._loop_writing(fut)
self.assertIsNone(tr._write_fut)
def test_loop_writing_closing(self):
fut = asyncio.Future(loop=self.loop)
fut.set_result(1)
tr = self.socket_transport()
tr._write_fut = fut
tr.close()
tr._loop_writing(fut)
self.assertIsNone(tr._write_fut)
test_utils.run_briefly(self.loop)
self.protocol.connection_lost.assert_called_with(None)
def test_abort(self):
tr = self.socket_transport()
tr._force_close = mock.Mock()
tr.abort()
tr._force_close.assert_called_with(None)
def test_close(self):
tr = self.socket_transport()
tr.close()
test_utils.run_briefly(self.loop)
self.protocol.connection_lost.assert_called_with(None)
self.assertTrue(tr._closing)
self.assertEqual(tr._conn_lost, 1)
self.protocol.connection_lost.reset_mock()
tr.close()
test_utils.run_briefly(self.loop)
self.assertFalse(self.protocol.connection_lost.called)
def test_close_write_fut(self):
tr = self.socket_transport()
tr._write_fut = mock.Mock()
tr.close()
test_utils.run_briefly(self.loop)
self.assertFalse(self.protocol.connection_lost.called)
def test_close_buffer(self):
tr = self.socket_transport()
tr._buffer = [b'data']
tr.close()
test_utils.run_briefly(self.loop)
self.assertFalse(self.protocol.connection_lost.called)
@mock.patch('trollius.base_events.logger')
def test_fatal_error(self, m_logging):
tr = self.socket_transport()
tr._force_close = mock.Mock()
tr._fatal_error(None)
self.assertTrue(tr._force_close.called)
self.assertTrue(m_logging.error.called)
def test_force_close(self):
tr = self.socket_transport()
tr._buffer = [b'data']
read_fut = tr._read_fut = mock.Mock()
write_fut = tr._write_fut = mock.Mock()
tr._force_close(None)
read_fut.cancel.assert_called_with()
write_fut.cancel.assert_called_with()
test_utils.run_briefly(self.loop)
self.protocol.connection_lost.assert_called_with(None)
self.assertEqual(None, tr._buffer)
self.assertEqual(tr._conn_lost, 1)
def test_force_close_idempotent(self):
tr = self.socket_transport()
tr._closing = True
tr._force_close(None)
test_utils.run_briefly(self.loop)
self.assertFalse(self.protocol.connection_lost.called)
def test_fatal_error_2(self):
tr = self.socket_transport()
tr._buffer = [b'data']
tr._force_close(None)
test_utils.run_briefly(self.loop)
self.protocol.connection_lost.assert_called_with(None)
self.assertEqual(None, tr._buffer)
def test_call_connection_lost(self):
tr = self.socket_transport()
tr._call_connection_lost(None)
self.assertTrue(self.protocol.connection_lost.called)
self.assertTrue(self.sock.close.called)
def test_write_eof(self):
tr = self.socket_transport()
self.assertTrue(tr.can_write_eof())
tr.write_eof()
self.sock.shutdown.assert_called_with(socket.SHUT_WR)
tr.write_eof()
self.assertEqual(self.sock.shutdown.call_count, 1)
tr.close()
def test_write_eof_buffer(self):
tr = self.socket_transport()
f = asyncio.Future(loop=self.loop)
tr._loop._proactor.send.return_value = f
tr.write(b'data')
tr.write_eof()
self.assertTrue(tr._eof_written)
self.assertFalse(self.sock.shutdown.called)
tr._loop._proactor.send.assert_called_with(self.sock, b'data')
f.set_result(4)
self.loop._run_once()
self.sock.shutdown.assert_called_with(socket.SHUT_WR)
tr.close()
def test_write_eof_write_pipe(self):
tr = _ProactorWritePipeTransport(
self.loop, self.sock, self.protocol)
self.assertTrue(tr.can_write_eof())
tr.write_eof()
self.assertTrue(tr._closing)
self.loop._run_once()
self.assertTrue(self.sock.close.called)
tr.close()
def test_write_eof_buffer_write_pipe(self):
tr = _ProactorWritePipeTransport(self.loop, self.sock, self.protocol)
f = asyncio.Future(loop=self.loop)
tr._loop._proactor.send.return_value = f
tr.write(b'data')
tr.write_eof()
self.assertTrue(tr._closing)
self.assertFalse(self.sock.shutdown.called)
tr._loop._proactor.send.assert_called_with(self.sock, b'data')
f.set_result(4)
self.loop._run_once()
self.loop._run_once()
self.assertTrue(self.sock.close.called)
tr.close()
def test_write_eof_duplex_pipe(self):
tr = _ProactorDuplexPipeTransport(
self.loop, self.sock, self.protocol)
self.assertFalse(tr.can_write_eof())
with self.assertRaises(NotImplementedError):
tr.write_eof()
close_transport(tr)
def test_pause_resume_reading(self):
tr = self.socket_transport()
futures = []
for msg in [b'data1', b'data2', b'data3', b'data4', b'']:
f = asyncio.Future(loop=self.loop)
f.set_result(msg)
futures.append(f)
self.loop._proactor.recv.side_effect = futures
self.loop._run_once()
self.assertFalse(tr._paused)
self.loop._run_once()
self.protocol.data_received.assert_called_with(b'data1')
self.loop._run_once()
self.protocol.data_received.assert_called_with(b'data2')
tr.pause_reading()
self.assertTrue(tr._paused)
for i in range(10):
self.loop._run_once()
self.protocol.data_received.assert_called_with(b'data2')
tr.resume_reading()
self.assertFalse(tr._paused)
self.loop._run_once()
self.protocol.data_received.assert_called_with(b'data3')
self.loop._run_once()
self.protocol.data_received.assert_called_with(b'data4')
tr.close()
def pause_writing_transport(self, high):
tr = self.socket_transport()
tr.set_write_buffer_limits(high=high)
self.assertEqual(tr.get_write_buffer_size(), 0)
self.assertFalse(self.protocol.pause_writing.called)
self.assertFalse(self.protocol.resume_writing.called)
return tr
def test_pause_resume_writing(self):
tr = self.pause_writing_transport(high=4)
# write a large chunk, must pause writing
fut = asyncio.Future(loop=self.loop)
self.loop._proactor.send.return_value = fut
tr.write(b'large data')
self.loop._run_once()
self.assertTrue(self.protocol.pause_writing.called)
# flush the buffer
fut.set_result(None)
self.loop._run_once()
self.assertEqual(tr.get_write_buffer_size(), 0)
self.assertTrue(self.protocol.resume_writing.called)
def test_pause_writing_2write(self):
tr = self.pause_writing_transport(high=4)
# first short write, the buffer is not full (3 <= 4)
fut1 = asyncio.Future(loop=self.loop)
self.loop._proactor.send.return_value = fut1
tr.write(b'123')
self.loop._run_once()
self.assertEqual(tr.get_write_buffer_size(), 3)
self.assertFalse(self.protocol.pause_writing.called)
# fill the buffer, must pause writing (6 > 4)
tr.write(b'abc')
self.loop._run_once()
self.assertEqual(tr.get_write_buffer_size(), 6)
self.assertTrue(self.protocol.pause_writing.called)
def test_pause_writing_3write(self):
tr = self.pause_writing_transport(high=4)
# first short write, the buffer is not full (1 <= 4)
fut = asyncio.Future(loop=self.loop)
self.loop._proactor.send.return_value = fut
tr.write(b'1')
self.loop._run_once()
self.assertEqual(tr.get_write_buffer_size(), 1)
self.assertFalse(self.protocol.pause_writing.called)
# second short write, the buffer is not full (3 <= 4)
tr.write(b'23')
self.loop._run_once()
self.assertEqual(tr.get_write_buffer_size(), 3)
self.assertFalse(self.protocol.pause_writing.called)
# fill the buffer, must pause writing (6 > 4)
tr.write(b'abc')
self.loop._run_once()
self.assertEqual(tr.get_write_buffer_size(), 6)
self.assertTrue(self.protocol.pause_writing.called)
def test_dont_pause_writing(self):
tr = self.pause_writing_transport(high=4)
        # write a large chunk which completes immediately;
        # it should not pause writing
fut = asyncio.Future(loop=self.loop)
fut.set_result(None)
self.loop._proactor.send.return_value = fut
tr.write(b'very large data')
self.loop._run_once()
self.assertEqual(tr.get_write_buffer_size(), 0)
self.assertFalse(self.protocol.pause_writing.called)
class BaseProactorEventLoopTests(test_utils.TestCase):
def setUp(self):
self.sock = mock.Mock(socket.socket)
self.proactor = mock.Mock()
self.ssock, self.csock = mock.Mock(), mock.Mock()
class EventLoop(BaseProactorEventLoop):
def _socketpair(s):
return (self.ssock, self.csock)
self.loop = EventLoop(self.proactor)
self.set_event_loop(self.loop)
@mock.patch.object(BaseProactorEventLoop, 'call_soon')
@mock.patch.object(BaseProactorEventLoop, '_socketpair')
def test_ctor(self, socketpair, call_soon):
ssock, csock = socketpair.return_value = (
mock.Mock(), mock.Mock())
loop = BaseProactorEventLoop(self.proactor)
self.assertIs(loop._ssock, ssock)
self.assertIs(loop._csock, csock)
self.assertEqual(loop._internal_fds, 1)
call_soon.assert_called_with(loop._loop_self_reading)
loop.close()
def test_close_self_pipe(self):
self.loop._close_self_pipe()
self.assertEqual(self.loop._internal_fds, 0)
self.assertTrue(self.ssock.close.called)
self.assertTrue(self.csock.close.called)
self.assertIsNone(self.loop._ssock)
self.assertIsNone(self.loop._csock)
# Don't call close(): _close_self_pipe() cannot be called twice
self.loop._closed = True
def test_close(self):
self.loop._close_self_pipe = mock.Mock()
self.loop.close()
self.assertTrue(self.loop._close_self_pipe.called)
self.assertTrue(self.proactor.close.called)
self.assertIsNone(self.loop._proactor)
self.loop._close_self_pipe.reset_mock()
self.loop.close()
self.assertFalse(self.loop._close_self_pipe.called)
def test_sock_recv(self):
self.loop.sock_recv(self.sock, 1024)
self.proactor.recv.assert_called_with(self.sock, 1024)
def test_sock_sendall(self):
self.loop.sock_sendall(self.sock, b'data')
self.proactor.send.assert_called_with(self.sock, b'data')
def test_sock_connect(self):
self.loop.sock_connect(self.sock, 123)
self.proactor.connect.assert_called_with(self.sock, 123)
def test_sock_accept(self):
self.loop.sock_accept(self.sock)
self.proactor.accept.assert_called_with(self.sock)
def test_socketpair(self):
self.assertRaises(
NotImplementedError, BaseProactorEventLoop, self.proactor)
def test_make_socket_transport(self):
tr = self.loop._make_socket_transport(self.sock, asyncio.Protocol())
self.assertIsInstance(tr, _ProactorSocketTransport)
close_transport(tr)
def test_loop_self_reading(self):
self.loop._loop_self_reading()
self.proactor.recv.assert_called_with(self.ssock, 4096)
self.proactor.recv.return_value.add_done_callback.assert_called_with(
self.loop._loop_self_reading)
def test_loop_self_reading_fut(self):
fut = mock.Mock()
self.loop._loop_self_reading(fut)
self.assertTrue(fut.result.called)
self.proactor.recv.assert_called_with(self.ssock, 4096)
self.proactor.recv.return_value.add_done_callback.assert_called_with(
self.loop._loop_self_reading)
def test_loop_self_reading_exception(self):
self.loop.close = mock.Mock()
self.loop.call_exception_handler = mock.Mock()
self.proactor.recv.side_effect = OSError()
self.loop._loop_self_reading()
self.assertTrue(self.loop.call_exception_handler.called)
def test_write_to_self(self):
self.loop._write_to_self()
self.csock.send.assert_called_with(b'\0')
def test_process_events(self):
self.loop._process_events([])
@mock.patch('trollius.base_events.logger')
def test_create_server(self, m_log):
pf = mock.Mock()
call_soon = self.loop.call_soon = mock.Mock()
self.loop._start_serving(pf, self.sock)
self.assertTrue(call_soon.called)
# callback
loop = call_soon.call_args[0][0]
loop()
self.proactor.accept.assert_called_with(self.sock)
# conn
fut = mock.Mock()
fut.result.return_value = (mock.Mock(), mock.Mock())
make_tr = self.loop._make_socket_transport = mock.Mock()
loop(fut)
self.assertTrue(fut.result.called)
self.assertTrue(make_tr.called)
# exception
fut.result.side_effect = OSError()
loop(fut)
self.assertTrue(self.sock.close.called)
self.assertTrue(m_log.error.called)
def test_create_server_cancel(self):
pf = mock.Mock()
call_soon = self.loop.call_soon = mock.Mock()
self.loop._start_serving(pf, self.sock)
loop = call_soon.call_args[0][0]
# cancelled
fut = asyncio.Future(loop=self.loop)
fut.cancel()
loop(fut)
self.assertTrue(self.sock.close.called)
def test_stop_serving(self):
sock = mock.Mock()
self.loop._stop_serving(sock)
self.assertTrue(sock.close.called)
self.proactor._stop_serving.assert_called_with(sock)
if __name__ == '__main__':
unittest.main()
|
hrishioa/Aviato
|
refs/heads/master
|
flask/Lib/site-packages/nltk/test/portuguese_en_fixt.py
|
24
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from nltk.compat import PY3
from nltk.corpus import teardown_module
def setup_module(module):
from nose import SkipTest
raise SkipTest("portuguese_en.doctest imports nltk.examples.pt which doesn't exist!")
if not PY3:
raise SkipTest(
"portuguese_en.doctest was skipped because non-ascii doctests are not supported under Python 2.x"
)
|
MihaiMoldovanu/ansible
|
refs/heads/devel
|
lib/ansible/modules/cloud/azure/azure_rm_virtualmachine_extension.py
|
21
|
#!/usr/bin/python
#
# Copyright (c) 2017 Sertac Ozercan <seozerca@microsoft.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_virtualmachine_extension
version_added: "2.4"
short_description: Managed Azure Virtual Machine extension
description:
- Create, update and delete Azure Virtual Machine Extension
options:
resource_group:
description:
- Name of a resource group where the vm extension exists or will be created.
required: true
name:
description:
- Name of the vm extension
required: true
state:
description:
- Assert the state of the vm extension. Use 'present' to create or update a vm extension and
'absent' to delete a vm extension.
default: present
choices:
- absent
- present
required: false
location:
description:
- Valid azure location. Defaults to location of the resource group.
default: resource_group location
required: false
virtual_machine_name:
description:
- The name of the virtual machine where the extension should be create or updated.
required: false
publisher:
description:
- The name of the extension handler publisher.
required: false
virtual_machine_extension_type:
description:
- The type of the extension handler.
required: false
type_handler_version:
description:
- The type version of the extension handler.
required: false
settings:
description:
- Json formatted public settings for the extension.
required: false
protected_settings:
description:
- Json formatted protected settings for the extension.
required: false
auto_upgrade_minor_version:
description:
- Whether the extension handler should be automatically upgraded across minor versions.
required: false
extends_documentation_fragment:
- azure
author:
- "Sertac Ozercan (@sozercan)"
- "Julien Stroheker (@ju_stroh)"
'''
EXAMPLES = '''
- name: Create VM Extension
azure_rm_virtualmachine_extension:
name: myvmextension
location: eastus
resource_group: Testing
virtual_machine_name: myvm
publisher: Microsoft.Azure.Extensions
virtual_machine_extension_type: CustomScript
type_handler_version: 2.0
settings: '{"commandToExecute": "hostname"}'
auto_upgrade_minor_version: true
- name: Delete VM Extension
azure_rm_virtualmachine_extension:
name: myvmextension
location: eastus
resource_group: Testing
virtual_machine_name: myvm
state: absent
'''
RETURN = '''
state:
description: Current state of the vm extension
returned: always
type: dict
changed:
description: Whether or not the resource has changed
returned: always
type: bool
'''
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
try:
from msrestazure.azure_exceptions import CloudError
from azure.mgmt.compute.models import (
VirtualMachineExtension
)
except ImportError:
# This is handled in azure_rm_common
pass
def vmextension_to_dict(extension):
'''
    Serialize the VM Extension object returned by the API into a dict.
:return: dict
'''
return dict(
id=extension.id,
name=extension.name,
location=extension.location,
publisher=extension.publisher,
virtual_machine_extension_type=extension.virtual_machine_extension_type,
type_handler_version=extension.type_handler_version,
auto_upgrade_minor_version=extension.auto_upgrade_minor_version,
settings=extension.settings,
protected_settings=extension.protected_settings,
)
class AzureRMVMExtension(AzureRMModuleBase):
"""Configuration class for an Azure RM VM Extension resource"""
def __init__(self):
self.module_arg_spec = dict(
resource_group=dict(
type='str',
required=True
),
name=dict(
type='str',
required=True
),
state=dict(
type='str',
required=False,
default='present',
choices=['present', 'absent']
),
location=dict(
type='str',
required=False
),
virtual_machine_name=dict(
type='str',
required=False
),
publisher=dict(
type='str',
required=False
),
virtual_machine_extension_type=dict(
type='str',
required=False
),
type_handler_version=dict(
type='str',
required=False
),
auto_upgrade_minor_version=dict(
type='bool',
required=False
),
settings=dict(
type='dict',
required=False
),
protected_settings=dict(
type='dict',
required=False
)
)
self.resource_group = None
self.name = None
self.location = None
self.publisher = None
self.virtual_machine_extension_type = None
self.type_handler_version = None
self.auto_upgrade_minor_version = None
self.settings = None
self.protected_settings = None
self.state = None
self.results = dict(changed=False, state=dict())
super(AzureRMVMExtension, self).__init__(derived_arg_spec=self.module_arg_spec,
supports_check_mode=False,
supports_tags=False)
def exec_module(self, **kwargs):
"""Main module execution method"""
for key in list(self.module_arg_spec.keys()):
setattr(self, key, kwargs[key])
resource_group = None
response = None
to_be_updated = False
try:
resource_group = self.get_resource_group(self.resource_group)
except CloudError:
self.fail('resource group {} not found'.format(self.resource_group))
if not self.location:
self.location = resource_group.location
if self.state == 'present':
response = self.get_vmextension()
if not response:
to_be_updated = True
else:
if response['settings'] != self.settings:
response['settings'] = self.settings
to_be_updated = True
if response['protected_settings'] != self.protected_settings:
response['protected_settings'] = self.protected_settings
to_be_updated = True
if to_be_updated:
self.results['changed'] = True
self.results['state'] = self.create_or_update_vmextension()
elif self.state == 'absent':
self.delete_vmextension()
self.results['changed'] = True
return self.results
def create_or_update_vmextension(self):
'''
Method calling the Azure SDK to create or update the VM extension.
        :return: dict
'''
self.log("Creating VM extension {0}".format(self.name))
try:
params = VirtualMachineExtension(
location=self.location,
publisher=self.publisher,
virtual_machine_extension_type=self.virtual_machine_extension_type,
type_handler_version=self.type_handler_version,
auto_upgrade_minor_version=self.auto_upgrade_minor_version,
settings=self.settings,
protected_settings=self.protected_settings
)
poller = self.compute_client.virtual_machine_extensions.create_or_update(self.resource_group, self.virtual_machine_name, self.name, params)
response = self.get_poller_result(poller)
return vmextension_to_dict(response)
except CloudError as e:
self.log('Error attempting to create the VM extension.')
self.fail("Error creating the VM extension: {0}".format(str(e)))
def delete_vmextension(self):
'''
Method calling the Azure SDK to delete the VM Extension.
:return: void
'''
self.log("Deleting vmextension {0}".format(self.name))
try:
poller = self.compute_client.virtual_machine_extensions.delete(self.resource_group, self.virtual_machine_name, self.name)
self.get_poller_result(poller)
except CloudError as e:
self.log('Error attempting to delete the vmextension.')
self.fail("Error deleting the vmextension: {0}".format(str(e)))
def get_vmextension(self):
'''
Method calling the Azure SDK to get a VM Extension.
        :return: dict if the extension exists, False otherwise
'''
self.log("Checking if the vm extension {0} is present".format(self.name))
found = False
try:
response = self.compute_client.virtual_machine_extensions.get(self.resource_group, self.virtual_machine_name, self.name)
found = True
except CloudError as e:
self.log('Did not find vm extension')
if found:
return vmextension_to_dict(response)
else:
return False
def main():
"""Main execution"""
AzureRMVMExtension()
if __name__ == '__main__':
main()
|
tolokoban/grenier
|
refs/heads/master
|
src/articles/allergique_a_la_primalite/code.py
|
1
|
primes = [2, 3, 5, 7, 11, 13, 17, 19, 23,
29, 31, 37, 41, 43, 47, 53, 59, 61,
67, 71, 73, 79, 83, 89, 97]
def ok(n):
    # Reject n if its successor is prime.
    if n + 1 in primes: return False
    # Reject n if toggling bit a (with 2**a <= n, a < 7) together with a lower
    # bit b turns n into a prime.
    for a in range(7):
        maskA = 2**a
        if n >= maskA:
            for b in range(a):
                maskB = 2**b
                v = n ^ maskA ^ maskB
                if v in primes: return False
    return True
for n in range(10, 101):
if ok(n): print(n, bin(n))
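# Worked check (sketch): for n = 10, n + 1 = 11 is prime, so ok(10) is False
# before any bit flips are tried; such n are filtered out of the printed list.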
|
ryfeus/lambda-packs
|
refs/heads/master
|
Keras_tensorflow/source/numpy/lib/twodim_base.py
|
26
|
""" Basic functions for manipulating 2d arrays
"""
from __future__ import division, absolute_import, print_function
from numpy.core.numeric import (
asanyarray, arange, zeros, greater_equal, multiply, ones, asarray,
where, int8, int16, int32, int64, empty, promote_types, diagonal,
)
from numpy.core import iinfo
__all__ = [
'diag', 'diagflat', 'eye', 'fliplr', 'flipud', 'rot90', 'tri', 'triu',
'tril', 'vander', 'histogram2d', 'mask_indices', 'tril_indices',
'tril_indices_from', 'triu_indices', 'triu_indices_from', ]
i1 = iinfo(int8)
i2 = iinfo(int16)
i4 = iinfo(int32)
def _min_int(low, high):
""" get small int that fits the range """
if high <= i1.max and low >= i1.min:
return int8
if high <= i2.max and low >= i2.min:
return int16
if high <= i4.max and low >= i4.min:
return int32
return int64
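# For example (a sketch): _min_int(0, 200) returns int16, because 200 exceeds
# int8's maximum (127) but fits comfortably within int16's range.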
def fliplr(m):
"""
Flip array in the left/right direction.
Flip the entries in each row in the left/right direction.
Columns are preserved, but appear in a different order than before.
Parameters
----------
m : array_like
Input array, must be at least 2-D.
Returns
-------
f : ndarray
A view of `m` with the columns reversed. Since a view
is returned, this operation is :math:`\\mathcal O(1)`.
See Also
--------
flipud : Flip array in the up/down direction.
rot90 : Rotate array counterclockwise.
Notes
-----
Equivalent to A[:,::-1]. Requires the array to be at least 2-D.
Examples
--------
>>> A = np.diag([1.,2.,3.])
>>> A
array([[ 1., 0., 0.],
[ 0., 2., 0.],
[ 0., 0., 3.]])
>>> np.fliplr(A)
array([[ 0., 0., 1.],
[ 0., 2., 0.],
[ 3., 0., 0.]])
>>> A = np.random.randn(2,3,5)
>>> np.all(np.fliplr(A)==A[:,::-1,...])
True
"""
m = asanyarray(m)
if m.ndim < 2:
raise ValueError("Input must be >= 2-d.")
return m[:, ::-1]
def flipud(m):
"""
Flip array in the up/down direction.
Flip the entries in each column in the up/down direction.
Rows are preserved, but appear in a different order than before.
Parameters
----------
m : array_like
Input array.
Returns
-------
out : array_like
A view of `m` with the rows reversed. Since a view is
returned, this operation is :math:`\\mathcal O(1)`.
See Also
--------
fliplr : Flip array in the left/right direction.
rot90 : Rotate array counterclockwise.
Notes
-----
Equivalent to ``A[::-1,...]``.
Does not require the array to be two-dimensional.
Examples
--------
>>> A = np.diag([1.0, 2, 3])
>>> A
array([[ 1., 0., 0.],
[ 0., 2., 0.],
[ 0., 0., 3.]])
>>> np.flipud(A)
array([[ 0., 0., 3.],
[ 0., 2., 0.],
[ 1., 0., 0.]])
>>> A = np.random.randn(2,3,5)
>>> np.all(np.flipud(A)==A[::-1,...])
True
>>> np.flipud([1,2])
array([2, 1])
"""
m = asanyarray(m)
if m.ndim < 1:
raise ValueError("Input must be >= 1-d.")
return m[::-1, ...]
def rot90(m, k=1):
"""
Rotate an array by 90 degrees in the counter-clockwise direction.
The first two dimensions are rotated; therefore, the array must be at
least 2-D.
Parameters
----------
m : array_like
Array of two or more dimensions.
k : integer
Number of times the array is rotated by 90 degrees.
Returns
-------
y : ndarray
Rotated array.
See Also
--------
fliplr : Flip an array horizontally.
flipud : Flip an array vertically.
Examples
--------
>>> m = np.array([[1,2],[3,4]], int)
>>> m
array([[1, 2],
[3, 4]])
>>> np.rot90(m)
array([[2, 4],
[1, 3]])
>>> np.rot90(m, 2)
array([[4, 3],
[2, 1]])
"""
m = asanyarray(m)
if m.ndim < 2:
raise ValueError("Input must >= 2-d.")
k = k % 4
if k == 0:
return m
elif k == 1:
return fliplr(m).swapaxes(0, 1)
elif k == 2:
return fliplr(flipud(m))
else:
# k == 3
return fliplr(m.swapaxes(0, 1))
def eye(N, M=None, k=0, dtype=float):
"""
Return a 2-D array with ones on the diagonal and zeros elsewhere.
Parameters
----------
N : int
Number of rows in the output.
M : int, optional
Number of columns in the output. If None, defaults to `N`.
k : int, optional
Index of the diagonal: 0 (the default) refers to the main diagonal,
a positive value refers to an upper diagonal, and a negative value
to a lower diagonal.
dtype : data-type, optional
Data-type of the returned array.
Returns
-------
I : ndarray of shape (N,M)
An array where all elements are equal to zero, except for the `k`-th
diagonal, whose values are equal to one.
See Also
--------
identity : (almost) equivalent function
diag : diagonal 2-D array from a 1-D array specified by the user.
Examples
--------
>>> np.eye(2, dtype=int)
array([[1, 0],
[0, 1]])
>>> np.eye(3, k=1)
array([[ 0., 1., 0.],
[ 0., 0., 1.],
[ 0., 0., 0.]])
"""
if M is None:
M = N
m = zeros((N, M), dtype=dtype)
if k >= M:
return m
if k >= 0:
i = k
else:
i = (-k) * M
m[:M-k].flat[i::M+1] = 1
return m
def diag(v, k=0):
"""
Extract a diagonal or construct a diagonal array.
See the more detailed documentation for ``numpy.diagonal`` if you use this
function to extract a diagonal and wish to write to the resulting array;
whether it returns a copy or a view depends on what version of numpy you
are using.
Parameters
----------
v : array_like
If `v` is a 2-D array, return a copy of its `k`-th diagonal.
If `v` is a 1-D array, return a 2-D array with `v` on the `k`-th
diagonal.
k : int, optional
Diagonal in question. The default is 0. Use `k>0` for diagonals
above the main diagonal, and `k<0` for diagonals below the main
diagonal.
Returns
-------
out : ndarray
The extracted diagonal or constructed diagonal array.
See Also
--------
diagonal : Return specified diagonals.
diagflat : Create a 2-D array with the flattened input as a diagonal.
trace : Sum along diagonals.
triu : Upper triangle of an array.
tril : Lower triangle of an array.
Examples
--------
>>> x = np.arange(9).reshape((3,3))
>>> x
array([[0, 1, 2],
[3, 4, 5],
[6, 7, 8]])
>>> np.diag(x)
array([0, 4, 8])
>>> np.diag(x, k=1)
array([1, 5])
>>> np.diag(x, k=-1)
array([3, 7])
>>> np.diag(np.diag(x))
array([[0, 0, 0],
[0, 4, 0],
[0, 0, 8]])
"""
v = asanyarray(v)
s = v.shape
if len(s) == 1:
n = s[0]+abs(k)
res = zeros((n, n), v.dtype)
if k >= 0:
i = k
else:
i = (-k) * n
res[:n-k].flat[i::n+1] = v
return res
elif len(s) == 2:
return diagonal(v, k)
else:
raise ValueError("Input must be 1- or 2-d.")
def diagflat(v, k=0):
"""
Create a two-dimensional array with the flattened input as a diagonal.
Parameters
----------
v : array_like
Input data, which is flattened and set as the `k`-th
diagonal of the output.
k : int, optional
Diagonal to set; 0, the default, corresponds to the "main" diagonal,
a positive (negative) `k` giving the number of the diagonal above
(below) the main.
Returns
-------
out : ndarray
The 2-D output array.
See Also
--------
diag : MATLAB work-alike for 1-D and 2-D arrays.
diagonal : Return specified diagonals.
trace : Sum along diagonals.
Examples
--------
>>> np.diagflat([[1,2], [3,4]])
array([[1, 0, 0, 0],
[0, 2, 0, 0],
[0, 0, 3, 0],
[0, 0, 0, 4]])
>>> np.diagflat([1,2], 1)
array([[0, 1, 0],
[0, 0, 2],
[0, 0, 0]])
"""
try:
wrap = v.__array_wrap__
except AttributeError:
wrap = None
v = asarray(v).ravel()
s = len(v)
n = s + abs(k)
res = zeros((n, n), v.dtype)
if (k >= 0):
i = arange(0, n-k)
fi = i+k+i*n
else:
i = arange(0, n+k)
fi = i+(i-k)*n
res.flat[fi] = v
if not wrap:
return res
return wrap(res)
def tri(N, M=None, k=0, dtype=float):
"""
An array with ones at and below the given diagonal and zeros elsewhere.
Parameters
----------
N : int
Number of rows in the array.
M : int, optional
Number of columns in the array.
By default, `M` is taken equal to `N`.
k : int, optional
The sub-diagonal at and below which the array is filled.
`k` = 0 is the main diagonal, while `k` < 0 is below it,
and `k` > 0 is above. The default is 0.
dtype : dtype, optional
Data type of the returned array. The default is float.
Returns
-------
tri : ndarray of shape (N, M)
Array with its lower triangle filled with ones and zero elsewhere;
        in other words ``T[i,j] == 1`` for ``j <= i + k``, 0 otherwise.
Examples
--------
>>> np.tri(3, 5, 2, dtype=int)
array([[1, 1, 1, 0, 0],
[1, 1, 1, 1, 0],
[1, 1, 1, 1, 1]])
>>> np.tri(3, 5, -1)
array([[ 0., 0., 0., 0., 0.],
[ 1., 0., 0., 0., 0.],
[ 1., 1., 0., 0., 0.]])
"""
if M is None:
M = N
m = greater_equal.outer(arange(N, dtype=_min_int(0, N)),
arange(-k, M-k, dtype=_min_int(-k, M - k)))
# Avoid making a copy if the requested type is already bool
m = m.astype(dtype, copy=False)
return m
def tril(m, k=0):
"""
Lower triangle of an array.
Return a copy of an array with elements above the `k`-th diagonal zeroed.
Parameters
----------
m : array_like, shape (M, N)
Input array.
k : int, optional
Diagonal above which to zero elements. `k = 0` (the default) is the
main diagonal, `k < 0` is below it and `k > 0` is above.
Returns
-------
tril : ndarray, shape (M, N)
Lower triangle of `m`, of same shape and data-type as `m`.
See Also
--------
triu : same thing, only for the upper triangle
Examples
--------
>>> np.tril([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1)
array([[ 0, 0, 0],
[ 4, 0, 0],
[ 7, 8, 0],
[10, 11, 12]])
"""
m = asanyarray(m)
mask = tri(*m.shape[-2:], k=k, dtype=bool)
return where(mask, m, zeros(1, m.dtype))
def triu(m, k=0):
"""
Upper triangle of an array.
Return a copy of a matrix with the elements below the `k`-th diagonal
zeroed.
Please refer to the documentation for `tril` for further details.
See Also
--------
tril : lower triangle of an array
Examples
--------
>>> np.triu([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1)
array([[ 1, 2, 3],
[ 4, 5, 6],
[ 0, 8, 9],
[ 0, 0, 12]])
"""
m = asanyarray(m)
mask = tri(*m.shape[-2:], k=k-1, dtype=bool)
return where(mask, zeros(1, m.dtype), m)
# Originally borrowed from John Hunter and matplotlib
def vander(x, N=None, increasing=False):
"""
Generate a Vandermonde matrix.
The columns of the output matrix are powers of the input vector. The
order of the powers is determined by the `increasing` boolean argument.
Specifically, when `increasing` is False, the `i`-th output column is
the input vector raised element-wise to the power of ``N - i - 1``. Such
a matrix with a geometric progression in each row is named for Alexandre-
Theophile Vandermonde.
Parameters
----------
x : array_like
1-D input array.
N : int, optional
Number of columns in the output. If `N` is not specified, a square
array is returned (``N = len(x)``).
increasing : bool, optional
Order of the powers of the columns. If True, the powers increase
from left to right, if False (the default) they are reversed.
.. versionadded:: 1.9.0
Returns
-------
out : ndarray
Vandermonde matrix. If `increasing` is False, the first column is
``x^(N-1)``, the second ``x^(N-2)`` and so forth. If `increasing` is
True, the columns are ``x^0, x^1, ..., x^(N-1)``.
See Also
--------
polynomial.polynomial.polyvander
Examples
--------
>>> x = np.array([1, 2, 3, 5])
>>> N = 3
>>> np.vander(x, N)
array([[ 1, 1, 1],
[ 4, 2, 1],
[ 9, 3, 1],
[25, 5, 1]])
>>> np.column_stack([x**(N-1-i) for i in range(N)])
array([[ 1, 1, 1],
[ 4, 2, 1],
[ 9, 3, 1],
[25, 5, 1]])
>>> x = np.array([1, 2, 3, 5])
>>> np.vander(x)
array([[ 1, 1, 1, 1],
[ 8, 4, 2, 1],
[ 27, 9, 3, 1],
[125, 25, 5, 1]])
>>> np.vander(x, increasing=True)
array([[ 1, 1, 1, 1],
[ 1, 2, 4, 8],
[ 1, 3, 9, 27],
[ 1, 5, 25, 125]])
The determinant of a square Vandermonde matrix is the product
of the differences between the values of the input vector:
>>> np.linalg.det(np.vander(x))
48.000000000000043
>>> (5-3)*(5-2)*(5-1)*(3-2)*(3-1)*(2-1)
48
"""
x = asarray(x)
if x.ndim != 1:
raise ValueError("x must be a one-dimensional array or sequence.")
if N is None:
N = len(x)
v = empty((len(x), N), dtype=promote_types(x.dtype, int))
tmp = v[:, ::-1] if not increasing else v
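# Fill the lowest-power column with ones and the next with x, then generate
# the remaining powers as a running product along each row.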
if N > 0:
tmp[:, 0] = 1
if N > 1:
tmp[:, 1:] = x[:, None]
multiply.accumulate(tmp[:, 1:], out=tmp[:, 1:], axis=1)
return v
def histogram2d(x, y, bins=10, range=None, normed=False, weights=None):
"""
Compute the bi-dimensional histogram of two data samples.
Parameters
----------
x : array_like, shape (N,)
An array containing the x coordinates of the points to be
histogrammed.
y : array_like, shape (N,)
An array containing the y coordinates of the points to be
histogrammed.
bins : int or array_like or [int, int] or [array, array], optional
The bin specification:
* If int, the number of bins for the two dimensions (nx=ny=bins).
* If array_like, the bin edges for the two dimensions
(x_edges=y_edges=bins).
* If [int, int], the number of bins in each dimension
(nx, ny = bins).
* If [array, array], the bin edges in each dimension
(x_edges, y_edges = bins).
* A combination [int, array] or [array, int], where int
is the number of bins and array is the bin edges.
range : array_like, shape(2,2), optional
The leftmost and rightmost edges of the bins along each dimension
(if not specified explicitly in the `bins` parameters):
``[[xmin, xmax], [ymin, ymax]]``. All values outside of this range
will be considered outliers and not tallied in the histogram.
normed : bool, optional
If False, returns the number of samples in each bin. If True,
returns the bin density ``bin_count / sample_count / bin_area``.
weights : array_like, shape(N,), optional
An array of values ``w_i`` weighing each sample ``(x_i, y_i)``.
Weights are normalized to 1 if `normed` is True. If `normed` is
False, the values of the returned histogram are equal to the sum of
the weights belonging to the samples falling into each bin.
Returns
-------
H : ndarray, shape(nx, ny)
The bi-dimensional histogram of samples `x` and `y`. Values in `x`
are histogrammed along the first dimension and values in `y` are
histogrammed along the second dimension.
xedges : ndarray, shape(nx,)
The bin edges along the first dimension.
yedges : ndarray, shape(ny,)
The bin edges along the second dimension.
See Also
--------
histogram : 1D histogram
histogramdd : Multidimensional histogram
Notes
-----
When `normed` is True, then the returned histogram is the sample
density, defined such that the sum over bins of the product
``bin_value * bin_area`` is 1.
Please note that the histogram does not follow the Cartesian convention
where `x` values are on the abscissa and `y` values on the ordinate
axis. Rather, `x` is histogrammed along the first dimension of the
array (vertical), and `y` along the second dimension of the array
(horizontal). This ensures compatibility with `histogramdd`.
Examples
--------
>>> import matplotlib as mpl
>>> import matplotlib.pyplot as plt
Construct a 2D-histogram with variable bin width. First define the bin
edges:
>>> xedges = [0, 1, 1.5, 3, 5]
>>> yedges = [0, 2, 3, 4, 6]
Next we create a histogram H with random bin content:
>>> x = np.random.normal(3, 1, 100)
>>> y = np.random.normal(1, 1, 100)
>>> H, xedges, yedges = np.histogram2d(y, x, bins=(xedges, yedges))
Or we fill the histogram H with a determined bin content:
>>> H = np.ones((4, 4)).cumsum().reshape(4, 4)
>>> print(H[::-1]) # This shows the bin content in the order as plotted
[[ 13. 14. 15. 16.]
[ 9. 10. 11. 12.]
[ 5. 6. 7. 8.]
[ 1. 2. 3. 4.]]
Imshow can only do an equidistant representation of bins:
>>> fig = plt.figure(figsize=(7, 3))
>>> ax = fig.add_subplot(131)
>>> ax.set_title('imshow: equidistant')
>>> im = plt.imshow(H, interpolation='nearest', origin='lower',
extent=[xedges[0], xedges[-1], yedges[0], yedges[-1]])
pcolormesh can display exact bin edges:
>>> ax = fig.add_subplot(132)
>>> ax.set_title('pcolormesh: exact bin edges')
>>> X, Y = np.meshgrid(xedges, yedges)
>>> ax.pcolormesh(X, Y, H)
>>> ax.set_aspect('equal')
NonUniformImage displays exact bin edges with interpolation:
>>> ax = fig.add_subplot(133)
>>> ax.set_title('NonUniformImage: interpolated')
>>> im = mpl.image.NonUniformImage(ax, interpolation='bilinear')
>>> xcenters = xedges[:-1] + 0.5 * (xedges[1:] - xedges[:-1])
>>> ycenters = yedges[:-1] + 0.5 * (yedges[1:] - yedges[:-1])
>>> im.set_data(xcenters, ycenters, H)
>>> ax.images.append(im)
>>> ax.set_xlim(xedges[0], xedges[-1])
>>> ax.set_ylim(yedges[0], yedges[-1])
>>> ax.set_aspect('equal')
>>> plt.show()
"""
from numpy import histogramdd
try:
N = len(bins)
except TypeError:
N = 1
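# A bin specification that is neither a scalar nor a pair of specs is
# treated as one array of edges shared by both dimensions.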
if N != 1 and N != 2:
xedges = yedges = asarray(bins, float)
bins = [xedges, yedges]
hist, edges = histogramdd([x, y], bins, range, normed, weights)
return hist, edges[0], edges[1]
def mask_indices(n, mask_func, k=0):
"""
Return the indices to access (n, n) arrays, given a masking function.
Assume `mask_func` is a function that, for a square array a of size
``(n, n)`` with a possible offset argument `k`, when called as
``mask_func(a, k)`` returns a new array with zeros in certain locations
(functions like `triu` or `tril` do precisely this). Then this function
returns the indices where the non-zero values would be located.
Parameters
----------
n : int
The returned indices will be valid to access arrays of shape (n, n).
mask_func : callable
A function whose call signature is similar to that of `triu`, `tril`.
That is, ``mask_func(x, k)`` returns a boolean array, shaped like `x`.
`k` is an optional argument to the function.
k : scalar
An optional argument which is passed through to `mask_func`. Functions
like `triu`, `tril` take a second argument that is interpreted as an
offset.
Returns
-------
indices : tuple of arrays.
The `n` arrays of indices corresponding to the locations where
``mask_func(np.ones((n, n)), k)`` is True.
See Also
--------
triu, tril, triu_indices, tril_indices
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
These are the indices that would allow you to access the upper triangular
part of any 3x3 array:
>>> iu = np.mask_indices(3, np.triu)
For example, if `a` is a 3x3 array:
>>> a = np.arange(9).reshape(3, 3)
>>> a
array([[0, 1, 2],
[3, 4, 5],
[6, 7, 8]])
>>> a[iu]
array([0, 1, 2, 4, 5, 8])
An offset can also be passed to the masking function. This gets us the
indices starting on the first diagonal right of the main one:
>>> iu1 = np.mask_indices(3, np.triu, 1)
with which we now extract only three elements:
>>> a[iu1]
array([1, 2, 5])
"""
m = ones((n, n), int)
a = mask_func(m, k)
return where(a != 0)
def tril_indices(n, k=0, m=None):
"""
Return the indices for the lower-triangle of an (n, m) array.
Parameters
----------
n : int
The row dimension of the arrays for which the returned
indices will be valid.
k : int, optional
Diagonal offset (see `tril` for details).
m : int, optional
.. versionadded:: 1.9.0
The column dimension of the arrays for which the returned
arrays will be valid.
By default `m` is taken equal to `n`.
Returns
-------
inds : tuple of arrays
The indices for the triangle. The returned tuple contains two arrays,
each with the indices along one dimension of the array.
See Also
--------
triu_indices : similar function, for upper-triangular.
mask_indices : generic function accepting an arbitrary mask function.
tril, triu
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
Compute two different sets of indices to access 4x4 arrays, one for the
lower triangular part starting at the main diagonal, and one starting two
diagonals further right:
>>> il1 = np.tril_indices(4)
>>> il2 = np.tril_indices(4, 2)
Here is how they can be used with a sample array:
>>> a = np.arange(16).reshape(4, 4)
>>> a
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
Both for indexing:
>>> a[il1]
array([ 0, 4, 5, 8, 9, 10, 12, 13, 14, 15])
And for assigning values:
>>> a[il1] = -1
>>> a
array([[-1, 1, 2, 3],
[-1, -1, 6, 7],
[-1, -1, -1, 11],
[-1, -1, -1, -1]])
These cover almost the whole array (two diagonals right of the main one):
>>> a[il2] = -10
>>> a
array([[-10, -10, -10, 3],
[-10, -10, -10, -10],
[-10, -10, -10, -10],
[-10, -10, -10, -10]])
"""
return where(tri(n, m, k=k, dtype=bool))
def tril_indices_from(arr, k=0):
"""
Return the indices for the lower-triangle of arr.
See `tril_indices` for full details.
Parameters
----------
arr : array_like
The indices will be valid for square arrays whose dimensions are
the same as arr.
k : int, optional
Diagonal offset (see `tril` for details).
See Also
--------
tril_indices, tril
Notes
-----
.. versionadded:: 1.4.0
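Examples
--------
As an illustration, the returned indices pick out the lower triangle of
the array they were computed from:
>>> a = np.arange(16).reshape(4, 4)
>>> a[np.tril_indices_from(a)]
array([ 0, 4, 5, 8, 9, 10, 12, 13, 14, 15])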
"""
if arr.ndim != 2:
raise ValueError("input array must be 2-d")
return tril_indices(arr.shape[-2], k=k, m=arr.shape[-1])
def triu_indices(n, k=0, m=None):
"""
Return the indices for the upper-triangle of an (n, m) array.
Parameters
----------
n : int
The size of the arrays for which the returned indices will
be valid.
k : int, optional
Diagonal offset (see `triu` for details).
m : int, optional
.. versionadded:: 1.9.0
The column dimension of the arrays for which the returned
arrays will be valid.
By default `m` is taken equal to `n`.
Returns
-------
inds : tuple, shape(2) of ndarrays, shape(`n`)
The indices for the triangle. The returned tuple contains two arrays,
each with the indices along one dimension of the array. Can be used
to slice a ndarray of shape(`n`, `n`).
See Also
--------
tril_indices : similar function, for lower-triangular.
mask_indices : generic function accepting an arbitrary mask function.
triu, tril
Notes
-----
.. versionadded:: 1.4.0
Examples
--------
Compute two different sets of indices to access 4x4 arrays, one for the
upper triangular part starting at the main diagonal, and one starting two
diagonals further right:
>>> iu1 = np.triu_indices(4)
>>> iu2 = np.triu_indices(4, 2)
Here is how they can be used with a sample array:
>>> a = np.arange(16).reshape(4, 4)
>>> a
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
Both for indexing:
>>> a[iu1]
array([ 0, 1, 2, 3, 5, 6, 7, 10, 11, 15])
And for assigning values:
>>> a[iu1] = -1
>>> a
array([[-1, -1, -1, -1],
[ 4, -1, -1, -1],
[ 8, 9, -1, -1],
[12, 13, 14, -1]])
These cover only a small part of the whole array (two diagonals right
of the main one):
>>> a[iu2] = -10
>>> a
array([[ -1, -1, -10, -10],
[ 4, -1, -1, -10],
[ 8, 9, -1, -1],
[ 12, 13, 14, -1]])
"""
return where(~tri(n, m, k=k-1, dtype=bool))
def triu_indices_from(arr, k=0):
"""
Return the indices for the upper-triangle of arr.
See `triu_indices` for full details.
Parameters
----------
arr : ndarray, shape(N, N)
The indices will be valid for square arrays.
k : int, optional
Diagonal offset (see `triu` for details).
Returns
-------
triu_indices_from : tuple, shape(2) of ndarray, shape(N)
Indices for the upper-triangle of `arr`.
See Also
--------
triu_indices, triu
Notes
-----
.. versionadded:: 1.4.0
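Examples
--------
As an illustration, the returned indices pick out the upper triangle of
the array they were computed from:
>>> a = np.arange(16).reshape(4, 4)
>>> a[np.triu_indices_from(a)]
array([ 0, 1, 2, 3, 5, 6, 7, 10, 11, 15])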
"""
if arr.ndim != 2:
raise ValueError("input array must be 2-d")
return triu_indices(arr.shape[-2], k=k, m=arr.shape[-1])
|
JackDandy/SickGear
|
refs/heads/master
|
lib/hachoir_py3/parser/common/win32_lang_id.py
|
2
|
"""
Windows 2000 - List of Locale IDs and Language Groups
Original data table:
http://www.microsoft.com/globaldev/reference/win2k/setup/lcid.mspx
"""
LANGUAGE_ID = {
0x0436: "Afrikaans",
0x041c: "Albanian",
0x0401: "Arabic Saudi Arabia",
0x0801: "Arabic Iraq",
0x0c01: "Arabic Egypt",
0x1001: "Arabic Libya",
0x1401: "Arabic Algeria",
0x1801: "Arabic Morocco",
0x1c01: "Arabic Tunisia",
0x2001: "Arabic Oman",
0x2401: "Arabic Yemen",
0x2801: "Arabic Syria",
0x2c01: "Arabic Jordan",
0x3001: "Arabic Lebanon",
0x3401: "Arabic Kuwait",
0x3801: "Arabic UAE",
0x3c01: "Arabic Bahrain",
0x4001: "Arabic Qatar",
0x042b: "Armenian",
0x042c: "Azeri Latin",
0x082c: "Azeri Cyrillic",
0x042d: "Basque",
0x0423: "Belarusian",
0x0402: "Bulgarian",
0x0403: "Catalan",
0x0404: "Chinese Taiwan",
0x0804: "Chinese PRC",
0x0c04: "Chinese Hong Kong",
0x1004: "Chinese Singapore",
0x1404: "Chinese Macau",
0x041a: "Croatian",
0x0405: "Czech",
0x0406: "Danish",
0x0413: "Dutch Standard",
0x0813: "Dutch Belgian",
0x0409: "English United States",
0x0809: "English United Kingdom",
0x0c09: "English Australian",
0x1009: "English Canadian",
0x1409: "English New Zealand",
0x1809: "English Irish",
0x1c09: "English South Africa",
0x2009: "English Jamaica",
0x2409: "English Caribbean",
0x2809: "English Belize",
0x2c09: "English Trinidad",
0x3009: "English Zimbabwe",
0x3409: "English Philippines",
0x0425: "Estonian",
0x0438: "Faeroese",
0x0429: "Farsi",
0x040b: "Finnish",
0x040c: "French Standard",
0x080c: "French Belgian",
0x0c0c: "French Canadian",
0x100c: "French Swiss",
0x140c: "French Luxembourg",
0x180c: "French Monaco",
0x0437: "Georgian",
0x0407: "German Standard",
0x0807: "German Swiss",
0x0c07: "German Austrian",
0x1007: "German Luxembourg",
0x1407: "German Liechtenstein",
0x0408: "Greek",
0x040d: "Hebrew",
0x0439: "Hindi",
0x040e: "Hungarian",
0x040f: "Icelandic",
0x0421: "Indonesian",
0x0410: "Italian Standard",
0x0810: "Italian Swiss",
0x0411: "Japanese",
0x043f: "Kazakh",
0x0457: "Konkani",
0x0412: "Korean",
0x0426: "Latvian",
0x0427: "Lithuanian",
0x042f: "Macedonian",
0x043e: "Malay Malaysia",
0x083e: "Malay Brunei Darussalam",
0x044e: "Marathi",
0x0414: "Norwegian Bokmal",
0x0814: "Norwegian Nynorsk",
0x0415: "Polish",
0x0416: "Portuguese Brazilian",
0x0816: "Portuguese Standard",
0x0418: "Romanian",
0x0419: "Russian",
0x044f: "Sanskrit",
0x081a: "Serbian Latin",
0x0c1a: "Serbian Cyrillic",
0x041b: "Slovak",
0x0424: "Slovenian",
0x040a: "Spanish Traditional Sort",
0x080a: "Spanish Mexican",
0x0c0a: "Spanish Modern Sort",
0x100a: "Spanish Guatemala",
0x140a: "Spanish Costa Rica",
0x180a: "Spanish Panama",
0x1c0a: "Spanish Dominican Republic",
0x200a: "Spanish Venezuela",
0x240a: "Spanish Colombia",
0x280a: "Spanish Peru",
0x2c0a: "Spanish Argentina",
0x300a: "Spanish Ecuador",
0x340a: "Spanish Chile",
0x380a: "Spanish Uruguay",
0x3c0a: "Spanish Paraguay",
0x400a: "Spanish Bolivia",
0x440a: "Spanish El Salvador",
0x480a: "Spanish Honduras",
0x4c0a: "Spanish Nicaragua",
0x500a: "Spanish Puerto Rico",
0x0441: "Swahili",
0x041d: "Swedish",
0x081d: "Swedish Finland",
0x0449: "Tamil",
0x0444: "Tatar",
0x041e: "Thai",
0x041f: "Turkish",
0x0422: "Ukrainian",
0x0420: "Urdu",
0x0443: "Uzbek Latin",
0x0843: "Uzbek Cyrillic",
0x042a: "Vietnamese",
}
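# Illustrative use (not part of the original table): LANGUAGE_ID.get(0x0409)
# returns "English United States"; LANGUAGE_ID.get(lcid, "Unknown") gives a
# safe fallback for ids that are not listed here.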
|
unho/translate
|
refs/heads/master
|
translate/misc/file_discovery.py
|
1
|
# -*- coding: utf-8 -*-
#
# Copyright 2008 Zuza Software Foundation
# Copyright 2014 F Wolff
#
# This file is part of translate.
#
# translate is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# translate is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
#
__all__ = ('get_abs_data_filename', )
import os
import sys
def get_abs_data_filename(path_parts, basedirs=None):
"""Get the absolute path to the given file- or directory name in the
currently running application's data directory.
:type path_parts: list
:param path_parts: The path parts that can be joined by ``os.path.join()``.
"""
if isinstance(path_parts, str):
path_parts = [path_parts]
DATA_DIRS = [
["..", "share"],
]
BASE_DIRS = basedirs
if not basedirs:
# Useful for running from checkout or similar layout. This will find
# Toolkit's data files
base = os.path.dirname(__file__)
BASE_DIRS = [
base,
os.path.join(base, os.path.pardir),
]
# Freedesktop standard
if 'XDG_DATA_HOME' in os.environ:
BASE_DIRS += [os.environ['XDG_DATA_HOME']]
if 'XDG_DATA_DIRS' in os.environ:
BASE_DIRS += os.environ['XDG_DATA_DIRS'].split(os.path.pathsep)
# Mac OSX app bundles
if 'RESOURCEPATH' in os.environ:
BASE_DIRS += os.environ['RESOURCEPATH'].split(os.path.pathsep)
if getattr(sys, 'frozen', False):
# We know exactly what the layout is when we package for Windows, so
# let's avoid unnecessary paths
DATA_DIRS = [["share"]]
BASE_DIRS = []
BASE_DIRS += [
# installed linux (/usr/bin) as well as Windows
os.path.dirname(sys.executable),
]
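# Try every (base directory, data directory) combination in order and
# return the first candidate path that actually exists on disk.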
for basepath, data_dir in ((x, y) for x in BASE_DIRS for y in DATA_DIRS):
dir_and_filename = data_dir + path_parts
datafile = os.path.join(basepath or os.path.dirname(__file__),
*dir_and_filename)
if os.path.exists(datafile):
return datafile
raise Exception('Could not find "%s"' % (os.path.join(*path_parts)))
|
michealcarrerweb/LHVent_app
|
refs/heads/master
|
LHV_app/settings.py
|
1
|
import os
PROJECT_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir))
PACKAGE_ROOT = os.path.abspath(os.path.dirname(__file__))
BASE_DIR = PACKAGE_ROOT
DEBUG = True
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'LHVent_app',
'USER': 'michealcarrer',
'PASSWORD': 'Treehouse1010',
'HOST': 'localhost',
'PORT': '',
}
}
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
ALLOWED_HOSTS = [
"localhost",
"127.0.0.1",
]
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = "UTC"
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = "en-us"
SITE_ID = int(os.environ.get("SITE_ID", 1))
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = os.path.join(PACKAGE_ROOT, "site_media", "media")
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = "/site_media/media/"
# Absolute path to the directory static files should be collected to.
# Don"t put anything in this directory yourself; store your static files
# in apps" "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = os.path.join(PACKAGE_ROOT, "site_media", "static")
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = "/site_media/static/"
# Additional locations of static files
STATICFILES_DIRS = [
os.path.join(PROJECT_ROOT, "static", "dist"),
]
STATICFILES_STORAGE = "django.contrib.staticfiles.storage.ManifestStaticFilesStorage"
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = [
"django.contrib.staticfiles.finders.FileSystemFinder",
"django.contrib.staticfiles.finders.AppDirectoriesFinder",
]
# Make this unique, and don't share it with anybody.
SECRET_KEY = "hyv*5n&9v&1xql7i7()e+(w5b#07aogy_t6uh#evuhni%xi(@o"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [
os.path.join(PACKAGE_ROOT, "templates"),
],
"APP_DIRS": True,
"OPTIONS": {
"debug": DEBUG,
"context_processors": [
"django.contrib.auth.context_processors.auth",
"django.template.context_processors.debug",
"django.template.context_processors.i18n",
"django.template.context_processors.media",
"django.template.context_processors.static",
"django.template.context_processors.tz",
"django.template.context_processors.request",
"django.contrib.messages.context_processors.messages",
"account.context_processors.account",
"pinax_theme_bootstrap.context_processors.theme",
],
},
},
]
MIDDLEWARE = [
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.auth.middleware.SessionAuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
]
ROOT_URLCONF = "LHV_app.urls"
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = "LHV_app.wsgi.application"
INSTALLED_APPS = [
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.messages",
"django.contrib.sessions",
"django.contrib.sites",
"django.contrib.staticfiles",
# theme
"bootstrapform",
"bootstrap3",
"pinax_theme_bootstrap",
# external
"account",
"pinax.eventlog",
"pinax.webanalytics",
"suit",
"versatileimagefield",
"schedule",
"datetimewidget",
"phonenumber_field",
"rest_framework",
# project
"LHV_app",
"company",
"customer_finance",
"equipment",
"finance",
"hourly",
"operation_finance",
"stock",
"service",
"time_log",
"work_order",
]
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
"version": 1,
"disable_existing_loggers": False,
"filters": {
"require_debug_false": {
"()": "django.utils.log.RequireDebugFalse"
}
},
"handlers": {
"mail_admins": {
"level": "ERROR",
"filters": ["require_debug_false"],
"class": "django.utils.log.AdminEmailHandler"
}
},
"loggers": {
"django.request": {
"handlers": ["mail_admins"],
"level": "ERROR",
"propagate": True,
},
}
}
FIXTURE_DIRS = [
os.path.join(PROJECT_ROOT, "fixtures"),
]
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
ACCOUNT_OPEN_SIGNUP = True
ACCOUNT_EMAIL_UNIQUE = True
ACCOUNT_EMAIL_CONFIRMATION_REQUIRED = False
ACCOUNT_LOGIN_REDIRECT_URL = "home"
ACCOUNT_LOGOUT_REDIRECT_URL = "home"
ACCOUNT_EMAIL_CONFIRMATION_EXPIRE_DAYS = 2
ACCOUNT_USE_AUTH_AUTHENTICATE = True
AUTHENTICATION_BACKENDS = [
"account.auth_backends.UsernameAuthenticationBackend",
]
REST_FRAMEWORK = {
'DEFAULT_PERMISSION_CLASSES': [
'rest_framework.permissions.IsAdminUser',
],
'PAGE_SIZE': 10
}
|
michalliu/OpenWrt-Firefly-Libraries
|
refs/heads/master
|
staging_dir/target-mipsel_1004kc+dsp_uClibc-0.9.33.2/usr/lib/python3.4/idlelib/idle_test/mock_idle.py
|
81
|
'''Mock classes that imitate idlelib modules or classes.
Attributes and methods will be added as needed for tests.
'''
from idlelib.idle_test.mock_tk import Text
class Func:
'''Mock function captures args and returns result set by test.
Attributes:
self.called - records call even if no args, kwds passed.
self.result - set by init, returned by call.
self.args - captures positional arguments.
self.kwds - captures keyword arguments.
Most common use will probably be to mock methods.
Mock_tk.Var and Mbox_func are special variants of this.
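Illustrative use in a test (the editor object here is hypothetical):
editor.getline = Func(result='a line\n')
editor.getline(42) # returns 'a line\n', records args=(42,)
assert editor.getline.called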
'''
def __init__(self, result=None):
self.called = False
self.result = result
self.args = None
self.kwds = None
def __call__(self, *args, **kwds):
self.called = True
self.args = args
self.kwds = kwds
if isinstance(self.result, BaseException):
raise self.result
else:
return self.result
class Editor:
'''Minimally imitate EditorWindow.EditorWindow class.
'''
def __init__(self, flist=None, filename=None, key=None, root=None):
self.text = Text()
self.undo = UndoDelegator()
def get_selection_indices(self):
first = self.text.index('1.0')
last = self.text.index('end')
return first, last
class UndoDelegator:
'''Minimally imitate UndoDelegator.UndoDelegator class.
'''
# A real undo block is only needed for user interaction.
def undo_block_start(*args):
pass
def undo_block_stop(*args):
pass
|
konstruktoid/ansible-upstream
|
refs/heads/devel
|
lib/ansible/modules/cloud/heroku/__init__.py
|
12133432
| |
marionleborgne/cloudbrain_examples
|
refs/heads/master
|
cloudbrain_examples/__init__.py
|
12133432
| |
JNRowe/upoints
|
refs/heads/main
|
upoints/baken.py
|
1
|
#
"""baken - Imports baken data files."""
# Copyright © 2007-2021 James Rowe <jnrowe@gmail.com>
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of upoints.
#
# upoints is free software: you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# upoints is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# upoints. If not, see <http://www.gnu.org/licenses/>.
import logging
import re
from configparser import ConfigParser
from . import point, utils
class Baken(point.Point):
"""Class for representing location from baken_ data files.
.. versionadded:: 0.4.0
.. _baken: http://www.qsl.net:80/g4klx/
"""
def __init__(
self,
latitude,
longitude,
antenna=None,
direction=None,
frequency=None,
height=None,
locator=None,
mode=None,
operator=None,
power=None,
qth=None,
):
"""Initialise a new ``Baken`` object.
Args:
latitude (float): Location’s latitude
longitude (float): Location’s longitude
antenna (str): Location’s antenna type
direction (tuple of int): Antenna’s direction
frequency (float): Transmitter’s frequency
height (float): Antenna’s height
locator (str): Location’s Maidenhead locator string
mode (str): Transmitter’s mode
operator (tuple of str): Transmitter’s operator
power (float): Transmitter’s power
qth (str): Location’s qth
Raises:
LookupError: No position data to use
"""
if latitude is not None:
super(Baken, self).__init__(latitude, longitude)
elif locator is not None:
latitude, longitude = utils.from_grid_locator(locator)
super(Baken, self).__init__(latitude, longitude)
else:
raise LookupError(
'Unable to instantiate baken object, no '
'latitude or locator string'
)
self.antenna = antenna
self.direction = direction
self.frequency = frequency
self.height = height
self._locator = locator
self.mode = mode
self.operator = operator
self.power = power
self.qth = qth
@property
def locator(self):
return self._locator
@locator.setter
def locator(self, value):
"""Update the locator, and trigger a latitude and longitude update.
Args:
value (str): New Maidenhead locator string
"""
self._locator = value
self._latitude, self._longitude = utils.from_grid_locator(value)
def __str__(self):
"""Pretty printed location string.
Args:
mode (str): Coordinate formatting system to use
Returns:
str: Human readable string representation of ``Baken`` object
"""
text = super(Baken, self).__format__('dms')
if self._locator:
text = f'{self._locator} ({text})'
return text
class Bakens(point.KeyedPoints):
"""Class for representing a group of :class:`Baken` objects.
.. versionadded:: 0.5.1
"""
def __init__(self, baken_file=None):
"""Initialise a new `Bakens` object."""
super(Bakens, self).__init__()
if baken_file:
self.import_locations(baken_file)
def import_locations(self, baken_file):
"""Import baken data files.
``import_locations()`` returns a dictionary with keys containing the
section title, and values consisting of a collection of :class:`Baken`
objects.
It expects data files in the format used by the baken_ amateur radio
package, which is Windows INI style files such as:
.. code-block:: ini
[Abeche, Chad]
latitude=14.460000
longitude=20.680000
height=0.000000
[GB3BUX]
frequency=50.000
locator=IO93BF
power=25 TX
antenna=2 x Turnstile
height=460
mode=A1A
The reader uses the :mod:`configparser` module, so should be reasonably
robust against encodings and such. The above file processed by
``import_locations()`` will return the following ``dict`` object::
{'Abeche, Chad': Baken(14.460, 20.680, None, None, None, 0.000,
None, None, None, None, None),
'GB3BUX': Baken(None, None, '2 x Turnstile', None, 50.000,
460.000, 'IO93BF', 'A1A', None, 25, None)}
Args:
baken_file (iter): Baken data to read
Returns:
dict: Named locations and their associated values
.. _baken: http://www.qsl.net:80/g4klx/
"""
self._baken_file = baken_file
data = ConfigParser()
if hasattr(baken_file, 'readlines'):
data.read_file(baken_file)
elif isinstance(baken_file, list):
data.read(baken_file)
elif isinstance(baken_file, str):
with open(baken_file) as f:
data.read_file(f)
else:
raise TypeError(
'Unable to handle data of type %r' % type(baken_file)
)
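# A Maidenhead locator such as 'IO93BF': two letters, two digits, two letters.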
valid_locator = re.compile(r'[A-Z]{2}\d{2}[A-Z]{2}')
for name in data.sections():
elements = {}
for item in (
'latitude',
'longitude',
'antenna',
'direction',
'frequency',
'height',
'locator',
'mode',
'operator',
'power',
'qth',
):
if data.has_option(name, item):
if item in ('antenna', 'locator', 'mode', 'power', 'qth'):
elements[item] = data.get(name, item)
elif item == 'operator':
elements[item] = data.get(name, item).split(',')
elif item == 'direction':
elements[item] = data.get(name, item).split(',')
else:
try:
elements[item] = data.getfloat(name, item)
except ValueError:
logging.debug(
'Multiple frequency workaround for '
'%r entry',
name,
)
elements[item] = map(
float, data.get(name, item).split(',')
)
else:
elements[item] = None
if elements['latitude'] is None and not valid_locator.match(
elements['locator']
):
logging.info(
'Skipping %r entry, as it contains no location ' 'data',
name,
)
continue
self[name] = Baken(**elements)
|
NewpTone/stacklab-nova
|
refs/heads/master
|
nova/tests/api/openstack/test_xmlutil.py
|
14
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from lxml import etree
from nova.api.openstack import xmlutil
from nova import test
class SelectorTest(test.TestCase):
obj_for_test = {
'test': {
'name': 'test',
'values': [1, 2, 3],
'attrs': {
'foo': 1,
'bar': 2,
'baz': 3,
},
},
}
def test_empty_selector(self):
sel = xmlutil.Selector()
self.assertEqual(len(sel.chain), 0)
self.assertEqual(sel(self.obj_for_test), self.obj_for_test)
def test_dict_selector(self):
sel = xmlutil.Selector('test')
self.assertEqual(len(sel.chain), 1)
self.assertEqual(sel.chain[0], 'test')
self.assertEqual(sel(self.obj_for_test),
self.obj_for_test['test'])
def test_datum_selector(self):
sel = xmlutil.Selector('test', 'name')
self.assertEqual(len(sel.chain), 2)
self.assertEqual(sel.chain[0], 'test')
self.assertEqual(sel.chain[1], 'name')
self.assertEqual(sel(self.obj_for_test), 'test')
def test_list_selector(self):
sel = xmlutil.Selector('test', 'values', 0)
self.assertEqual(len(sel.chain), 3)
self.assertEqual(sel.chain[0], 'test')
self.assertEqual(sel.chain[1], 'values')
self.assertEqual(sel.chain[2], 0)
self.assertEqual(sel(self.obj_for_test), 1)
def test_items_selector(self):
sel = xmlutil.Selector('test', 'attrs', xmlutil.get_items)
self.assertEqual(len(sel.chain), 3)
self.assertEqual(sel.chain[2], xmlutil.get_items)
for key, val in sel(self.obj_for_test):
self.assertEqual(self.obj_for_test['test']['attrs'][key], val)
def test_missing_key_selector(self):
sel = xmlutil.Selector('test2', 'attrs')
self.assertEqual(sel(self.obj_for_test), None)
self.assertRaises(KeyError, sel, self.obj_for_test, True)
def test_constant_selector(self):
sel = xmlutil.ConstantSelector('Foobar')
self.assertEqual(sel.value, 'Foobar')
self.assertEqual(sel(self.obj_for_test), 'Foobar')
class TemplateElementTest(test.TestCase):
def test_element_initial_attributes(self):
# Create a template element with some attributes
elem = xmlutil.TemplateElement('test', attrib=dict(a=1, b=2, c=3),
c=4, d=5, e=6)
# Verify all the attributes are as expected
expected = dict(a=1, b=2, c=4, d=5, e=6)
for k, v in expected.items():
self.assertEqual(elem.attrib[k].chain[0], v)
def test_element_get_attributes(self):
expected = dict(a=1, b=2, c=3)
# Create a template element with some attributes
elem = xmlutil.TemplateElement('test', attrib=expected)
# Verify that get() retrieves the attributes
for k, v in expected.items():
self.assertEqual(elem.get(k).chain[0], v)
def test_element_set_attributes(self):
attrs = dict(a=None, b='foo', c=xmlutil.Selector('foo', 'bar'))
# Create a bare template element with no attributes
elem = xmlutil.TemplateElement('test')
# Set the attribute values
for k, v in attrs.items():
elem.set(k, v)
# Now verify what got set
self.assertEqual(len(elem.attrib['a'].chain), 1)
self.assertEqual(elem.attrib['a'].chain[0], 'a')
self.assertEqual(len(elem.attrib['b'].chain), 1)
self.assertEqual(elem.attrib['b'].chain[0], 'foo')
self.assertEqual(elem.attrib['c'], attrs['c'])
def test_element_attribute_keys(self):
attrs = dict(a=1, b=2, c=3, d=4)
expected = set(attrs.keys())
# Create a template element with some attributes
elem = xmlutil.TemplateElement('test', attrib=attrs)
# Now verify keys
self.assertEqual(set(elem.keys()), expected)
def test_element_attribute_items(self):
expected = dict(a=xmlutil.Selector(1),
b=xmlutil.Selector(2),
c=xmlutil.Selector(3))
keys = set(expected.keys())
# Create a template element with some attributes
elem = xmlutil.TemplateElement('test', attrib=expected)
# Now verify items
for k, v in elem.items():
self.assertEqual(expected[k], v)
keys.remove(k)
# Did we visit all keys?
self.assertEqual(len(keys), 0)
def test_element_selector_none(self):
# Create a template element with no selector
elem = xmlutil.TemplateElement('test')
self.assertEqual(len(elem.selector.chain), 0)
def test_element_selector_string(self):
# Create a template element with a string selector
elem = xmlutil.TemplateElement('test', selector='test')
self.assertEqual(len(elem.selector.chain), 1)
self.assertEqual(elem.selector.chain[0], 'test')
def test_element_selector(self):
sel = xmlutil.Selector('a', 'b')
# Create a template element with an explicit selector
elem = xmlutil.TemplateElement('test', selector=sel)
self.assertEqual(elem.selector, sel)
def test_element_subselector_none(self):
# Create a template element with no subselector
elem = xmlutil.TemplateElement('test')
self.assertEqual(elem.subselector, None)
def test_element_subselector_string(self):
# Create a template element with a string subselector
elem = xmlutil.TemplateElement('test', subselector='test')
self.assertEqual(len(elem.subselector.chain), 1)
self.assertEqual(elem.subselector.chain[0], 'test')
def test_element_subselector(self):
sel = xmlutil.Selector('a', 'b')
# Create a template element with an explicit subselector
elem = xmlutil.TemplateElement('test', subselector=sel)
self.assertEqual(elem.subselector, sel)
def test_element_append_child(self):
# Create an element
elem = xmlutil.TemplateElement('test')
# Make sure the element starts off empty
self.assertEqual(len(elem), 0)
# Create a child element
child = xmlutil.TemplateElement('child')
# Append the child to the parent
elem.append(child)
# Verify that the child was added
self.assertEqual(len(elem), 1)
self.assertEqual(elem[0], child)
self.assertEqual('child' in elem, True)
self.assertEqual(elem['child'], child)
# Ensure that multiple children of the same name are rejected
child2 = xmlutil.TemplateElement('child')
self.assertRaises(KeyError, elem.append, child2)
def test_element_extend_children(self):
# Create an element
elem = xmlutil.TemplateElement('test')
# Make sure the element starts off empty
self.assertEqual(len(elem), 0)
# Create a few children
children = [
xmlutil.TemplateElement('child1'),
xmlutil.TemplateElement('child2'),
xmlutil.TemplateElement('child3'),
]
# Extend the parent by those children
elem.extend(children)
# Verify that the children were added
self.assertEqual(len(elem), 3)
for idx in range(len(elem)):
self.assertEqual(children[idx], elem[idx])
self.assertEqual(children[idx].tag in elem, True)
self.assertEqual(elem[children[idx].tag], children[idx])
# Ensure that multiple children of the same name are rejected
children2 = [
xmlutil.TemplateElement('child4'),
xmlutil.TemplateElement('child1'),
]
self.assertRaises(KeyError, elem.extend, children2)
# Also ensure that child4 was not added
self.assertEqual(len(elem), 3)
self.assertEqual(elem[-1].tag, 'child3')
def test_element_insert_child(self):
# Create an element
elem = xmlutil.TemplateElement('test')
# Make sure the element starts off empty
self.assertEqual(len(elem), 0)
# Create a few children
children = [
xmlutil.TemplateElement('child1'),
xmlutil.TemplateElement('child2'),
xmlutil.TemplateElement('child3'),
]
# Extend the parent by those children
elem.extend(children)
# Create a child to insert
child = xmlutil.TemplateElement('child4')
# Insert it
elem.insert(1, child)
# Ensure the child was inserted in the right place
self.assertEqual(len(elem), 4)
children.insert(1, child)
for idx in range(len(elem)):
self.assertEqual(children[idx], elem[idx])
self.assertEqual(children[idx].tag in elem, True)
self.assertEqual(elem[children[idx].tag], children[idx])
# Ensure that multiple children of the same name are rejected
child2 = xmlutil.TemplateElement('child2')
self.assertRaises(KeyError, elem.insert, 2, child2)
def test_element_remove_child(self):
# Create an element
elem = xmlutil.TemplateElement('test')
# Make sure the element starts off empty
self.assertEqual(len(elem), 0)
# Create a few children
children = [
xmlutil.TemplateElement('child1'),
xmlutil.TemplateElement('child2'),
xmlutil.TemplateElement('child3'),
]
# Extend the parent by those children
elem.extend(children)
# Create a test child to remove
child = xmlutil.TemplateElement('child2')
# Try to remove it
self.assertRaises(ValueError, elem.remove, child)
# Ensure that no child was removed
self.assertEqual(len(elem), 3)
# Now remove a legitimate child
elem.remove(children[1])
# Ensure that the child was removed
self.assertEqual(len(elem), 2)
self.assertEqual(elem[0], children[0])
self.assertEqual(elem[1], children[2])
self.assertEqual('child2' in elem, False)
# Ensure the child cannot be retrieved by name
def get_key(elem, key):
return elem[key]
self.assertRaises(KeyError, get_key, elem, 'child2')
def test_element_text(self):
# Create an element
elem = xmlutil.TemplateElement('test')
# Ensure that it has no text
self.assertEqual(elem.text, None)
# Try setting it to a string and ensure it becomes a selector
elem.text = 'test'
self.assertEqual(hasattr(elem.text, 'chain'), True)
self.assertEqual(len(elem.text.chain), 1)
self.assertEqual(elem.text.chain[0], 'test')
# Try resetting the text to None
elem.text = None
self.assertEqual(elem.text, None)
# Now make up a selector and try setting the text to that
sel = xmlutil.Selector()
elem.text = sel
self.assertEqual(elem.text, sel)
# Finally, try deleting the text and see what happens
del elem.text
self.assertEqual(elem.text, None)
def test_apply_attrs(self):
# Create a template element
attrs = dict(attr1=xmlutil.ConstantSelector(1),
attr2=xmlutil.ConstantSelector(2))
tmpl_elem = xmlutil.TemplateElement('test', attrib=attrs)
# Create an etree element
elem = etree.Element('test')
# Apply the template to the element
tmpl_elem.apply(elem, None)
# Now, verify the correct attributes were set
for k, v in elem.items():
self.assertEqual(str(attrs[k].value), v)
def test_apply_text(self):
# Create a template element
tmpl_elem = xmlutil.TemplateElement('test')
tmpl_elem.text = xmlutil.ConstantSelector(1)
# Create an etree element
elem = etree.Element('test')
# Apply the template to the element
tmpl_elem.apply(elem, None)
# Now, verify the text was set
self.assertEqual(str(tmpl_elem.text.value), elem.text)
def test__render(self):
attrs = dict(attr1=xmlutil.ConstantSelector(1),
attr2=xmlutil.ConstantSelector(2),
attr3=xmlutil.ConstantSelector(3))
# Create a master template element
master_elem = xmlutil.TemplateElement('test', attr1=attrs['attr1'])
# Create a couple of slave template element
slave_elems = [
xmlutil.TemplateElement('test', attr2=attrs['attr2']),
xmlutil.TemplateElement('test', attr3=attrs['attr3']),
]
# Try the render
elem = master_elem._render(None, None, slave_elems, None)
# Verify the particulars of the render
self.assertEqual(elem.tag, 'test')
self.assertEqual(len(elem.nsmap), 0)
for k, v in elem.items():
self.assertEqual(str(attrs[k].value), v)
# Create a parent for the element to be rendered
parent = etree.Element('parent')
# Try the render again...
elem = master_elem._render(parent, None, slave_elems, dict(a='foo'))
# Verify the particulars of the render
self.assertEqual(len(parent), 1)
self.assertEqual(parent[0], elem)
self.assertEqual(len(elem.nsmap), 1)
self.assertEqual(elem.nsmap['a'], 'foo')
def test_render(self):
# Create a template element
tmpl_elem = xmlutil.TemplateElement('test')
tmpl_elem.text = xmlutil.Selector()
# Create the object we're going to render
obj = ['elem1', 'elem2', 'elem3', 'elem4']
# Try a render with no object
elems = tmpl_elem.render(None, None)
self.assertEqual(len(elems), 0)
# Try a render with one object
elems = tmpl_elem.render(None, 'foo')
self.assertEqual(len(elems), 1)
self.assertEqual(elems[0][0].text, 'foo')
self.assertEqual(elems[0][1], 'foo')
# Now, try rendering an object with multiple entries
parent = etree.Element('parent')
elems = tmpl_elem.render(parent, obj)
self.assertEqual(len(elems), 4)
# Check the results
for idx in range(len(obj)):
self.assertEqual(elems[idx][0].text, obj[idx])
self.assertEqual(elems[idx][1], obj[idx])
def test_subelement(self):
# Try the SubTemplateElement constructor
parent = xmlutil.SubTemplateElement(None, 'parent')
self.assertEqual(parent.tag, 'parent')
self.assertEqual(len(parent), 0)
# Now try it with a parent element
child = xmlutil.SubTemplateElement(parent, 'child')
self.assertEqual(child.tag, 'child')
self.assertEqual(len(parent), 1)
self.assertEqual(parent[0], child)
def test_wrap(self):
# These are strange methods, but they make things easier
elem = xmlutil.TemplateElement('test')
self.assertEqual(elem.unwrap(), elem)
self.assertEqual(elem.wrap().root, elem)
def test_dyntag(self):
obj = ['a', 'b', 'c']
# Create a template element with a dynamic tag
tmpl_elem = xmlutil.TemplateElement(xmlutil.Selector())
# Try the render
parent = etree.Element('parent')
elems = tmpl_elem.render(parent, obj)
# Verify the particulars of the render
self.assertEqual(len(elems), len(obj))
for idx in range(len(obj)):
self.assertEqual(elems[idx][0].tag, obj[idx])
class TemplateTest(test.TestCase):
def test_wrap(self):
# These are strange methods, but they make things easier
elem = xmlutil.TemplateElement('test')
tmpl = xmlutil.Template(elem)
self.assertEqual(tmpl.unwrap(), elem)
self.assertEqual(tmpl.wrap(), tmpl)
def test__siblings(self):
# Set up a basic template
elem = xmlutil.TemplateElement('test')
tmpl = xmlutil.Template(elem)
# Check that we get the right siblings
siblings = tmpl._siblings()
self.assertEqual(len(siblings), 1)
self.assertEqual(siblings[0], elem)
def test__nsmap(self):
# Set up a basic template
elem = xmlutil.TemplateElement('test')
tmpl = xmlutil.Template(elem, nsmap=dict(a="foo"))
# Check out that we get the right namespace dictionary
nsmap = tmpl._nsmap()
self.assertNotEqual(id(nsmap), id(tmpl.nsmap))
self.assertEqual(len(nsmap), 1)
self.assertEqual(nsmap['a'], 'foo')
def test_master_attach(self):
# Set up a master template
elem = xmlutil.TemplateElement('test')
tmpl = xmlutil.MasterTemplate(elem, 1)
# Make sure it has a root but no slaves
self.assertEqual(tmpl.root, elem)
self.assertEqual(len(tmpl.slaves), 0)
# Try to attach an invalid slave
bad_elem = xmlutil.TemplateElement('test2')
self.assertRaises(ValueError, tmpl.attach, bad_elem)
self.assertEqual(len(tmpl.slaves), 0)
# Try to attach an invalid and a valid slave
good_elem = xmlutil.TemplateElement('test')
self.assertRaises(ValueError, tmpl.attach, good_elem, bad_elem)
self.assertEqual(len(tmpl.slaves), 0)
# Try to attach an inapplicable template
class InapplicableTemplate(xmlutil.Template):
def apply(self, master):
return False
inapp_tmpl = InapplicableTemplate(good_elem)
tmpl.attach(inapp_tmpl)
self.assertEqual(len(tmpl.slaves), 0)
# Now try attaching an applicable template
tmpl.attach(good_elem)
self.assertEqual(len(tmpl.slaves), 1)
self.assertEqual(tmpl.slaves[0].root, good_elem)
def test_master_copy(self):
# Construct a master template
elem = xmlutil.TemplateElement('test')
tmpl = xmlutil.MasterTemplate(elem, 1, nsmap=dict(a='foo'))
# Give it a slave
slave = xmlutil.TemplateElement('test')
tmpl.attach(slave)
# Construct a copy
copy = tmpl.copy()
# Check to see if we actually managed a copy
self.assertNotEqual(tmpl, copy)
self.assertEqual(tmpl.root, copy.root)
self.assertEqual(tmpl.version, copy.version)
self.assertEqual(id(tmpl.nsmap), id(copy.nsmap))
self.assertNotEqual(id(tmpl.slaves), id(copy.slaves))
self.assertEqual(len(tmpl.slaves), len(copy.slaves))
self.assertEqual(tmpl.slaves[0], copy.slaves[0])
def test_slave_apply(self):
# Construct a master template
elem = xmlutil.TemplateElement('test')
master = xmlutil.MasterTemplate(elem, 3)
# Construct a slave template with applicable minimum version
slave = xmlutil.SlaveTemplate(elem, 2)
self.assertEqual(slave.apply(master), True)
# Construct a slave template with equal minimum version
slave = xmlutil.SlaveTemplate(elem, 3)
self.assertEqual(slave.apply(master), True)
# Construct a slave template with inapplicable minimum version
slave = xmlutil.SlaveTemplate(elem, 4)
self.assertEqual(slave.apply(master), False)
# Construct a slave template with applicable version range
slave = xmlutil.SlaveTemplate(elem, 2, 4)
self.assertEqual(slave.apply(master), True)
# Construct a slave template with low version range
slave = xmlutil.SlaveTemplate(elem, 1, 2)
self.assertEqual(slave.apply(master), False)
# Construct a slave template with high version range
slave = xmlutil.SlaveTemplate(elem, 4, 5)
self.assertEqual(slave.apply(master), False)
# Construct a slave template with matching version range
slave = xmlutil.SlaveTemplate(elem, 3, 3)
self.assertEqual(slave.apply(master), True)
def test__serialize(self):
# Our test object to serialize
obj = {
'test': {
'name': 'foobar',
'values': [1, 2, 3, 4],
'attrs': {
'a': 1,
'b': 2,
'c': 3,
'd': 4,
},
'image': {
'name': 'image_foobar',
'id': 42,
},
},
}
# Set up our master template
root = xmlutil.TemplateElement('test', selector='test',
name='name')
value = xmlutil.SubTemplateElement(root, 'value', selector='values')
value.text = xmlutil.Selector()
attrs = xmlutil.SubTemplateElement(root, 'attrs', selector='attrs')
xmlutil.SubTemplateElement(attrs, 'attr', selector=xmlutil.get_items,
key=0, value=1)
master = xmlutil.MasterTemplate(root, 1, nsmap=dict(f='foo'))
# Set up our slave template
root_slave = xmlutil.TemplateElement('test', selector='test')
image = xmlutil.SubTemplateElement(root_slave, 'image',
selector='image', id='id')
image.text = xmlutil.Selector('name')
slave = xmlutil.SlaveTemplate(root_slave, 1, nsmap=dict(b='bar'))
# Attach the slave to the master...
master.attach(slave)
# Try serializing our object
siblings = master._siblings()
nsmap = master._nsmap()
result = master._serialize(None, obj, siblings, nsmap)
# Now we get to manually walk the element tree...
self.assertEqual(result.tag, 'test')
self.assertEqual(len(result.nsmap), 2)
self.assertEqual(result.nsmap['f'], 'foo')
self.assertEqual(result.nsmap['b'], 'bar')
self.assertEqual(result.get('name'), obj['test']['name'])
for idx, val in enumerate(obj['test']['values']):
self.assertEqual(result[idx].tag, 'value')
self.assertEqual(result[idx].text, str(val))
idx += 1
self.assertEqual(result[idx].tag, 'attrs')
for attr in result[idx]:
self.assertEqual(attr.tag, 'attr')
self.assertEqual(attr.get('value'),
str(obj['test']['attrs'][attr.get('key')]))
idx += 1
self.assertEqual(result[idx].tag, 'image')
self.assertEqual(result[idx].get('id'),
str(obj['test']['image']['id']))
self.assertEqual(result[idx].text, obj['test']['image']['name'])
class MasterTemplateBuilder(xmlutil.TemplateBuilder):
def construct(self):
elem = xmlutil.TemplateElement('test')
return xmlutil.MasterTemplate(elem, 1)
class SlaveTemplateBuilder(xmlutil.TemplateBuilder):
def construct(self):
elem = xmlutil.TemplateElement('test')
return xmlutil.SlaveTemplate(elem, 1)
class TemplateBuilderTest(test.TestCase):
def test_master_template_builder(self):
# Make sure the template hasn't been built yet
self.assertEqual(MasterTemplateBuilder._tmpl, None)
# Now, construct the template
tmpl1 = MasterTemplateBuilder()
# Make sure that there is a template cached...
self.assertNotEqual(MasterTemplateBuilder._tmpl, None)
# Make sure it wasn't what was returned...
self.assertNotEqual(MasterTemplateBuilder._tmpl, tmpl1)
# Make sure it doesn't get rebuilt
cached = MasterTemplateBuilder._tmpl
tmpl2 = MasterTemplateBuilder()
self.assertEqual(MasterTemplateBuilder._tmpl, cached)
# Make sure we're always getting fresh copies
self.assertNotEqual(tmpl1, tmpl2)
# Make sure we can override the copying behavior
tmpl3 = MasterTemplateBuilder(False)
self.assertEqual(MasterTemplateBuilder._tmpl, tmpl3)
def test_slave_template_builder(self):
# Make sure the template hasn't been built yet
self.assertEqual(SlaveTemplateBuilder._tmpl, None)
# Now, construct the template
tmpl1 = SlaveTemplateBuilder()
# Make sure there is a template cached...
self.assertNotEqual(SlaveTemplateBuilder._tmpl, None)
# Make sure it was what was returned...
self.assertEqual(SlaveTemplateBuilder._tmpl, tmpl1)
# Make sure it doesn't get rebuilt
tmpl2 = SlaveTemplateBuilder()
self.assertEqual(SlaveTemplateBuilder._tmpl, tmpl1)
# Make sure we're always getting the cached copy
self.assertEqual(tmpl1, tmpl2)
class MiscellaneousXMLUtilTests(test.TestCase):
def test_make_flat_dict(self):
expected_xml = ("<?xml version='1.0' encoding='UTF-8'?>\n"
'<wrapper><a>foo</a><b>bar</b></wrapper>')
root = xmlutil.make_flat_dict('wrapper')
tmpl = xmlutil.MasterTemplate(root, 1)
result = tmpl.serialize(dict(wrapper=dict(a='foo', b='bar')))
self.assertEqual(result, expected_xml)
|
paveu/api_mocker
|
refs/heads/develop
|
apimocker/mocker/views.py
|
1
|
# -*- coding: utf-8 -*-
import logging
from django.contrib import messages
from django.utils.decorators import method_decorator
from django.views import View
from django.views.decorators.csrf import csrf_exempt
from django.views.generic import FormView, CreateView
from .enums import SUCCESS_FORM_ACTION_MSG
from .forms import MockerForm
from .utils import Requester, get_hashed_id
from .models import Mocker
logger = logging.getLogger(__name__)
class CreateMockerView(CreateView):
template_name = "create_mocker.html"
form_class = MockerForm
model = Mocker
def dispatch(self, request, *args, **kwargs):
logger.info("create-mocker-view")
return super(CreateMockerView, self).dispatch(request, *args, **kwargs)
class ProcessMockFormView(FormView):
form_class = MockerForm
template_name = "action_status.html"
def dispatch(self, request, *args, **kwargs):
logger.info("process-mock-form-view")
return super(ProcessMockFormView, self).dispatch(request, *args, **kwargs)
def form_valid(self, form):
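# Each mock is stored under a short hashed id; the public mocked URL is the
# site root followed by that id.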
hashed_id = get_hashed_id()
mocked_url = ''.join([self.request.build_absolute_uri('/'), hashed_id, "/"])
form = form.save(commit=False)
form.hashed_id = hashed_id
form.mocked_address = mocked_url
form.save()
context = {
"destination_url": form.destination_address,
"mocked_url": mocked_url,
"action_msg": SUCCESS_FORM_ACTION_MSG,
}
messages.success(self.request, "Operation completed with success")
return self.render_to_response(context)
def form_invalid(self, form):
messages.warning(self.request, "Something went wrong. Form is not valid")
return self.render_to_response(context={})
class ResolveMockedAddressView(View):
@method_decorator(csrf_exempt)
def dispatch(self, request, *args, **kwargs):
logger.info("resolve-mocked-address-view")
return super(ResolveMockedAddressView, self).dispatch(request, *args, **kwargs)
def _process(self, request, hashed_id):
return Requester(
hashed_id=hashed_id,
requested_http_method=request.method,
requested_content_type=request.content_type,
absolute_uri=request.build_absolute_uri(),
forced_format=request.GET.get('format', ''),
).process_request()
def post(self, request, hashed_id):
return self._process(request, hashed_id)
def get(self, request, hashed_id):
return self._process(request, hashed_id)
def patch(self, request, hashed_id):
return self._process(request, hashed_id)
def put(self, request, hashed_id):
return self._process(request, hashed_id)
def delete(self, request, hashed_id):
return self._process(request, hashed_id)
def head(self, request, hashed_id):
return self._process(request, hashed_id)
def options(self, request, hashed_id):
return self._process(request, hashed_id)
|
achang97/YouTunes
|
refs/heads/master
|
lib/python2.7/site-packages/oauth2client/contrib/xsrfutil.py
|
39
|
# Copyright 2014 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper methods for creating & verifying XSRF tokens."""
import base64
import binascii
import hmac
import time
from oauth2client import _helpers
# Delimiter character
DELIMITER = b':'
# 1 hour in seconds
DEFAULT_TIMEOUT_SECS = 60 * 60
@_helpers.positional(2)
def generate_token(key, user_id, action_id='', when=None):
"""Generates a URL-safe token for the given user, action, time tuple.
Args:
key: secret key to use.
user_id: the user ID of the authenticated user.
action_id: a string identifier of the action they requested
authorization for.
when: the time in seconds since the epoch at which the user was
authorized for this action. If not set the current time is used.
Returns:
A string XSRF protection token.
"""
digester = hmac.new(_helpers._to_bytes(key, encoding='utf-8'))
digester.update(_helpers._to_bytes(str(user_id), encoding='utf-8'))
digester.update(DELIMITER)
digester.update(_helpers._to_bytes(action_id, encoding='utf-8'))
digester.update(DELIMITER)
when = _helpers._to_bytes(str(when or int(time.time())), encoding='utf-8')
digester.update(when)
digest = digester.digest()
token = base64.urlsafe_b64encode(digest + DELIMITER + when)
return token
@_helpers.positional(3)
def validate_token(key, token, user_id, action_id="", current_time=None):
"""Validates that the given token authorizes the user for the action.
Tokens are invalid if the time of issue is too old or if the token
does not match what generateToken outputs (i.e. the token was forged).
Args:
key: secret key to use.
token: a string of the token generated by generateToken.
user_id: the user ID of the authenticated user.
action_id: a string identifier of the action they requested
authorization for.
Returns:
A boolean - True if the user is authorized for the action, False
otherwise.
"""
if not token:
return False
try:
decoded = base64.urlsafe_b64decode(token)
token_time = int(decoded.split(DELIMITER)[-1])
except (TypeError, ValueError, binascii.Error):
return False
if current_time is None:
current_time = time.time()
# If the token is too old it's not valid.
if current_time - token_time > DEFAULT_TIMEOUT_SECS:
return False
# The given token should match the generated one with the same time.
expected_token = generate_token(key, user_id, action_id=action_id,
when=token_time)
if len(token) != len(expected_token):
return False
# Perform constant time comparison to avoid timing attacks
different = 0
for x, y in zip(bytearray(token), bytearray(expected_token)):
different |= x ^ y
return not different
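# --- Illustrative usage (editor's sketch; not part of the original module) ---
# The key, user id and action below are made-up values; any key/user/action
# triple round-trips the same way under this module's Python 2 target.
if __name__ == '__main__':
    _key = 'my-secret-key'
    _token = generate_token(_key, 42, action_id='delete-item')
    # Valid while fresh and when key, user and action all match.
    assert validate_token(_key, _token, 42, action_id='delete-item')
    # A different user id (or an expired token) fails validation.
    assert not validate_token(_key, _token, 43, action_id='delete-item')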
|
cd334/hangoutsbot
|
refs/heads/master
|
hangupsbot/sinks/generic/simpledemo.py
|
4
|
import time
import json
import base64
import io
import asyncio
import imghdr
import logging
from http.server import BaseHTTPRequestHandler
from urllib.parse import urlparse, parse_qs
from utils import simple_parse_to_segments
class webhookReceiver(BaseHTTPRequestHandler):
_bot = None # set externally by the hangupsbot sink loader
sinkname = "sink"
@asyncio.coroutine
def process_payload(self, path, query_string, payload):
logging.warning("[DEPRECATED] simpledemo.webhookReceiver, use sinks.generic.SimpleMessagePoster")
sinkname = self.sinkname
path = path.split("/")
conversation_id = path[1]
if conversation_id is None:
print("{}: conversation id must be provided as part of path".format(sinkname))
return
image_id = None
if "image" in payload:
image_data = False
image_filename = False
image_type = 'unknown'
if "base64encoded" in payload["image"]:
raw = base64.b64decode(payload["image"]["base64encoded"], None, True)
image_data = io.BytesIO(raw)
image_type = imghdr.what('ignore', raw)
if not image_type:
image_type = 'error'
if "filename" in payload["image"]:
image_filename = payload["image"]["filename"]
else:
image_filename = str(int(time.time())) + "." + image_type
print("{}: uploading image: {}".format(sinkname, image_filename))
image_id = yield from webhookReceiver._bot._client.upload_image(image_data, filename=image_filename)
html = ""
if "echo" in payload:
html = payload["echo"]
else:
# placeholder text
html = "<b>hello world</b>"
segments = simple_parse_to_segments(html)
print("{} sending segments: {}".format(sinkname, len(segments)))
yield from self._bot.coro_send_message(conversation_id, segments, context=None, image_id=image_id)
def do_POST(self):
logging.warning("[DEPRECATED] simpledemo.webhookReceiver, use sinks.generic.SimpleMessagePoster")
sinkname = self.sinkname
print('{}: receiving POST...'.format(sinkname))
data_string = self.rfile.read(int(self.headers['Content-Length'])).decode('UTF-8')
self.send_response(200)
message = bytes('OK', 'UTF-8')
self.send_header("Content-type", "text")
self.send_header("Content-length", str(len(message)))
self.end_headers()
self.wfile.write(message)
print('{}: connection closed'.format(sinkname))
# parse requested path + query string
_parsed = urlparse(self.path)
path = _parsed.path
query_string = parse_qs(_parsed.query)
print("{}: incoming path: {}".format(sinkname, path))
print("{}: incoming data: approx {} bytes".format(sinkname, len(data_string)))
# parse incoming data
payload = json.loads(data_string)
# process the payload
asyncio.async(self.process_payload(path, query_string, payload))
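# --- Illustrative request (editor's sketch; all values are made up) ---
# The conversation id is read from the first path segment and the JSON body
# may carry an "echo" string and/or an "image" object, as parsed above.
#
#   POST /<conversation-id>/ HTTP/1.1
#   Content-Type: application/json
#
#   {"echo": "<b>hello world</b>",
#    "image": {"base64encoded": "<base64 data>", "filename": "demo.png"}}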
|
hardanimal/UFT_UPGEM
|
refs/heads/master
|
src/UFT_GUI/log_handler.py
|
1
|
#!/usr/bin/env python
# encoding: utf-8
"""Description: Initilize the logger
"""
__version__ = "0.1"
__author__ = "@mzfa"
import sys
import logging
from PyQt4 import QtCore
class XStream(QtCore.QObject):
_stdout = None
_stderr = None
messageWritten = QtCore.pyqtSignal(str)
def flush(self):
pass
def fileno(self):
return -1
def write(self, msg):
if not self.signalsBlocked():
self.messageWritten.emit(unicode(msg))
@staticmethod
def stdout():
if not XStream._stdout:
XStream._stdout = XStream()
sys.stdout = XStream._stdout
return XStream._stdout
@staticmethod
def stderr():
if not XStream._stderr:
XStream._stderr = XStream()
sys.stderr = XStream._stderr
return XStream._stderr
class QtHandler(logging.Handler):
def __init__(self):
logging.Handler.__init__(self)
def emit(self, record):
record = self.format(record)
if record:
# XStream.stdout().write('%s' % record)
XStream.stdout().write("{}\n".format(record))
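# --- Illustrative wiring (editor's sketch; not part of this module) ---
# QtHandler pushes formatted records through XStream.stdout(), whose
# messageWritten signal can feed any Qt text widget; "console" below is a
# hypothetical QTextEdit.
#
#   handler = QtHandler()
#   handler.setFormatter(logging.Formatter("%(levelname)s: %(message)s"))
#   logging.getLogger().addHandler(handler)
#   XStream.stdout().messageWritten.connect(console.insertPlainText)
#   logging.warning("hello")   # appears in the widget instead of the terminal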
|
nikolas/lettuce
|
refs/heads/master
|
tests/integration/lib/Django-1.2.5/tests/regressiontests/forms/__init__.py
|
12133432
| |
MaxTyutyunnikov/lino
|
refs/heads/master
|
lino/modlib/courses/fixtures/demo.py
|
1
|
# -*- coding: UTF-8 -*-
## Copyright 2012-2013 Luc Saffre
## This file is part of the Lino project.
## Lino is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 3 of the License, or
## (at your option) any later version.
## Lino is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
## You should have received a copy of the GNU General Public License
## along with Lino; if not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
import datetime
#~ from django.contrib.contenttypes.models import ContentType
from lino import dd
#~ from lino.utils.instantiator import Instantiator, i2d
#~ from lino.core.dbutils import resolve_model
from lino.utils import mti, Cycler
from django.utils.translation import ugettext_lazy as _
from django.db import models
from django.conf import settings
from north.dbutils import babelkw
Person = dd.resolve_model('contacts.Person')
courses = dd.resolve_app('courses')
cal = dd.resolve_app('cal')
users = dd.resolve_app('users')
#~ Room = resolve_model('courses.Room')
#~ Content = resolve_model('courses.Content')
#~ PresenceStatus = resolve_model('courses.PresenceStatus')
def objects():
#~ yield courses.Room(name="A")
#~ yield cal.Place(name="A")
#~ yield cal.Place(name="B")
#~ yield cal.Place(name="C")
#~ yield cal.Place(name="D")
#~ yield cal.Place(name="E")
#~ yield cal.Place(name="F")
PTYPES = Cycler(courses.PupilType.objects.all())
TTYPES = Cycler(courses.TeacherType.objects.all())
n = 0
for p in Person.objects.all():
if n % 2 == 0:
yield mti.insert_child(p,courses.Pupil,pupil_type=PTYPES.pop())
if n % 9 == 0:
yield mti.insert_child(p,courses.Teacher,teacher_type=TTYPES.pop())
n += 1
if False:
#~ PS = Cycler(courses.PresenceStatus.objects.all())
CONTENTS = Cycler(courses.Line.objects.all())
USERS = Cycler(users.User.objects.all())
PLACES = Cycler(cal.Room.objects.all())
TEACHERS = Cycler(courses.Teacher.objects.all())
SLOTS = Cycler(courses.Slot.objects.all())
#~ SLOTS = Cycler(1,2,3,4)
PUPILS = Cycler(courses.Pupil.objects.all())
#~ Event = settings.SITE.modules.cal.Event
#~ from lino.modlib.cal.utils import DurationUnit
year = settings.SITE.demo_date().year
if settings.SITE.demo_date().month < 7:
year -= 1
for i in range(10):
c = courses.Course(
user=USERS.pop(),
teacher=TEACHERS.pop(),
line=CONTENTS.pop(),room=PLACES.pop(),
start_date=datetime.date(year,9,1+i),
end_date=datetime.date(year+1,6,30),
every=1,
every_unit=cal.DurationUnits.weeks,
slot=SLOTS.pop(),
)
yield c
for j in range(5):
yield courses.Enrolment(pupil=PUPILS.pop(),course=c)
c.save() # fill presences
#~ for j in range(5):
#~ yield courses.Event(start_date=settings.SITE.demo_date(j*7),course=c)
#~ yield courses.Presence()
|
y-j-n/pyFastfusion
|
refs/heads/master
|
modeler/util.py
|
1
|
import os
import errno
import shutil
import sys
import math
import numpy as np
from numpy import linalg as LA
from PySide import QtCore, QtGui, QtOpenGL
import time
from contextlib import contextmanager
# http://stackoverflow.com/questions/2327719/timing-block-of-code-in-python-without-putting-it-in-a-function
@contextmanager
def util_measure_time(title):
t1 = time.clock()
yield
t2 = time.clock()
print '%s: %0.2f seconds elapsed' % (title, t2-t1)
# http://stackoverflow.com/questions/1823058/how-to-print-number-with-commas-as-thousands-separators
def util_format_k(num):
return '{:,}'.format(num)
def util_format_2f(num):
return '{0:.2f}'.format(num)
def util_format_3f(num):
return '{0:.3f}'.format(num)
# http://stackoverflow.com/questions/13199126/find-opengl-rotation-matrix-for-a-plane-given-the-normal-vector-after-the-rotat
def util_calc_plane_rot_trans(li_abcd):
# calculate rotation
n = np.array([0, 0, 1])
norm_abc = LA.norm(li_abcd[0:3])
nn = np.array(li_abcd[0:3]) / norm_abc
rot_axis = np.cross(n, nn)
rot_angle = math.degrees(math.acos(np.dot(n, nn)))
# calculate translation
# dist_abs_from_origin = abs(li_abcd[3]) / norm_abc
dist_abs_from_origin = -li_abcd[3] / norm_abc
trans = dist_abs_from_origin * nn
# print trans
return rot_axis, rot_angle, trans
def util_create_empty_dir(dirname):
shutil.rmtree(dirname, ignore_errors=True)
try:
os.makedirs(dirname)
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
def util_qimage_to_ndarray(qimage):
image_in = qimage.convertToFormat(QtGui.QImage.Format.Format_RGB32)
width = image_in.width()
height = image_in.height()
print width, height
# http://stackoverflow.com/questions/19902183/qimage-to-numpy-array-using-pyside
# ptr = image_in.bits()
ptr = image_in.constBits()
print image_in.byteCount() # 1228800
if sys.platform == 'darwin':
arr = np.array(ptr).reshape(height, width, 4) # Copies the data
# print np.array(ptr).shape # (1228800,) i.e. 640*480*4
else:
print type(ptr) # 'buffer'
# http://stackoverflow.com/questions/11760095/convert-binary-string-to-numpy-array
# http://docs.scipy.org/doc/numpy/reference/arrays.dtypes.html
arr = np.frombuffer(ptr, dtype='b')
print arr # fixme: color is broken........................
print arr.size
print arr.shape
arr = arr.reshape(height, width, 4)
# http://docs.scipy.org/doc/numpy/reference/generated/numpy.delete.html
# remove alpha channel
#print arr.shape # (480, 640, 4)
arr = np.delete(arr, np.s_[3], 2)
#print arr.shape # (480, 640, 3)
return arr
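# --- Illustrative usage (editor's sketch; label and values are made up) ---
#   with util_measure_time('formatting numbers'):
#       print util_format_k(1234567)    # -> 1,234,567
#       print util_format_2f(3.14159)   # -> 3.14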
|
shubhdev/edxOnBaadal
|
refs/heads/master
|
common/lib/capa/capa/inputtypes.py
|
1
|
#
# File: courseware/capa/inputtypes.py
#
"""
Module containing the problem elements which render into input objects
- textline
- textbox (aka codeinput)
- schematic
- choicegroup (aka radiogroup, checkboxgroup)
- javascriptinput
- imageinput (for clickable image)
- optioninput (for option list)
- filesubmission (upload a file)
- crystallography
- vsepr_input
- drag_and_drop
- formulaequationinput
- chemicalequationinput
These are matched by *.html files templates/*.html which are mako templates with the
actual html.
Each input type takes the xml tree as 'element', the previous answer as 'value', and the
graded status as 'status'
"""
# TODO: make hints do something
# TODO: make all inputtypes actually render msg
# TODO: remove unused fields (e.g. 'hidden' in a few places)
# TODO: add validators so that content folks get better error messages.
# Possible todo: make inline the default for textlines and other "one-line" inputs. It probably
# makes sense, but a bunch of problems have markup that assumes block. Bigger TODO: figure out a
# general css and layout strategy for capa, document it, then implement it.
import time
import json
import logging
from lxml import etree
import re
import shlex # for splitting quoted strings
import sys
import pyparsing
import html5lib
import bleach
from .util import sanitize_html
from .registry import TagRegistry
from chem import chemcalc
from calc.preview import latex_preview
import xqueue_interface
from xqueue_interface import XQUEUE_TIMEOUT
from datetime import datetime
from xmodule.stringify import stringify_children
log = logging.getLogger(__name__)
#########################################################################
registry = TagRegistry() # pylint: disable=invalid-name
class Status(object):
"""
Problem status
attributes: classname, display_name, display_tooltip
"""
css_classes = {
# status: css class
'unsubmitted': 'unanswered',
'incomplete': 'incorrect',
'queued': 'processing',
}
__slots__ = ('classname', '_status', 'display_name', 'display_tooltip')
def __init__(self, status, gettext_func=unicode):
self.classname = self.css_classes.get(status, status)
_ = gettext_func
names = {
'correct': _('correct'),
'incorrect': _('incorrect'),
'incomplete': _('incomplete'),
'unanswered': _('unanswered'),
'unsubmitted': _('unanswered'),
'queued': _('processing'),
}
tooltips = {
# Translators: these are tooltips that indicate the state of an assessment question
'correct': _('This is correct.'),
'incorrect': _('This is incorrect.'),
'unanswered': _('This is unanswered.'),
'unsubmitted': _('This is unanswered.'),
'queued': _('This is being processed.'),
}
self.display_name = names.get(status, unicode(status))
self.display_tooltip = tooltips.get(status, u'')
self._status = status or ''
def __str__(self):
return self._status
def __unicode__(self):
return self._status.decode('utf8')
def __repr__(self):
return 'Status(%r)' % self._status
def __eq__(self, other):
return self._status == str(other)
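# Illustrative behaviour (editor's sketch): Status maps raw problem states
# onto display-facing values, e.g. a 'queued' problem renders as 'processing'.
#
#   s = Status('queued', gettext_func=unicode)
#   s.classname        # -> 'processing'
#   s.display_name     # -> u'processing'
#   str(s)             # -> 'queued'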
class Attribute(object):
"""
Allows specifying required and optional attributes for input types.
"""
# want to allow default to be None, but also allow required objects
_sentinel = object()
def __init__(self, name, default=_sentinel, transform=None, validate=None, render=True):
"""
Define an attribute
        name (str): the name of the attribute--should be alphanumeric (valid for an XML attribute)
default (any type): If not specified, this attribute is required. If specified, use this as the default value
if the attribute is not specified. Note that this value will not be transformed or validated.
transform (function str -> any type): If not None, will be called to transform the parsed value into an internal
representation.
        validate (function str-or-return-type-of-transform -> unit or exception): If not None, called to validate the
(possibly transformed) value of the attribute. Should raise ValueError with a helpful message if
the value is invalid.
render (bool): if False, don't include this attribute in the template context.
"""
self.name = name
self.default = default
self.validate = validate
self.transform = transform
self.render = render
def parse_from_xml(self, element):
"""
Given an etree xml element that should have this attribute, do the obvious thing:
- look for it. raise ValueError if not found and required.
- transform and validate. pass through any exceptions from transform or validate.
"""
val = element.get(self.name)
if self.default == self._sentinel and val is None:
raise ValueError(
'Missing required attribute {0}.'.format(self.name)
)
if val is None:
# not required, so return default
return self.default
if self.transform is not None:
val = self.transform(val)
if self.validate is not None:
self.validate(val)
return val
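# Illustrative behaviour (editor's sketch): a required attribute raises when
# missing, while an optional one falls back to its (untransformed) default.
# "elt" below is a hypothetical etree element.
#
#   Attribute('src').parse_from_xml(elt)             # ValueError if 'src' absent
#   Attribute('tabsize', 4, transform=int).parse_from_xml(elt)
#       # -> int(elt.get('tabsize')) when present, 4 when absent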
class InputTypeBase(object):
"""
Abstract base class for input types.
"""
template = None
def __init__(self, system, xml, state):
"""
Instantiate an InputType class. Arguments:
- system : LoncapaModule instance which provides OS, rendering, and user context.
Specifically, must have a render_template function.
- xml : Element tree of this Input element
- state : a dictionary with optional keys:
* 'value' -- the current value of this input
(what the student entered last time)
* 'id' -- the id of this input, typically
"{problem-location}_{response-num}_{input-num}"
* 'status' (answered, unanswered, unsubmitted)
* 'input_state' -- dictionary containing any inputtype-specific state
that has been preserved
* 'feedback' (dictionary containing keys for hints, errors, or other
feedback from previous attempt. Specifically 'message', 'hint',
'hintmode'. If 'hintmode' is 'always', the hint is always displayed.)
"""
self.xml = xml
self.tag = xml.tag
self.capa_system = system
# NOTE: ID should only come from one place. If it comes from multiple,
# we use state first, XML second (in case the xml changed, but we have
# existing state with an old id). Since we don't make this guarantee,
# we can swap this around in the future if there's a more logical
# order.
self.input_id = state.get('id', xml.get('id'))
if self.input_id is None:
raise ValueError(
"input id state is None. xml is {0}".format(etree.tostring(xml))
)
self.value = state.get('value', '')
feedback = state.get('feedback', {})
self.msg = feedback.get('message', '')
self.hint = feedback.get('hint', '')
self.hintmode = feedback.get('hintmode', None)
self.input_state = state.get('input_state', {})
self.answervariable = state.get("answervariable", None)
# put hint above msg if it should be displayed
if self.hintmode == 'always':
self.msg = self.hint + ('<br/>' if self.msg else '') + self.msg
self.status = state.get('status', 'unanswered')
try:
# Pre-parse and process all the declared requirements.
self.process_requirements()
# Call subclass "constructor" -- means they don't have to worry about calling
# super().__init__, and are isolated from changes to the input
# constructor interface.
self.setup()
except Exception as err:
# Something went wrong: add xml to message, but keep the traceback
msg = u"Error in xml '{x}': {err} ".format(
x=etree.tostring(xml), err=err.message)
raise Exception, msg, sys.exc_info()[2]
@classmethod
def get_attributes(cls):
"""
Should return a list of Attribute objects (see docstring there for details). Subclasses should override. e.g.
return [Attribute('unicorn', True), Attribute('num_dragons', 12, transform=int), ...]
"""
return []
def process_requirements(self):
"""
Subclasses can declare lists of required and optional attributes. This
function parses the input xml and pulls out those attributes. This
isolates most simple input types from needing to deal with xml parsing at all.
Processes attributes, putting the results in the self.loaded_attributes dictionary. Also creates a set
self.to_render, containing the names of attributes that should be included in the context by default.
"""
# Use local dicts and sets so that if there are exceptions, we don't
# end up in a partially-initialized state.
loaded = {}
to_render = set()
for attribute in self.get_attributes():
loaded[attribute.name] = attribute.parse_from_xml(self.xml)
if attribute.render:
to_render.add(attribute.name)
self.loaded_attributes = loaded
self.to_render = to_render
def setup(self):
"""
InputTypes should override this to do any needed initialization. It is called after the
constructor, so all base attributes will be set.
If this method raises an exception, it will be wrapped with a message that includes the
problem xml.
"""
pass
def handle_ajax(self, dispatch, data):
"""
InputTypes that need to handle specialized AJAX should override this.
Input:
dispatch: a string that can be used to determine how to handle the data passed in
data: a dictionary containing the data that was sent with the ajax call
Output:
a dictionary object that can be serialized into JSON. This will be sent back to the Javascript.
"""
pass
def _get_render_context(self):
"""
Should return a dictionary of keys needed to render the template for the input type.
        (Separate from get_html to facilitate testing of logic separately from the rendering)
The default implementation gets the following rendering context: basic things like value, id, status, and msg,
as well as everything in self.loaded_attributes, and everything returned by self._extra_context().
This means that input types that only parse attributes and pass them to the template get everything they need,
and don't need to override this method.
"""
context = {
'id': self.input_id,
'value': self.value,
'status': Status(self.status, self.capa_system.i18n.ugettext),
'msg': self.msg,
'STATIC_URL': self.capa_system.STATIC_URL,
}
context.update(
(a, v) for (a, v) in self.loaded_attributes.iteritems() if a in self.to_render
)
context.update(self._extra_context())
if self.answervariable:
context.update({'answervariable': self.answervariable})
return context
def _extra_context(self):
"""
Subclasses can override this to return extra context that should be passed to their templates for rendering.
This is useful when the input type requires computing new template variables from the parsed attributes.
"""
return {}
def get_html(self):
"""
Return the html for this input, as an etree element.
"""
if self.template is None:
raise NotImplementedError("no rendering template specified for class {0}"
.format(self.__class__))
context = self._get_render_context()
html = self.capa_system.render_template(self.template, context)
try:
output = etree.XML(html)
except etree.XMLSyntaxError as ex:
# If `html` contains attrs with no values, like `controls` in <audio controls src='smth'/>,
            # XML parser will raise an exception, so we fall back to html5parser, which will set empty "" values for such attrs.
try:
output = html5lib.parseFragment(html, treebuilder='lxml', namespaceHTMLElements=False)[0]
except IndexError:
raise ex
return output
def get_user_visible_answer(self, internal_answer):
"""
Given the internal representation of the answer provided by the user, return the representation of the answer
        as the user saw it. Subclasses should override this method if and only if the internal representation of the
answer is different from the answer that is displayed to the user.
"""
return internal_answer
#-----------------------------------------------------------------------------
@registry.register
class OptionInput(InputTypeBase):
"""
    Input type for selecting one option from a drop-down (select) list.
Example:
<optioninput options="('Up','Down')" label="Where is the sky?" correct="Up"/><text>The location of the sky</text>
# TODO: allow ordering to be randomized
"""
template = "optioninput.html"
tags = ['optioninput']
@staticmethod
def parse_options(options):
"""
Given options string, convert it into an ordered list of (option_id, option_description) tuples, where
id==description for now. TODO: make it possible to specify different id and descriptions.
"""
# convert single quotes inside option values to html encoded string
options = re.sub(r"([a-zA-Z])('|\\')([a-zA-Z])", r"\1'\3", options)
options = re.sub(r"\\'", r"'", options) # replace already escaped single quotes
# parse the set of possible options
lexer = shlex.shlex(options[1:-1].encode('utf8'))
lexer.quotes = "'"
# Allow options to be separated by whitespace as well as commas
lexer.whitespace = ", "
# remove quotes
# convert escaped single quotes (html encoded string) back to single quotes
        tokens = [x[1:-1].decode('utf8').replace("&#39;", "'") for x in lexer]
# make list of (option_id, option_description), with description=id
return [(t, t) for t in tokens]
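    # Illustrative call (editor's sketch): the options attribute string is
    # turned into (id, description) pairs with id == description.
    #
    #   OptionInput.parse_options(u"('Up','Down')")
    #       # -> [(u'Up', u'Up'), (u'Down', u'Down')]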
@classmethod
def get_attributes(cls):
"""
Convert options to a convenient format.
"""
return [Attribute('options', transform=cls.parse_options),
Attribute('label', ''),
Attribute('inline', False)]
#-----------------------------------------------------------------------------
# TODO: consolidate choicegroup, radiogroup, checkboxgroup after discussion of
# desired semantics.
@registry.register
class ChoiceGroup(InputTypeBase):
"""
Radio button or checkbox inputs: multiple choice or true/false
TODO: allow order of choices to be randomized, following lon-capa spec. Use
"location" attribute, ie random, top, bottom.
Example:
<choicegroup label="Which foil?">
<choice correct="false" name="foil1">
<text>This is foil One.</text>
</choice>
<choice correct="false" name="foil2">
<text>This is foil Two.</text>
</choice>
<choice correct="true" name="foil3">
<text>This is foil Three.</text>
</choice>
</choicegroup>
"""
template = "choicegroup.html"
tags = ['choicegroup', 'radiogroup', 'checkboxgroup']
def setup(self):
i18n = self.capa_system.i18n
        # suffix is '' or '[]' to change the way the input is handled -- as a
        # scalar or vector value. (VS: would be nice to make this less hackish).
if self.tag == 'choicegroup':
self.suffix = ''
self.html_input_type = "radio"
elif self.tag == 'radiogroup':
self.html_input_type = "radio"
self.suffix = '[]'
elif self.tag == 'checkboxgroup':
self.html_input_type = "checkbox"
self.suffix = '[]'
else:
_ = i18n.ugettext
# Translators: 'ChoiceGroup' is an input type and should not be translated.
msg = _("ChoiceGroup: unexpected tag {tag_name}").format(tag_name=self.tag)
raise Exception(msg)
self.choices = self.extract_choices(self.xml, i18n)
self._choices_map = dict(self.choices,) # pylint: disable=attribute-defined-outside-init
@classmethod
def get_attributes(cls):
_ = lambda text: text
return [Attribute("show_correctness", "always"),
Attribute('label', ''),
Attribute("submitted_message", _("Answer received."))]
def _extra_context(self):
return {'input_type': self.html_input_type,
'choices': self.choices,
'name_array_suffix': self.suffix}
@staticmethod
def extract_choices(element, i18n):
"""
Extracts choices for a few input types, such as ChoiceGroup, RadioGroup and
CheckboxGroup.
returns list of (choice_name, choice_text) tuples
TODO: allow order of choices to be randomized, following lon-capa spec. Use
"location" attribute, ie random, top, bottom.
"""
choices = []
_ = i18n.ugettext
for choice in element:
if choice.tag == 'choice':
choices.append((choice.get("name"), stringify_children(choice)))
else:
if choice.tag != 'compoundhint':
msg = u'[capa.inputtypes.extract_choices] {error_message}'.format(
# Translators: '<choice>' and '<compoundhint>' are tag names and should not be translated.
error_message=_('Expected a <choice> or <compoundhint> tag; got {given_tag} instead').format(
given_tag=choice.tag
)
)
raise Exception(msg)
return choices
def get_user_visible_answer(self, internal_answer):
if isinstance(internal_answer, basestring):
return self._choices_map[internal_answer]
return [self._choices_map[i] for i in internal_answer]
#-----------------------------------------------------------------------------
@registry.register
class JavascriptInput(InputTypeBase):
"""
Hidden field for javascript to communicate via; also loads the required
scripts for rendering the problem and passes data to the problem.
TODO (arjun?): document this in detail. Initial notes:
- display_class is a subclass of XProblemClassDisplay (see
xmodule/xmodule/js/src/capa/display.coffee),
- display_file is the js script to be in /static/js/ where display_class is defined.
"""
template = "javascriptinput.html"
tags = ['javascriptinput']
@classmethod
def get_attributes(cls):
"""
Register the attributes.
"""
return [Attribute('params', None),
Attribute('problem_state', None),
Attribute('display_class', None),
Attribute('display_file', None), ]
def setup(self):
# Need to provide a value that JSON can parse if there is no
# student-supplied value yet.
if self.value == "":
self.value = 'null'
#-----------------------------------------------------------------------------
@registry.register
class JSInput(InputTypeBase):
"""
Inputtype for general javascript inputs. Intended to be used with
customresponse.
Loads in a sandboxed iframe to help prevent css and js conflicts between
frame and top-level window.
iframe sandbox whitelist:
- allow-scripts
- allow-popups
- allow-forms
- allow-pointer-lock
This in turn means that the iframe cannot directly access the top-level
window elements.
Example:
<jsinput html_file="/static/test.html"
gradefn="grade"
height="500"
width="400"/>
See the documentation in docs/data/source/course_data_formats/jsinput.rst
for more information.
"""
template = "jsinput.html"
tags = ['jsinput']
@classmethod
def get_attributes(cls):
"""
Register the attributes.
"""
return [
Attribute('params', None), # extra iframe params
Attribute('html_file', None),
Attribute('gradefn', "gradefn"),
Attribute('get_statefn', None), # Function to call in iframe
# to get current state.
Attribute('initial_state', None), # JSON string to be used as initial state
Attribute('set_statefn', None), # Function to call iframe to
# set state
Attribute('width', "400"), # iframe width
Attribute('height', "300"), # iframe height
Attribute('sop', None) # SOP will be relaxed only if this
# attribute is set to false.
]
def _extra_context(self):
context = {
'jschannel_loader': '{static_url}js/capa/src/jschannel.js'.format(
static_url=self.capa_system.STATIC_URL),
'jsinput_loader': '{static_url}js/capa/src/jsinput.js'.format(
static_url=self.capa_system.STATIC_URL),
'saved_state': self.value
}
return context
#-----------------------------------------------------------------------------
@registry.register
class TextLine(InputTypeBase):
"""
A text line input. Can do math preview if "math"="1" is specified.
If "trailing_text" is set to a value, then the textline will be shown with
the value after the text input, and before the checkmark or any input-specific
feedback. HTML will not work, but properly escaped HTML characters will. This
feature is useful if you would like to specify a specific type of units for the
text input.
If the hidden attribute is specified, the textline is hidden and the input id
is stored in a div with name equal to the value of the hidden attribute. This
is used e.g. for embedding simulations turned into questions.
Example:
<textline math="1" trailing_text="m/s" label="How fast is a cheetah?" />
This example will render out a text line with a math preview and the text 'm/s'
after the end of the text line.
"""
template = "textline.html"
tags = ['textline']
@classmethod
def get_attributes(cls):
"""
Register the attributes.
"""
return [
Attribute('size', None),
Attribute('label', ''),
Attribute('hidden', False),
Attribute('inline', False),
# Attributes below used in setup(), not rendered directly.
Attribute('math', None, render=False),
# TODO: 'dojs' flag is temporary, for backwards compatibility with
# 8.02x
Attribute('dojs', None, render=False),
Attribute('preprocessorClassName', None, render=False),
Attribute('preprocessorSrc', None, render=False),
Attribute('trailing_text', ''),
]
def setup(self):
self.do_math = bool(self.loaded_attributes['math'] or
self.loaded_attributes['dojs'])
# TODO: do math checking using ajax instead of using js, so
# that we only have one math parser.
self.preprocessor = None
if self.do_math:
# Preprocessor to insert between raw input and Mathjax
self.preprocessor = {
'class_name': self.loaded_attributes['preprocessorClassName'],
'script_src': self.loaded_attributes['preprocessorSrc'],
}
if None in self.preprocessor.values():
self.preprocessor = None
def _extra_context(self):
return {'do_math': self.do_math,
'preprocessor': self.preprocessor, }
#-----------------------------------------------------------------------------
@registry.register
class FileSubmission(InputTypeBase):
"""
Upload some files (e.g. for programming assignments)
"""
template = "filesubmission.html"
tags = ['filesubmission']
@staticmethod
def parse_files(files):
"""
Given a string like 'a.py b.py c.out', split on whitespace and return as a json list.
"""
return json.dumps(files.split())
@classmethod
def get_attributes(cls):
"""
Convert the list of allowed files to a convenient format.
"""
return [Attribute('allowed_files', '[]', transform=cls.parse_files),
Attribute('label', ''),
Attribute('required_files', '[]', transform=cls.parse_files), ]
def setup(self):
"""
Do some magic to handle queueing status (render as "queued" instead of "incomplete"),
pull queue_len from the msg field. (TODO: get rid of the queue_len hack).
"""
_ = self.capa_system.i18n.ugettext
submitted_msg = _("Your files have been submitted. As soon as your submission is"
" graded, this message will be replaced with the grader's feedback.")
self.submitted_msg = submitted_msg
# Check if problem has been queued
self.queue_len = 0
# Flag indicating that the problem has been queued, 'msg' is length of
# queue
if self.status == 'incomplete':
self.status = 'queued'
self.queue_len = self.msg
self.msg = self.submitted_msg
def _extra_context(self):
return {'queue_len': self.queue_len, }
#-----------------------------------------------------------------------------
@registry.register
class CodeInput(InputTypeBase):
"""
A text area input for code--uses codemirror, does syntax highlighting, special tab handling,
etc.
"""
template = "codeinput.html"
tags = [
'codeinput',
'textbox',
# Another (older) name--at some point we may want to make it use a
# non-codemirror editor.
]
@classmethod
def get_attributes(cls):
"""
Convert options to a convenient format.
"""
return [
Attribute('rows', '30'),
Attribute('cols', '80'),
Attribute('hidden', ''),
# For CodeMirror
Attribute('mode', 'python'),
Attribute('readonly',''),
Attribute('linenumbers', 'true'),
# Template expects tabsize to be an int it can do math with
Attribute('tabsize', 4, transform=int),
]
def setup_code_response_rendering(self):
"""
Implement special logic: handle queueing state, and default input.
"""
# if no student input yet, then use the default input given by the
# problem
if not self.value and self.xml.text:
self.value = self.xml.text.strip()
# Check if problem has been queued
self.queue_len = 0
# Flag indicating that the problem has been queued, 'msg' is length of
# queue
if self.status == 'incomplete':
self.status = 'queued'
self.queue_len = self.msg
self.msg = bleach.clean(self.submitted_msg)
def setup(self):
""" setup this input type """
_ = self.capa_system.i18n.ugettext
submitted_msg = _("Your answer has been submitted. As soon as your submission is"
" graded, this message will be replaced with the grader's feedback.")
self.submitted_msg = submitted_msg
self.setup_code_response_rendering()
def _extra_context(self):
"""Defined queue_len, add it """
return {'queue_len': self.queue_len, }
#-----------------------------------------------------------------------------
@registry.register
class MatlabInput(CodeInput):
"""
InputType for handling Matlab code input
Example:
<matlabinput rows="10" cols="80" tabsize="4">
Initial Text
</matlabinput>
"""
template = "matlabinput.html"
tags = ['matlabinput']
def setup(self):
"""
Handle matlab-specific parsing
"""
_ = self.capa_system.i18n.ugettext
submitted_msg = _("Submitted. As soon as a response is returned, "
"this message will be replaced by that feedback.")
self.submitted_msg = submitted_msg
self.setup_code_response_rendering()
xml = self.xml
self.plot_payload = xml.findtext('./plot_payload')
# Check if problem has been queued
self.queuename = 'matlab'
self.queue_msg = ''
# this is only set if we don't have a graded response
# the graded response takes precedence
if 'queue_msg' in self.input_state and self.status in ['queued', 'incomplete', 'unsubmitted']:
self.queue_msg = sanitize_html(self.input_state['queue_msg'])
if 'queuestate' in self.input_state and self.input_state['queuestate'] == 'queued':
self.status = 'queued'
self.queue_len = 1
self.msg = self.submitted_msg
# Handle situation if no response from xqueue arrived during specified time.
if ('queuetime' not in self.input_state or
time.time() - self.input_state['queuetime'] > XQUEUE_TIMEOUT):
self.queue_len = 0
self.status = 'unsubmitted'
self.msg = _(
'No response from Xqueue within {xqueue_timeout} seconds. Aborted.'
).format(xqueue_timeout=XQUEUE_TIMEOUT)
def handle_ajax(self, dispatch, data):
"""
Handle AJAX calls directed to this input
Args:
- dispatch (str) - indicates how we want this ajax call to be handled
- data (dict) - dictionary of key-value pairs that contain useful data
Returns:
dict - 'success' - whether or not we successfully queued this submission
- 'message' - message to be rendered in case of error
"""
if dispatch == 'plot':
return self._plot_data(data)
return {}
def ungraded_response(self, queue_msg, queuekey):
"""
Handle the response from the XQueue
Stores the response in the input_state so it can be rendered later
Args:
- queue_msg (str) - message returned from the queue. The message to be rendered
- queuekey (str) - a key passed to the queue. Will be matched up to verify that this is the response we're waiting for
Returns:
nothing
"""
# check the queuekey against the saved queuekey
if('queuestate' in self.input_state and self.input_state['queuestate'] == 'queued'
and self.input_state['queuekey'] == queuekey):
msg = self._parse_data(queue_msg)
# save the queue message so that it can be rendered later
self.input_state['queue_msg'] = msg
self.input_state['queuestate'] = None
self.input_state['queuekey'] = None
def button_enabled(self):
""" Return whether or not we want the 'Test Code' button visible
Right now, we only want this button to show up when a problem has not been
checked.
"""
if self.status in ['correct', 'incorrect']:
return False
else:
return True
def _extra_context(self):
""" Set up additional context variables"""
_ = self.capa_system.i18n.ugettext
queue_msg = self.queue_msg
if len(self.queue_msg) > 0: # An empty string cannot be parsed as XML but is okay to include in the template.
try:
etree.XML(u'<div>{0}</div>'.format(self.queue_msg))
except etree.XMLSyntaxError:
try:
html5lib.parseFragment(self.queue_msg, treebuilder='lxml', namespaceHTMLElements=False)[0]
except (IndexError, ValueError):
# If neither can parse queue_msg, it contains invalid xml.
queue_msg = u"<span>{0}</span>".format(_("Error running code."))
extra_context = {
'queue_len': str(self.queue_len),
'queue_msg': queue_msg,
'button_enabled': self.button_enabled(),
'matlab_editor_js': '{static_url}js/vendor/CodeMirror/octave.js'.format(
static_url=self.capa_system.STATIC_URL),
'msg': sanitize_html(self.msg) # sanitize msg before rendering into template
}
return extra_context
def _parse_data(self, queue_msg):
"""
Parses the message out of the queue message
Args:
queue_msg (str) - a JSON encoded string
Returns:
returns the value for the the key 'msg' in queue_msg
"""
try:
result = json.loads(queue_msg)
except (TypeError, ValueError):
log.error("External message should be a JSON serialized dict."
" Received queue_msg = %s", queue_msg)
raise
msg = result['msg']
return msg
def _plot_data(self, data):
"""
AJAX handler for the plot button
Args:
get (dict) - should have key 'submission' which contains the student submission
Returns:
dict - 'success' - whether or not we successfully queued this submission
- 'message' - message to be rendered in case of error
"""
_ = self.capa_system.i18n.ugettext
# only send data if xqueue exists
if self.capa_system.xqueue is None:
return {'success': False, 'message': _('Cannot connect to the queue')}
# pull relevant info out of get
response = data['submission']
# construct xqueue headers
qinterface = self.capa_system.xqueue['interface']
qtime = datetime.utcnow().strftime(xqueue_interface.dateformat)
callback_url = self.capa_system.xqueue['construct_callback']('ungraded_response')
anonymous_student_id = self.capa_system.anonymous_student_id
# TODO: Why is this using self.capa_system.seed when we have self.seed???
queuekey = xqueue_interface.make_hashkey(str(self.capa_system.seed) + qtime +
anonymous_student_id +
self.input_id)
xheader = xqueue_interface.make_xheader(
lms_callback_url=callback_url,
lms_key=queuekey,
queue_name=self.queuename)
# construct xqueue body
student_info = {
'anonymous_student_id': anonymous_student_id,
'submission_time': qtime
}
contents = {
'grader_payload': self.plot_payload,
'student_info': json.dumps(student_info),
'student_response': response,
'token': getattr(self.capa_system, 'matlab_api_key', None),
'endpoint_version': "2",
'requestor_id': anonymous_student_id,
}
(error, msg) = qinterface.send_to_queue(header=xheader,
body=json.dumps(contents))
# save the input state if successful
if error == 0:
self.input_state['queuekey'] = queuekey
self.input_state['queuestate'] = 'queued'
self.input_state['queuetime'] = time.time()
return {'success': error == 0, 'message': msg}
#-----------------------------------------------------------------------------
@registry.register
class Schematic(InputTypeBase):
"""
InputType for the schematic editor
"""
template = "schematicinput.html"
tags = ['schematic']
@classmethod
def get_attributes(cls):
"""
Convert options to a convenient format.
"""
return [
Attribute('height', None),
Attribute('width', None),
Attribute('parts', None),
Attribute('analyses', None),
Attribute('initial_value', None),
Attribute('submit_analyses', None),
Attribute('label', ''),
]
def _extra_context(self):
context = {
'setup_script': '{static_url}js/capa/schematicinput.js'.format(
static_url=self.capa_system.STATIC_URL),
}
return context
#-----------------------------------------------------------------------------
@registry.register
class ImageInput(InputTypeBase):
"""
Clickable image as an input field. Element should specify the image source, height,
and width, e.g.
<imageinput src="/static/Figures/Skier-conservation-of-energy.jpg" width="388" height="560" />
    TODO: showanswer for imageinput does not work yet - need javascript to put rectangle
over acceptable area of image.
"""
template = "imageinput.html"
tags = ['imageinput']
@classmethod
def get_attributes(cls):
"""
Note: src, height, and width are all required.
"""
return [Attribute('src'),
Attribute('height'),
Attribute('label', ''),
Attribute('width'), ]
def setup(self):
"""
if value is of the form [x,y] then parse it and send along coordinates of previous answer
"""
m = re.match(r'\[([0-9]+),([0-9]+)]',
self.value.strip().replace(' ', ''))
if m:
# Note: we subtract 15 to compensate for the size of the dot on the screen.
# (is a 30x30 image--lms/static/images/green-pointer.png).
(self.gx, self.gy) = [int(x) - 15 for x in m.groups()]
else:
(self.gx, self.gy) = (0, 0)
def _extra_context(self):
return {'gx': self.gx,
'gy': self.gy}
#-----------------------------------------------------------------------------
@registry.register
class Crystallography(InputTypeBase):
"""
An input for crystallography -- user selects 3 points on the axes, and we get a plane.
TODO: what's the actual value format?
"""
template = "crystallography.html"
tags = ['crystallography']
@classmethod
def get_attributes(cls):
"""
Note: height, width are required.
"""
return [Attribute('height'),
Attribute('width'),
]
# -------------------------------------------------------------------------
@registry.register
class VseprInput(InputTypeBase):
"""
Input for molecular geometry--show possible structures, let student
pick structure and label positions with atoms or electron pairs.
"""
template = 'vsepr_input.html'
tags = ['vsepr_input']
@classmethod
def get_attributes(cls):
"""
Note: height, width, molecules and geometries are required.
"""
return [Attribute('height'),
Attribute('width'),
Attribute('molecules'),
Attribute('geometries'),
]
#-------------------------------------------------------------------------
@registry.register
class ChemicalEquationInput(InputTypeBase):
"""
An input type for entering chemical equations. Supports live preview.
Example:
<chemicalequationinput size="50"/>
options: size -- width of the textbox.
"""
template = "chemicalequationinput.html"
tags = ['chemicalequationinput']
@classmethod
def get_attributes(cls):
"""
Can set size of text field.
"""
return [Attribute('size', '20'),
Attribute('label', ''), ]
def _extra_context(self):
"""
TODO (vshnayder): Get rid of this once we have a standard way of requiring js to be loaded.
"""
return {
'previewer': '{static_url}js/capa/chemical_equation_preview.js'.format(
static_url=self.capa_system.STATIC_URL),
}
def handle_ajax(self, dispatch, data):
"""
Since we only have chemcalc preview this input, check to see if it
matches the corresponding dispatch and send it through if it does
"""
if dispatch == 'preview_chemcalc':
return self.preview_chemcalc(data)
return {}
def preview_chemcalc(self, data):
"""
Render an html preview of a chemical formula or equation. get should
contain a key 'formula' and value 'some formula string'.
Returns a json dictionary:
{
'preview' : 'the-preview-html' or ''
'error' : 'the-error' or ''
}
"""
_ = self.capa_system.i18n.ugettext
result = {'preview': '',
'error': ''}
try:
formula = data['formula']
except KeyError:
result['error'] = _("No formula specified.")
return result
try:
result['preview'] = chemcalc.render_to_html(formula)
except pyparsing.ParseException as err:
result['error'] = _("Couldn't parse formula: {error_msg}").format(error_msg=err.msg)
except Exception:
# this is unexpected, so log
log.warning(
"Error while previewing chemical formula", exc_info=True)
result['error'] = _("Error while rendering preview")
return result
#-------------------------------------------------------------------------
@registry.register
class FormulaEquationInput(InputTypeBase):
"""
An input type for entering formula equations. Supports live preview.
Example:
<formulaequationinput size="50" label="Enter the equation for motion"/>
options: size -- width of the textbox.
"""
template = "formulaequationinput.html"
tags = ['formulaequationinput']
@classmethod
def get_attributes(cls):
"""
Can set size of text field.
"""
return [
Attribute('size', '20'),
Attribute('inline', False),
Attribute('label', ''),
]
def _extra_context(self):
"""
TODO (vshnayder): Get rid of 'previewer' once we have a standard way of requiring js to be loaded.
"""
# `reported_status` is basically `status`, except we say 'unanswered'
return {
'previewer': '{static_url}js/capa/src/formula_equation_preview.js'.format(
static_url=self.capa_system.STATIC_URL),
}
def handle_ajax(self, dispatch, get):
"""
Since we only have formcalc preview this input, check to see if it
matches the corresponding dispatch and send it through if it does
"""
if dispatch == 'preview_formcalc':
return self.preview_formcalc(get)
return {}
def preview_formcalc(self, get):
"""
Render an preview of a formula or equation. `get` should
contain a key 'formula' with a math expression.
Returns a json dictionary:
{
'preview' : '<some latex>' or ''
'error' : 'the-error' or ''
'request_start' : <time sent with request>
}
"""
_ = self.capa_system.i18n.ugettext
result = {'preview': '',
'error': ''}
try:
formula = get['formula']
except KeyError:
result['error'] = _("No formula specified.")
return result
result['request_start'] = int(get.get('request_start', 0))
try:
# TODO add references to valid variables and functions
# At some point, we might want to mark invalid variables as red
# or something, and this is where we would need to pass those in.
result['preview'] = latex_preview(formula)
except pyparsing.ParseException as err:
result['error'] = _("Sorry, couldn't parse formula")
result['formula'] = formula
except Exception:
# this is unexpected, so log
log.warning(
"Error while previewing formula", exc_info=True
)
result['error'] = _("Error while rendering preview")
return result
#-----------------------------------------------------------------------------
@registry.register
class DragAndDropInput(InputTypeBase):
"""
Input for drag and drop problems. Allows student to drag and drop images and
labels to base image.
"""
template = 'drag_and_drop_input.html'
tags = ['drag_and_drop_input']
def setup(self):
def parse(tag, tag_type):
"""Parses <tag ... /> xml element to dictionary. Stores
'draggable' and 'target' tags with attributes to dictionary and
returns last.
Args:
tag: xml etree element <tag...> with attributes
tag_type: 'draggable' or 'target'.
                If tag_type is 'draggable': all attributes except id
                (label, icon, can_reuse) are optional.
                If tag_type is 'target': all attributes (id, x, y, w, h)
                are required. (x, y) - coordinates of center of target,
                w, h - width and height of target.
Returns:
                Dictionary of values of attributes:
                dict{'id': smth, 'label': smth, 'icon': smth,
                'can_reuse': smth}.
"""
tag_attrs = dict()
tag_attrs['draggable'] = {
'id': Attribute._sentinel,
'label': "", 'icon': "",
'can_reuse': ""
}
tag_attrs['target'] = {
'id': Attribute._sentinel,
'x': Attribute._sentinel,
'y': Attribute._sentinel,
'w': Attribute._sentinel,
'h': Attribute._sentinel
}
dic = dict()
for attr_name in tag_attrs[tag_type].keys():
dic[attr_name] = Attribute(attr_name,
default=tag_attrs[tag_type][attr_name]).parse_from_xml(tag)
if tag_type == 'draggable' and not self.no_labels:
dic['label'] = dic['label'] or dic['id']
if tag_type == 'draggable':
dic['target_fields'] = [parse(target, 'target') for target in
tag.iterchildren('target')]
return dic
# add labels to images?:
self.no_labels = Attribute('no_labels',
default="False").parse_from_xml(self.xml)
to_js = dict()
# image drag and drop onto
to_js['base_image'] = Attribute('img').parse_from_xml(self.xml)
        # outline places on image where to drag and drop
to_js['target_outline'] = Attribute('target_outline',
default="False").parse_from_xml(self.xml)
# one draggable per target?
to_js['one_per_target'] = Attribute('one_per_target',
default="True").parse_from_xml(self.xml)
# list of draggables
to_js['draggables'] = [parse(draggable, 'draggable') for draggable in
self.xml.iterchildren('draggable')]
# list of targets
to_js['targets'] = [parse(target, 'target') for target in
self.xml.iterchildren('target')]
# custom background color for labels:
label_bg_color = Attribute('label_bg_color',
default=None).parse_from_xml(self.xml)
if label_bg_color:
to_js['label_bg_color'] = label_bg_color
self.loaded_attributes['drag_and_drop_json'] = json.dumps(to_js)
self.to_render.add('drag_and_drop_json')
#-------------------------------------------------------------------------
@registry.register
class EditAMoleculeInput(InputTypeBase):
"""
An input type for edit-a-molecule. Integrates with the molecule editor java applet.
Example:
<editamolecule size="50"/>
options: size -- width of the textbox.
"""
template = "editamolecule.html"
tags = ['editamoleculeinput']
@classmethod
def get_attributes(cls):
"""
Can set size of text field.
"""
return [Attribute('file'),
Attribute('missing', None)]
def _extra_context(self):
context = {
'applet_loader': '{static_url}js/capa/editamolecule.js'.format(
static_url=self.capa_system.STATIC_URL),
}
return context
#-----------------------------------------------------------------------------
@registry.register
class DesignProtein2dInput(InputTypeBase):
"""
An input type for design of a protein in 2D. Integrates with the Protex java applet.
Example:
<designprotein2d width="800" hight="500" target_shape="E;NE;NW;W;SW;E;none" />
"""
template = "designprotein2dinput.html"
tags = ['designprotein2dinput']
@classmethod
def get_attributes(cls):
"""
        Note: width, height, and target_shape are required.
"""
return [Attribute('width'),
Attribute('height'),
Attribute('target_shape')
]
def _extra_context(self):
context = {
'applet_loader': '{static_url}js/capa/design-protein-2d.js'.format(
static_url=self.capa_system.STATIC_URL),
}
return context
#-----------------------------------------------------------------------------
@registry.register
class EditAGeneInput(InputTypeBase):
"""
An input type for editing a gene.
Integrates with the genex GWT application.
Example:
<editagene genex_dna_sequence="CGAT" genex_problem_number="1"/>
"""
template = "editageneinput.html"
tags = ['editageneinput']
@classmethod
def get_attributes(cls):
"""
        Note: genex_dna_sequence and genex_problem_number are required.
"""
return [Attribute('genex_dna_sequence'),
Attribute('genex_problem_number')
]
def _extra_context(self):
context = {
'applet_loader': '{static_url}js/capa/edit-a-gene.js'.format(
static_url=self.capa_system.STATIC_URL),
}
return context
#---------------------------------------------------------------------
@registry.register
class AnnotationInput(InputTypeBase):
"""
Input type for annotations: students can enter some notes or other text
    (currently ungraded), and then choose from a set of tags/options, which are graded.
Example:
<annotationinput>
<title>Annotation Exercise</title>
<text>
They are the ones who, at the public assembly, had put savage derangement [ate] into my thinking
[phrenes] |89 on that day when I myself deprived Achilles of his honorific portion [geras]
</text>
<comment>Agamemnon says that ate or 'derangement' was the cause of his actions: why could Zeus say the same thing?</comment>
<comment_prompt>Type a commentary below:</comment_prompt>
<tag_prompt>Select one tag:</tag_prompt>
<options>
<option choice="correct">ate - both a cause and an effect</option>
<option choice="incorrect">ate - a cause</option>
<option choice="partially-correct">ate - an effect</option>
</options>
</annotationinput>
# TODO: allow ordering to be randomized
"""
template = "annotationinput.html"
tags = ['annotationinput']
def setup(self):
xml = self.xml
self.debug = False # set to True to display extra debug info with input
self.return_to_annotation = True # return only works in conjunction with annotatable xmodule
self.title = xml.findtext('./title', 'Annotation Exercise')
self.text = xml.findtext('./text')
self.comment = xml.findtext('./comment')
self.comment_prompt = xml.findtext(
'./comment_prompt', 'Type a commentary below:')
self.tag_prompt = xml.findtext('./tag_prompt', 'Select one tag:')
self.options = self._find_options()
# Need to provide a value that JSON can parse if there is no
# student-supplied value yet.
if self.value == '':
self.value = 'null'
self._validate_options()
def _find_options(self):
""" Returns an array of dicts where each dict represents an option. """
elements = self.xml.findall('./options/option')
return [{
'id': index,
'description': option.text,
'choice': option.get('choice')
} for (index, option) in enumerate(elements)]
def _validate_options(self):
""" Raises a ValueError if the choice attribute is missing or invalid. """
valid_choices = ('correct', 'partially-correct', 'incorrect')
for option in self.options:
choice = option['choice']
if choice is None:
raise ValueError('Missing required choice attribute.')
elif choice not in valid_choices:
raise ValueError('Invalid choice attribute: {0}. Must be one of: {1}'.format(
choice, ', '.join(valid_choices)))
def _unpack(self, json_value):
""" Unpacks the json input state into a dict. """
d = json.loads(json_value)
if not isinstance(d, dict):
d = {}
comment_value = d.get('comment', '')
if not isinstance(comment_value, basestring):
comment_value = ''
options_value = d.get('options', [])
if not isinstance(options_value, list):
options_value = []
return {
'options_value': options_value,
'has_options_value': len(options_value) > 0, # for convenience
'comment_value': comment_value,
}
def _extra_context(self):
extra_context = {
'title': self.title,
'text': self.text,
'comment': self.comment,
'comment_prompt': self.comment_prompt,
'tag_prompt': self.tag_prompt,
'options': self.options,
'return_to_annotation': self.return_to_annotation,
'debug': self.debug
}
extra_context.update(self._unpack(self.value))
return extra_context
@registry.register
class ChoiceTextGroup(InputTypeBase):
"""
Groups of radiobutton/checkboxes with text inputs.
Examples:
RadioButton problem
<problem>
<startouttext/>
A person rolls a standard die 100 times and records the results.
On the first roll they received a "1". Given this information
select the correct choice and fill in numbers to make it accurate.
<endouttext/>
<choicetextresponse>
<radiotextgroup label="What is the correct choice?">
<choice correct="false">The lowest number rolled was:
<decoy_input/> and the highest number rolled was:
<decoy_input/> .</choice>
<choice correct="true">The lowest number rolled was <numtolerance_input answer="1"/>
and there is not enough information to determine the highest number rolled.
</choice>
<choice correct="false">There is not enough information to determine the lowest
number rolled, and the highest number rolled was:
<decoy_input/> .
</choice>
</radiotextgroup>
</choicetextresponse>
</problem>
CheckboxProblem:
<problem>
<startouttext/>
A person randomly selects 100 times, with replacement, from the list of numbers \(\sqrt{2}\) , 2, 3, 4 ,5 ,6
and records the results. The first number they pick is \(\sqrt{2}\) Given this information
select the correct choices and fill in numbers to make them accurate.
<endouttext/>
<choicetextresponse>
<checkboxtextgroup label="What is the answer?">
<choice correct="true">
The lowest number selected was <numtolerance_input answer="1.4142" tolerance="0.01"/>
</choice>
<choice correct="false">
The highest number selected was <decoy_input/> .
</choice>
<choice correct="true">There is not enough information given to determine the highest number
which was selected.
</choice>
<choice correct="false">There is not enough information given to determine the lowest number
selected.
</choice>
</checkboxtextgroup>
</choicetextresponse>
</problem>
In the preceding examples, the <decoy_input/> is used to generate a text input HTML element
in the problem's display. Since it is inside an incorrect choice, no answer given
for it will be correct, and thus specifying an answer for it is not needed.
"""
template = "choicetext.html"
tags = ['radiotextgroup', 'checkboxtextgroup']
def setup(self):
"""
Performs setup for the initial rendering of the problem.
`self.html_input_type` determines whether this problem is displayed
with radiobuttons or checkboxes.
If the initial value of `self.value` is '', change it to {} so that
the template has an empty dictionary to work with.
Sets `self.choices` to the return value of `self.extract_choices`.
"""
self.text_input_values = {}
if self.tag == 'radiotextgroup':
self.html_input_type = "radio"
elif self.tag == 'checkboxtextgroup':
self.html_input_type = "checkbox"
else:
_ = self.capa_system.i18n.ugettext
msg = _("{input_type}: unexpected tag {tag_name}").format(
input_type="ChoiceTextGroup", tag_name=self.tag
)
raise Exception(msg)
if self.value == '':
# Make `value` an empty dictionary, if it currently has an empty
# value. This is necessary because the template expects a
# dictionary.
self.value = {}
self.choices = self.extract_choices(self.xml, self.capa_system.i18n)
@classmethod
def get_attributes(cls):
"""
Returns a list of `Attribute` for this problem type
"""
_ = lambda text: text
return [
Attribute("show_correctness", "always"),
Attribute("submitted_message", _("Answer received.")),
Attribute("label", ""),
]
def _extra_context(self):
"""
Returns a dictionary of extra content necessary for rendering this InputType.
`input_type` is either 'radio' or 'checkbox' indicating whether the choices for
this problem will have radiobuttons or checkboxes.
"""
return {
'input_type': self.html_input_type,
'choices': self.choices
}
@staticmethod
def extract_choices(element, i18n):
"""
Extracts choices from the xml for this problem type.
If we have XML as follows (choice names will have been assigned
by now):
<radiotextgroup>
<choice correct = "true" name ="1_2_1_choiceinput_0bc">
The number
<numtolerance_input name = "1_2_1_choiceinput0_numtolerance_input_0" answer="5"/>
Is the mean of the list.
</choice>
<choice correct = "false" name = "1_2_1_choiceinput_1bc>
False demonstration choice
</choice>
</radiotextgroup>
Choices are used for rendering the problem properly.
The function will set up choices as follows:
choices =[
("1_2_1_choiceinput_0bc",
[{'type': 'text', 'contents': "The number", 'tail_text': '',
'value': ''
},
{'type': 'textinput',
'contents': "1_2_1_choiceinput0_numtolerance_input_0",
'tail_text': 'Is the mean of the list',
'value': ''
}
]
),
("1_2_1_choiceinput_1bc",
[{'type': 'text', 'contents': "False demonstration choice",
'tail_text': '',
'value': ''
}
]
)
]
"""
_ = i18n.ugettext
choices = []
for choice in element:
if choice.tag != 'choice':
msg = u"[capa.inputtypes.extract_choices] {0}".format(
# Translators: a "tag" is an XML element, such as "<b>" in HTML
_("Expected a {expected_tag} tag; got {given_tag} instead").format(
expected_tag=u"<choice>",
given_tag=choice.tag,
)
)
raise Exception(msg)
components = []
choice_text = ''
if choice.text is not None:
choice_text += choice.text
# Initialize our dict for the next content
adder = {
'type': 'text',
'contents': choice_text,
'tail_text': '',
'value': ''
}
components.append(adder)
for elt in choice:
# for elements in the choice e.g. <text> <numtolerance_input>
adder = {
'type': 'text',
'contents': '',
'tail_text': '',
'value': ''
}
tag_type = elt.tag
# If the current `elt` is a <numtolerance_input>, set the
# `adder`'s 'type' to 'textinput' and its 'contents' to
# the `elt`'s name.
# Treat decoy_inputs and numtolerance_inputs the same in order
# to prevent students from reading the HTML and figuring out
# which inputs are valid.
if tag_type in ('numtolerance_input', 'decoy_input'):
# We set this to textinput, so that we get a textinput html
# element.
adder['type'] = 'textinput'
adder['contents'] = elt.get('name')
else:
adder['contents'] = elt.text
# Add any tail text ("is the mean" in the example)
adder['tail_text'] = elt.tail if elt.tail else ''
components.append(adder)
# Add the tuple for the current choice to the list of choices
choices.append((choice.get("name"), components))
return choices
|
czpython/django-cms
|
refs/heads/develop
|
cms/tests/test_publisher.py
|
1
|
# -*- coding: utf-8 -*-
from djangocms_text_ckeditor.models import Text
from django.contrib.auth import get_user_model
from django.contrib.sites.models import Site
from django.core.cache import cache
from django.core.management.base import CommandError
from django.core.management import call_command
from django.core.urlresolvers import reverse
from django.utils.translation import override as force_language
from cms.api import create_page, add_plugin, create_title
from cms.constants import PUBLISHER_STATE_PENDING, PUBLISHER_STATE_DEFAULT, PUBLISHER_STATE_DIRTY
from cms.management.commands.subcommands.publisher_publish import PublishCommand
from cms.models import CMSPlugin, Page, TreeNode, Title
from cms.plugin_pool import plugin_pool
from cms.test_utils.testcases import CMSTestCase as TestCase
from cms.test_utils.util.context_managers import StdoutOverride
from cms.test_utils.util.fuzzy_int import FuzzyInt
from cms.utils.urlutils import admin_reverse
class PublisherCommandTests(TestCase):
"""
Tests for the publish command
"""
def test_command_line_should_raise_without_superuser(self):
with self.assertRaises(CommandError):
com = PublishCommand()
com.handle()
def test_command_line_publishes_zero_pages_on_empty_db(self):
# we need to create a superuser (the db is empty)
get_user_model().objects.create_superuser('djangocms', 'cms@example.com', '123456')
pages_from_output = 0
published_from_output = 0
with StdoutOverride() as buffer:
# Now we don't expect it to raise, but we need to redirect IO
call_command('cms', 'publisher-publish')
lines = buffer.getvalue().split('\n') #NB: readlines() doesn't work
for line in lines:
if 'Total' in line:
pages_from_output = int(line.split(':')[1])
elif 'Published' in line:
published_from_output = int(line.split(':')[1])
self.assertEqual(pages_from_output, 0)
self.assertEqual(published_from_output, 0)
def test_command_line_ignores_draft_page(self):
# we need to create a superuser (the db is empty)
get_user_model().objects.create_superuser('djangocms', 'cms@example.com', '123456')
create_page("The page!", "nav_playground.html", "en", published=False)
pages_from_output = 0
published_from_output = 0
with StdoutOverride() as buffer:
# Now we don't expect it to raise, but we need to redirect IO
call_command('cms', 'publisher-publish')
lines = buffer.getvalue().split('\n') #NB: readlines() doesn't work
for line in lines:
if 'Total' in line:
pages_from_output = int(line.split(':')[1])
elif 'Published' in line:
published_from_output = int(line.split(':')[1])
self.assertEqual(pages_from_output, 0)
self.assertEqual(published_from_output, 0)
self.assertEqual(Page.objects.public().count(), 0)
def test_command_line_publishes_draft_page(self):
# we need to create a superuser (the db is empty)
get_user_model().objects.create_superuser('djangocms', 'cms@example.com', '123456')
create_page("The page!", "nav_playground.html", "en", published=False)
pages_from_output = 0
published_from_output = 0
with StdoutOverride() as buffer:
# Now we don't expect it to raise, but we need to redirect IO
call_command('cms', 'publisher-publish', include_unpublished=True)
lines = buffer.getvalue().split('\n') #NB: readlines() doesn't work
for line in lines:
if 'Total' in line:
pages_from_output = int(line.split(':')[1])
elif 'Published' in line:
published_from_output = int(line.split(':')[1])
self.assertEqual(pages_from_output, 1)
self.assertEqual(published_from_output, 1)
self.assertEqual(Page.objects.public().count(), 1)
def test_command_line_publishes_selected_language(self):
# we need to create a superuser (the db is empty)
get_user_model().objects.create_superuser('djangocms', 'cms@example.com', '123456')
page = create_page("en title", "nav_playground.html", "en")
title = create_title('de', 'de title', page)
title.published = True
title.save()
title = create_title('fr', 'fr title', page)
title.published = True
title.save()
pages_from_output = 0
published_from_output = 0
with StdoutOverride() as buffer:
# Now we don't expect it to raise, but we need to redirect IO
call_command('cms', 'publisher-publish', language='de')
lines = buffer.getvalue().split('\n') #NB: readlines() doesn't work
for line in lines:
if 'Total' in line:
pages_from_output = int(line.split(':')[1])
elif 'Published' in line:
published_from_output = int(line.split(':')[1])
self.assertEqual(pages_from_output, 1)
self.assertEqual(published_from_output, 1)
self.assertEqual(Page.objects.public().count(), 1)
public = Page.objects.public()[0]
languages = sorted(public.title_set.values_list('language', flat=True))
self.assertEqual(languages, ['de'])
def test_command_line_publishes_selected_language_drafts(self):
# we need to create a superuser (the db is empty)
get_user_model().objects.create_superuser('djangocms', 'cms@example.com', '123456')
page = create_page("en title", "nav_playground.html", "en")
title = create_title('de', 'de title', page)
title.published = False
title.save()
title = create_title('fr', 'fr title', page)
title.published = False
title.save()
pages_from_output = 0
published_from_output = 0
with StdoutOverride() as buffer:
# Now we don't expect it to raise, but we need to redirect IO
call_command('cms', 'publisher-publish', language='de', include_unpublished=True)
lines = buffer.getvalue().split('\n') #NB: readlines() doesn't work
for line in lines:
if 'Total' in line:
pages_from_output = int(line.split(':')[1])
elif 'Published' in line:
published_from_output = int(line.split(':')[1])
self.assertEqual(pages_from_output, 1)
self.assertEqual(published_from_output, 1)
self.assertEqual(Page.objects.public().count(), 1)
public = Page.objects.public()[0]
languages = sorted(public.title_set.values_list('language', flat=True))
self.assertEqual(languages, ['de'])
def test_table_name_patching(self):
"""
This tests the plugin models patching when publishing from the command line
"""
User = get_user_model()
User.objects.create_superuser('djangocms', 'cms@example.com', '123456')
create_page("The page!", "nav_playground.html", "en", published=True)
draft = Page.objects.drafts()[0]
draft.reverse_id = 'a_test' # we have to change *something*
draft.save()
add_plugin(draft.placeholders.get(slot=u"body"),
u"TextPlugin", u"en", body="Test content")
draft.publish('en')
add_plugin(draft.placeholders.get(slot=u"body"),
u"TextPlugin", u"en", body="Test content")
# Manually undoing table name patching
Text._meta.db_table = 'djangocms_text_ckeditor_text'
plugin_pool.patched = False
with StdoutOverride():
# Now we don't expect it to raise, but we need to redirect IO
call_command('cms', 'publisher-publish')
not_drafts = len(Page.objects.filter(publisher_is_draft=False))
drafts = len(Page.objects.filter(publisher_is_draft=True))
self.assertEqual(not_drafts, 1)
self.assertEqual(drafts, 1)
def test_command_line_publishes_one_page(self):
"""
Publisher always creates two Page objects for every CMS page,
one is_draft and one is_public.
The public version of the page can be either published or not.
This bit of code sometimes uses manager methods and sometimes manual
filters on purpose (this helps test the managers).
"""
# we need to create a superuser (the db is empty)
get_user_model().objects.create_superuser('djangocms', 'cms@example.com', '123456')
# Now, let's create a page. That actually creates 2 Page objects
create_page("The page!", "nav_playground.html", "en", published=True)
draft = Page.objects.drafts()[0]
draft.reverse_id = 'a_test' # we have to change *something*
draft.save()
pages_from_output = 0
published_from_output = 0
with StdoutOverride() as buffer:
# Now we don't expect it to raise, but we need to redirect IO
call_command('cms', 'publisher-publish')
lines = buffer.getvalue().split('\n') #NB: readlines() doesn't work
for line in lines:
if 'Total' in line:
pages_from_output = int(line.split(':')[1])
elif 'Published' in line:
published_from_output = int(line.split(':')[1])
self.assertEqual(pages_from_output, 1)
self.assertEqual(published_from_output, 1)
# Sanity check the database (we should have one draft and one public)
not_drafts = len(Page.objects.filter(publisher_is_draft=False))
drafts = len(Page.objects.filter(publisher_is_draft=True))
self.assertEqual(not_drafts, 1)
self.assertEqual(drafts, 1)
# Now check that the non-draft has the attribute we set to the draft.
non_draft = Page.objects.public()[0]
self.assertEqual(non_draft.reverse_id, 'a_test')
def test_command_line_publish_multiple_languages(self):
# we need to create a superuser (the db is empty)
get_user_model().objects.create_superuser('djangocms', 'cms@example.com', '123456')
# Create a draft page with two published titles
page = create_page(u"The page!", "nav_playground.html", "en", published=False)
title = create_title('de', 'ja', page)
title.published = True
title.save()
title = create_title('fr', 'non', page)
title.published = True
title.save()
with StdoutOverride():
# Now we don't expect it to raise, but we need to redirect IO
call_command('cms', 'publisher-publish')
public = Page.objects.public()[0]
languages = sorted(public.title_set.values_list('language', flat=True))
self.assertEqual(languages, ['de', 'fr'])
def test_command_line_publish_one_site(self):
get_user_model().objects.create_superuser('djangocms', 'cms@example.com', '123456')
siteA = Site.objects.create(domain='a.example.com', name='a.example.com')
siteB = Site.objects.create(domain='b.example.com', name='b.example.com')
#example.com
create_page(u"example.com homepage", "nav_playground.html", "en", published=True)
#a.example.com
create_page(u"a.example.com homepage", "nav_playground.html", "de", site=siteA, published=True)
#b.example.com
create_page(u"b.example.com homepage", "nav_playground.html", "de", site=siteB, published=True)
create_page(u"b.example.com about", "nav_playground.html", "nl", site=siteB, published=True)
with StdoutOverride() as buffer:
# Now we don't expect it to raise, but we need to redirect IO
call_command('cms', 'publisher-publish', site=siteB.id)
lines = buffer.getvalue().split('\n') #NB: readlines() doesn't work
for line in lines:
if 'Total' in line:
pages_from_output = int(line.split(':')[1])
elif 'Published' in line:
published_from_output = int(line.split(':')[1])
self.assertEqual(pages_from_output, 2)
self.assertEqual(published_from_output, 2)
def test_command_line_publish_multiple_languages_check_count(self):
"""
Publishing one page with multiple languages still counts
as one page. This test case checks whether it works
as expected.
"""
# we need to create a superuser (the db is empty)
get_user_model().objects.create_superuser('djangocms', 'cms@example.com', '123456')
# Now, let's create a page with 2 languages.
page = create_page("en title", "nav_playground.html", "en", published=True)
create_title("de", "de title", page)
page.publish("de")
pages_from_output = 0
published_from_output = 0
with StdoutOverride() as buffer:
# Now we don't expect it to raise, but we need to redirect IO
call_command('cms', 'publisher-publish')
lines = buffer.getvalue().split('\n') #NB: readlines() doesn't work
for line in lines:
if 'Total' in line:
pages_from_output = int(line.split(':')[1])
elif 'Published' in line:
published_from_output = int(line.split(':')[1])
self.assertEqual(pages_from_output, 1)
self.assertEqual(published_from_output, 1)
class PublishingTests(TestCase):
def create_page(self, title=None, **kwargs):
return create_page(title or self._testMethodName,
"nav_playground.html", "en", **kwargs)
def test_publish_single(self):
name = self._testMethodName
drafts = Page.objects.drafts()
public = Page.objects.public()
page = self.create_page(name, published=False)
create_title('de', 'de-page', page)
create_title('fr', 'fr-page', page)
self.assertNeverPublished(page)
self.assertObjectExist(drafts, title_set__title=name)
self.assertObjectDoesNotExist(public, title_set__title=name)
self.assertObjectDoesNotExist(public.published(language="en"), title_set__title=name)
page.publish("en")
self.assertPublished(page.reload())
self.assertObjectExist(drafts, title_set__title=name)
self.assertObjectExist(public, title_set__title=name)
self.assertFalse(public.published(language="de").exists())
self.assertFalse(public.published(language="fr").exists())
self.assertSequenceEqual(page.publisher_public.get_languages(), ['en'])
def test_publish_admin(self):
name = 'test_admin'
drafts = Page.objects.drafts()
public = Page.objects.public()
page = self.create_page(name, published=False)
create_title('de', 'de-page', page)
create_title('fr', 'fr-page', page)
self.assertNeverPublished(page)
self.assertObjectExist(drafts, title_set__title=name)
self.assertObjectDoesNotExist(public, title_set__title=name)
self.assertObjectDoesNotExist(public.published(language="en"), title_set__title=name)
with self.login_user_context(self.get_superuser()):
response = self.client.post(admin_reverse("cms_page_publish_page", args=[page.pk, 'en']))
self.assertEqual(response.status_code, 302)
page = page.reload()
self.assertPublished(page)
self.assertObjectExist(drafts, title_set__title=name)
self.assertObjectExist(public, title_set__title=name)
self.assertFalse(public.published(language="de").exists())
self.assertFalse(public.published(language="fr").exists())
self.assertSequenceEqual(page.publisher_public.get_languages(), ['en'])
def test_publish_wrong_lang(self):
page = self.create_page("test_admin", published=False)
superuser = self.get_superuser()
with self.settings(
LANGUAGES=(('de', 'de'), ('en', 'en')),
CMS_LANGUAGES={1: [{'code': 'en', 'name': 'en', 'fallbacks': ['fr', 'de'], 'public': True}]}
):
with self.login_user_context(superuser):
with force_language('de'):
response = self.client.post(admin_reverse("cms_page_publish_page", args=[page.pk, 'en']))
self.assertEqual(response.status_code, 302)
page = Page.objects.get(pk=page.pk)
def test_publish_missing_page(self):
superuser = self.get_superuser()
with self.login_user_context(superuser):
response = self.client.post(admin_reverse("cms_page_publish_page", args=[999999, 'en']))
self.assertEqual(response.status_code, 404)
def test_publish_child_first(self):
parent = self.create_page('parent', published=False)
child = self.create_page('child', published=True)
child.move_page(parent.node, 'last-child')
drafts = Page.objects.drafts()
public = Page.objects.public()
self.assertPending(child.reload())
self.assertNeverPublished(parent.reload())
self.assertObjectExist(drafts, title_set__title='parent')
self.assertObjectDoesNotExist(public, title_set__title='parent')
self.assertObjectDoesNotExist(public.published(language='en'), title_set__title='parent')
self.assertObjectExist(drafts, title_set__title='child')
self.assertObjectExist(public, title_set__title='child')
self.assertObjectDoesNotExist(public.published(language='en'), title_set__title='child')
parent.reload().publish("en")
# Cascade publish for all pending descendants
for name in ('parent', 'child'):
page = drafts.get(title_set__title=name)
self.assertPublished(page)
self.assertObjectExist(drafts, title_set__title=name)
self.assertObjectExist(public, title_set__title=name)
self.assertObjectExist(public.published(language='en'), title_set__title=name)
def test_simple_publisher(self):
"""
Creates the stuff needed for these tests.
Please keep this up-to-date (the docstring!)
A
/ \
B C
"""
# Create a simple tree of 3 pages
pageA = create_page("Page A", "nav_playground.html", "en",
published=True)
pageB = create_page("Page B", "nav_playground.html", "en", parent=pageA,
published=True)
pageC = create_page("Page C", "nav_playground.html", "en", parent=pageA,
published=False)
# Assert A and B are published, C unpublished
self.assertTrue(pageA.publisher_public_id)
self.assertTrue(pageB.publisher_public_id)
self.assertTrue(not pageC.publisher_public_id)
self.assertEqual(len(Page.objects.public().published(language="en")), 2)
# Let's publish C now.
pageC.publish("en")
# Assert all are published
self.assertTrue(pageA.publisher_public_id)
self.assertTrue(pageB.publisher_public_id)
self.assertTrue(pageC.publisher_public_id)
self.assertEqual(len(Page.objects.public().published(language="en")), 3)
def test_i18n_publishing(self):
page = self.create_page('parent', published=True)
self.assertEqual(Title.objects.all().count(), 2)
create_title("de", "vater", page)
self.assertEqual(Title.objects.all().count(), 3)
self.assertEqual(Title.objects.filter(published=True).count(), 2)
page.publish('de')
self.assertEqual(Title.objects.all().count(), 4)
self.assertEqual(Title.objects.filter(published=True).count(), 4)
def test_unpublish_unpublish(self):
name = self._testMethodName
page = self.create_page(name, published=True)
drafts = Page.objects.drafts()
published = Page.objects.public().published(language="en")
self.assertObjectExist(drafts, title_set__title=name)
self.assertObjectExist(published, title_set__title=name)
page.unpublish('en')
self.assertFalse(page.is_published('en'))
self.assertObjectExist(drafts, title_set__title=name)
self.assertObjectDoesNotExist(published, title_set__title=name)
page.publish('en')
self.assertTrue(page.publisher_public_id)
self.assertObjectExist(drafts, title_set__title=name)
self.assertObjectExist(published, title_set__title=name)
def test_delete_title_unpublish(self):
page = self.create_page('test', published=True)
sub_page = self.create_page('test2', published=True, parent=page)
self.assertPublished(page)
self.assertPublished(sub_page)
page.reload().delete_translations()
self.assertPending(sub_page.reload())
def test_modify_child_while_pending(self):
home = self.create_page("Home", published=True, in_navigation=True)
child = self.create_page("Child", published=True, parent=home,
in_navigation=False)
home.reload().unpublish('en')
self.assertPending(child.reload())
child.refresh_from_db()
child.in_navigation = True
child.save()
# assert draft dirty
self.assertTrue(child.is_published('en'))
self.assertTrue(child.get_title_obj('en').published)
self.assertEqual(child.get_publisher_state("en"), PUBLISHER_STATE_DIRTY)
# assert public is still unpublished
self.assertPending(child.publisher_public.reload())
home.reload().publish('en')
# assert draft still dirty
self.assertTrue(child.is_published('en'))
self.assertTrue(child.get_title_obj('en').published)
self.assertEqual(child.get_publisher_state("en"), PUBLISHER_STATE_DIRTY)
# assert public is published
self.assertPublished(child.publisher_public.reload())
def test_republish_with_descendants(self):
home = self.create_page("Home", published=True)
child = self.create_page("Child", published=True, parent=home)
grand_child = self.create_page("GC", published=True, parent=child)
# control
self.assertPublished(child)
self.assertPublished(grand_child)
home.reload().unpublish('en')
self.assertPending(child.reload())
self.assertPending(grand_child.reload())
home.reload().publish('en')
self.assertPublished(child.reload())
self.assertPublished(grand_child.reload())
def test_republish_with_dirty_children(self):
home = self.create_page("Home", published=True)
dirty1 = self.create_page("Dirty1", published=True, parent=home)
dirty2 = self.create_page("Dirty2", published=True, parent=home)
home = self.reload(home)
dirty1 = self.reload(dirty1)
dirty2 = self.reload(dirty2)
dirty1.in_navigation = True
dirty1.save()
home.unpublish('en')
dirty2 = self.reload(dirty2)
dirty2.in_navigation = True
dirty2.save()
dirty1 = self.reload(dirty1)
dirty2 = self.reload(dirty2)
self.assertTrue(dirty1.is_published)
self.assertTrue(dirty2.publisher_public_id)
self.assertEqual(dirty1.get_publisher_state("en"), PUBLISHER_STATE_DIRTY)
self.assertEqual(dirty2.get_publisher_state("en"), PUBLISHER_STATE_DIRTY)
home = self.reload(home)
with self.assertNumQueries(FuzzyInt(0, 100)):
home.publish('en')
dirty1 = self.reload(dirty1)
dirty2 = self.reload(dirty2)
self.assertTrue(dirty1.is_published("en"))
self.assertTrue(dirty2.is_published("en"))
self.assertTrue(dirty1.publisher_public.is_published("en"))
self.assertTrue(dirty2.publisher_public.is_published("en"))
self.assertEqual(dirty1.get_publisher_state("en"), PUBLISHER_STATE_DIRTY)
self.assertEqual(dirty2.get_publisher_state("en"), PUBLISHER_STATE_DIRTY)
def test_republish_with_unpublished_child(self):
"""
Unpub1 was never published, and unpub2 has been unpublished after the
fact. None of the grandchildren should become published.
"""
home = self.create_page("Home", published=True)
unpub1 = self.create_page("Unpub1", published=False, parent=home)
unpub2 = self.create_page("Unpub2", published=True, parent=home)
gc1 = self.create_page("GC1", published=True, parent=unpub1)
gc2 = self.create_page("GC2", published=True, parent=unpub2)
self.assertNeverPublished(unpub1)
self.assertNeverPublished(gc1)
self.assertPublished(unpub2)
self.assertPublished(gc2)
# Un-publish root page
home.reload().unpublish('en')
unpub1 = self.reload(unpub1)
unpub2 = self.reload(unpub2)
unpub2.unpublish('en') # Just marks this as not published
self.assertNeverPublished(unpub1)
self.assertNeverPublished(gc1)
self.assertUnpublished(unpub2.reload())
self.assertPending(gc2.reload())
def test_unpublish_with_descendants(self):
page = self.create_homepage("Page", "nav_playground.html", "en", published=True)
child = self.create_page("Child", parent=page, published=True)
self.create_page("Grandchild", parent=child, published=True)
page = page.reload()
child.reload()
drafts = Page.objects.drafts()
public = Page.objects.public()
self.assertEqual(public.published(language="en").count(), 3)
self.assertEqual(page.node.get_descendant_count(), 2)
base = reverse('pages-root')
for url in (base, base + 'child/', base + 'child/grandchild/'):
response = self.client.get(url)
self.assertEqual(response.status_code, 200, url)
for title in ('Page', 'Child', 'Grandchild'):
self.assertObjectExist(drafts, title_set__title=title)
self.assertObjectExist(public, title_set__title=title)
self.assertObjectExist(public.published(language="en"), title_set__title=title)
item = drafts.get(title_set__title=title)
self.assertTrue(item.publisher_public_id)
self.assertEqual(item.get_publisher_state('en'), PUBLISHER_STATE_DEFAULT)
self.assertTrue(page.unpublish('en'), 'Unpublish was not successful')
self.assertFalse(page.is_published('en'))
cache.clear()
for url in (base, base + 'child/', base + 'child/grandchild/'):
response = self.client.get(url)
self.assertEqual(response.status_code, 404)
for title in ('Page', 'Child', 'Grandchild'):
self.assertObjectExist(drafts, title_set__title=title)
self.assertObjectExist(public, title_set__title=title)
self.assertObjectDoesNotExist(public.published(language="en"), title_set__title=title)
item = drafts.get(title_set__title=title)
if title == 'Page':
self.assertFalse(item.is_published("en"))
self.assertFalse(item.publisher_public.is_published("en"))
self.assertTrue(page.is_dirty('en'))
else:
# The changes to the published subpages are simply that the
# published flag of the PUBLIC instance goes to false, and the
# publisher state is set to mark waiting for parent
self.assertFalse(item.is_published('en'), title)
self.assertTrue(item.get_title_obj('en').published, title)
self.assertFalse(item.publisher_public.is_published('en'), title)
self.assertEqual(item.get_publisher_state('en'), PUBLISHER_STATE_PENDING,
title)
self.assertTrue(item.is_dirty('en'), title)
def test_unpublish_with_dirty_descendants(self):
page = self.create_page("Page", published=True)
child = self.create_page("Child", parent=page, published=True)
gchild = self.create_page("Grandchild", parent=child, published=True)
child.in_navigation = True
child.save()
self.assertTrue(child.is_dirty("en"))
self.assertFalse(gchild.is_dirty('en'))
self.assertTrue(child.publisher_public.is_published('en'))
self.assertTrue(gchild.publisher_public.is_published('en'))
page.reload().unpublish('en')
child = self.reload(child)
gchild = self.reload(gchild)
# Descendants become dirty after unpublish
self.assertTrue(child.is_dirty('en'))
self.assertTrue(gchild.is_dirty('en'))
# However, their public version is still removed no matter what
self.assertFalse(child.publisher_public.is_published('en'))
self.assertFalse(gchild.publisher_public.is_published('en'))
def test_prepublish_descendants(self):
page = self.create_page("Page", published=True)
child_1 = self.create_page("Child", parent=page, published=False)
child_1_2 = self.create_page("Grandchild2", parent=child_1, published=False)
self.create_page("Grandchild3", parent=child_1, published=True)
# Reload "Child" page because it's tree attributes changed when adding
# children to it above.
child_1 = child_1.reload()
# Create the first child of "Child" page as a published root node
child_1_1 = self.create_page("Grandchild", published=True)
# Move first child to "Child"
child_1_1.move_page(target_node=child_1.node, position='first-child')
# Assert "Child" page is not published (we never published it)
self.assertNeverPublished(child_1)
self.assertNeverPublished(child_1_2)
# Assert "first child" is in pending state because
# it's parent the "Child" page is not published.
self.assertPending(child_1_1)
# Publish "Child page"
child_1.reload().publish('en')
# Publish "second child"
child_1_2.reload().publish('en')
self.assertPublished(child_1.reload())
self.assertPublished(child_1_2.reload())
# Assert "first child" is no longer in pending state
# and instead is in published state.
self.assertPublished(child_1_1.reload())
tree = (
(page, '0001'),
(child_1, '00010001'),
(child_1_1, '000100010001'),
(child_1_2, '000100010002'),
)
for page, path in tree:
self.assertEqual(self.reload(page.node).path, path)
def test_republish_multiple_root(self):
# TODO: The paths do not match expected behaviour
home = self.create_homepage("Page", "nav_playground.html", "en", published=True)
other = self.create_page("Another Page", published=True)
child = self.create_page("Child", published=True, parent=home)
child2 = self.create_page("Child", published=True, parent=other)
self.assertTrue(Page.objects.filter(is_home=True).count(), 2)
self.assertTrue(home.is_home)
home = home.reload()
self.assertTrue(home.publisher_public.is_home)
root = reverse('pages-root')
self.assertEqual(home.get_absolute_url(), root)
self.assertEqual(home.get_public_object().get_absolute_url(), root)
self.assertEqual(child.get_absolute_url(), root + 'child/')
self.assertEqual(child.get_public_object().get_absolute_url(), root + 'child/')
self.assertEqual(other.get_absolute_url(), root + 'another-page/')
self.assertEqual(other.get_public_object().get_absolute_url(), root + 'another-page/')
self.assertEqual(child2.get_absolute_url(), root + 'another-page/child/')
self.assertEqual(child2.get_public_object().get_absolute_url(), root + 'another-page/child/')
home = self.reload(home)
home.unpublish('en')
other.reload().set_as_homepage()
home = self.reload(home)
other = self.reload(other)
child = self.reload(child)
child2 = self.reload(child2)
self.assertFalse(home.is_home)
self.assertFalse(home.publisher_public.is_home)
self.assertTrue(other.is_home)
self.assertTrue(other.publisher_public.is_home)
self.assertEqual(other.get_absolute_url(), root)
self.assertEqual(other.get_public_object().get_absolute_url(), root)
self.assertEqual(child2.get_absolute_url(), root + 'child/')
self.assertEqual(child2.get_public_object().get_absolute_url(), root + 'child/')
self.assertEqual(home.get_absolute_url(), root + 'page/')
self.assertEqual(home.get_public_object().get_absolute_url(), root + 'page/')
self.assertEqual(child.get_absolute_url(), root + 'page/child/')
self.assertEqual(child.get_public_object().get_absolute_url(), root + 'page/child/')
home.publish('en')
home.set_as_homepage()
home = self.reload(home)
other = self.reload(other)
child = self.reload(child)
child2 = self.reload(child2)
self.assertTrue(home.is_home)
self.assertTrue(home.publisher_public.is_home)
self.assertEqual(home.get_absolute_url(), root)
self.assertEqual(home.get_public_object().get_absolute_url(), root)
self.assertEqual(child.get_absolute_url(), root + 'child/')
self.assertEqual(child.get_public_object().get_absolute_url(), root + 'child/')
self.assertEqual(other.get_absolute_url(), root + 'another-page/')
self.assertEqual(other.get_public_object().get_absolute_url(), root + 'another-page/')
self.assertEqual(child2.get_absolute_url(), root + 'another-page/child/')
self.assertEqual(child2.get_public_object().get_absolute_url(), root + 'another-page/child/')
def test_revert_contents(self):
user = self.get_superuser()
page = create_page("Page", "nav_playground.html", "en", published=True,
created_by=user)
placeholder = page.placeholders.get(slot=u"body")
deleted_plugin = add_plugin(placeholder, u"TextPlugin", u"en", body="Deleted content")
text_plugin = add_plugin(placeholder, u"TextPlugin", u"en", body="Public content")
page.publish('en')
# Modify and delete plugins
text_plugin.body = "<p>Draft content</p>"
text_plugin.save()
deleted_plugin.delete()
self.assertEqual(CMSPlugin.objects.count(), 3)
# Now let's revert and restore
page.revert_to_live('en')
self.assertEqual(page.get_publisher_state("en"), PUBLISHER_STATE_DEFAULT)
self.assertEqual(CMSPlugin.objects.count(), 4)
plugins = CMSPlugin.objects.filter(placeholder__page=page)
self.assertEqual(plugins.count(), 2)
plugins = [plugin.get_plugin_instance()[0] for plugin in plugins]
self.assertEqual(plugins[0].body, "Deleted content")
self.assertEqual(plugins[1].body, "Public content")
def test_revert_move(self):
parent = create_page("Parent", "nav_playground.html", "en", published=True)
parent_url = parent.get_absolute_url()
page = create_page("Page", "nav_playground.html", "en", published=True,
parent=parent)
other = create_page("Other", "nav_playground.html", "en", published=True)
other_url = other.get_absolute_url()
child = create_page("Child", "nav_playground.html", "en", published=True,
parent=page)
parent = parent.reload()
page = page.reload()
self.assertEqual(page.get_absolute_url(), parent_url + "page/")
self.assertEqual(child.get_absolute_url(), parent_url + "page/child/")
# Now let's move it (and the child)
page.move_page(other.node)
page = self.reload(page)
child = self.reload(child)
self.assertEqual(page.get_absolute_url(), other_url + "page/")
self.assertEqual(child.get_absolute_url(), other_url + "page/child/")
# Public version changed the url as well
self.assertEqual(page.publisher_public.get_absolute_url(), other_url + "page/")
self.assertEqual(child.publisher_public.get_absolute_url(), other_url + "page/child/")
def test_publish_works_with_descendants(self):
"""
For help understanding what this tests for, see:
http://articles.sitepoint.com/print/hierarchical-data-database
Creates this published structure:
home
/ \
item1 item2
/ \
subitem1 subitem2
"""
home_page = create_page("home", "nav_playground.html", "en",
published=True, in_navigation=False)
create_page("item1", "nav_playground.html", "en", parent=home_page,
published=True)
item2 = create_page("item2", "nav_playground.html", "en", parent=home_page,
published=True)
create_page("subitem1", "nav_playground.html", "en", parent=item2,
published=True)
create_page("subitem2", "nav_playground.html", "en", parent=item2,
published=True)
item2 = item2.reload()
self.assertEqual(Page.objects.filter(publisher_is_draft=False).count(), 5)
self.assertEqual(TreeNode.objects.count(), 5)
child_nodes = list(TreeNode.objects.filter(parent__isnull=False))
for idx, node in enumerate(child_nodes):
self.assertEqual(node.path[0:4], node.parent.path[0:4])
self.assertTrue(node.parent in node.get_ancestors())
self.assertTrue(node in node.parent.get_descendants())
self.assertTrue(node in node.parent.get_children())
# Now call publish again. The structure should not change.
item2.publish('en')
self.assertEqual(Page.objects.filter(publisher_is_draft=False).count(), 5)
self.assertEqual(TreeNode.objects.count(), 5)
child_nodes = list(TreeNode.objects.filter(parent__isnull=False))
for idx, node in enumerate(child_nodes):
self.assertEqual(node.path[0:4], node.parent.path[0:4])
self.assertTrue(node.parent in node.get_ancestors())
self.assertTrue(node in node.parent.get_descendants())
self.assertTrue(node in node.parent.get_children())
|
nthiep/global-ssh-server
|
refs/heads/master
|
gshproject/api/permissions.py
|
1
|
from rest_framework import permissions
from manage.models import Domain
from node.models import Machine
class IsAuthenticatedOrCreate(permissions.IsAuthenticated):
def has_permission(self, request, view):
if request.method == 'POST':
return True
return super(IsAuthenticatedOrCreate, self).has_permission(request, view)
class DomainAuthenticate(object):
def remove_space(self, string):
return string.replace(" ", "").replace("\r\n", "").replace("\t", "")
def has_permission(self, request, view):
if request.method == 'POST':
try:
domain = Domain.objects.get(domain=request.data['domain'])
machine = Machine.objects.get(id=str(request.data['id_machine']))
if domain and domain.check_password(request.data['password']):
if domain.filter_mode:
if domain.filter_type:
if machine.mac not in self.remove_space(domain.listmac).split(";"):
return False
else:
if machine.mac in self.remove_space(domain.listmac).split(";"):
return False
request.domain = domain
return True
except Domain.DoesNotExist:
return False
except:
# Malformed requests (e.g. missing keys) also deny access.
return False
return False
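# Hedged wiring sketch (hypothetical view, not part of this module): a DRF view
# could combine the two permission classes above roughly like
#
#   class DomainView(APIView):
#       permission_classes = (IsAuthenticatedOrCreate, DomainAuthenticate)
#
# POST requests then pass only when the supplied domain password checks out and
# the machine's MAC address clears the domain's whitelist/blacklist filter.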
|
ksmit799/Toontown-Source
|
refs/heads/master
|
toontown/speedchat/TTSCResistanceTerminal.py
|
6
|
from otp.speedchat.SCTerminal import SCTerminal
from toontown.chat import ResistanceChat
TTSCResistanceMsgEvent = 'TTSCResistanceMsg'
def decodeTTSCResistanceMsg(textId):
return ResistanceChat.getChatText(textId)
class TTSCResistanceTerminal(SCTerminal):
def __init__(self, textId, charges):
SCTerminal.__init__(self)
self.setCharges(charges)
self.textId = textId
self.text = ResistanceChat.getItemText(self.textId)
def isWhisperable(self):
return False
def handleSelect(self):
SCTerminal.handleSelect(self)
messenger.send(self.getEventName(TTSCResistanceMsgEvent), [self.textId])
|
vitaly4uk/django
|
refs/heads/master
|
django/contrib/gis/management/commands/ogrinspect.py
|
369
|
import argparse
from django.contrib.gis import gdal
from django.core.management.base import BaseCommand, CommandError
from django.utils.inspect import get_func_args
class LayerOptionAction(argparse.Action):
"""
Custom argparse action for the `ogrinspect` `layer_key` keyword option
which may be an integer or a string.
"""
def __call__(self, parser, namespace, value, option_string=None):
try:
setattr(namespace, self.dest, int(value))
except ValueError:
setattr(namespace, self.dest, value)
class ListOptionAction(argparse.Action):
"""
Custom argparse action for `ogrinspect` keywords that require
a string list. If the string is 'True'/'true' then the option
value will be a boolean instead.
"""
def __call__(self, parser, namespace, value, option_string=None):
if value.lower() == 'true':
setattr(namespace, self.dest, True)
else:
setattr(namespace, self.dest, value.split(','))
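# Illustrative behaviour of the two actions above (values are hypothetical; the
# dest names come from the arguments defined in add_arguments below):
#   --layer 2          -> options['layer_key'] == 2 (int)
#   --layer roads      -> options['layer_key'] == 'roads' (str)
#   --blank true       -> options['blank'] == True
#   --blank name,code  -> options['blank'] == ['name', 'code']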
class Command(BaseCommand):
help = ('Inspects the given OGR-compatible data source (e.g., a shapefile) and outputs\n'
'a GeoDjango model with the given model name. For example:\n'
' ./manage.py ogrinspect zipcode.shp Zipcode')
requires_system_checks = False
def add_arguments(self, parser):
parser.add_argument('data_source', help='Path to the data source.')
parser.add_argument('model_name', help='Name of the model to create.')
parser.add_argument('--blank', dest='blank',
action=ListOptionAction, default=False,
help='Use a comma separated list of OGR field names to add '
'the `blank=True` option to the field definition. Set to `true` '
'to apply to all applicable fields.')
parser.add_argument('--decimal', dest='decimal',
action=ListOptionAction, default=False,
help='Use a comma separated list of OGR float fields to '
'generate `DecimalField` instead of the default '
'`FloatField`. Set to `true` to apply to all OGR float fields.')
parser.add_argument('--geom-name', dest='geom_name', default='geom',
help='Specifies the model name for the Geometry Field '
'(defaults to `geom`)')
parser.add_argument('--layer', dest='layer_key',
action=LayerOptionAction, default=0,
help='The key for specifying which layer in the OGR data '
'source to use. Defaults to 0 (the first layer). May be '
'an integer or a string identifier for the layer.')
parser.add_argument('--multi-geom', action='store_true',
dest='multi_geom', default=False,
help='Treat the geometry in the data source as a geometry collection.')
parser.add_argument('--name-field', dest='name_field',
help='Specifies a field name to return for the `__unicode__`/`__str__` function.')
parser.add_argument('--no-imports', action='store_false', dest='imports', default=True,
help='Do not include `from django.contrib.gis.db import models` statement.')
parser.add_argument('--null', dest='null', action=ListOptionAction, default=False,
help='Use a comma separated list of OGR field names to add '
'the `null=True` option to the field definition. Set to `true` '
'to apply to all applicable fields.')
parser.add_argument('--srid', dest='srid',
help='The SRID to use for the Geometry Field. If it can be '
'determined, the SRID of the data source is used.')
parser.add_argument('--mapping', action='store_true', dest='mapping',
help='Generate mapping dictionary for use with `LayerMapping`.')
def handle(self, *args, **options):
data_source, model_name = options.pop('data_source'), options.pop('model_name')
if not gdal.HAS_GDAL:
raise CommandError('GDAL is required to inspect geospatial data sources.')
# Getting the OGR DataSource from the string parameter.
try:
ds = gdal.DataSource(data_source)
except gdal.GDALException as msg:
raise CommandError(msg)
# Returning the output of ogrinspect with the given arguments
# and options.
from django.contrib.gis.utils.ogrinspect import _ogrinspect, mapping
# Filter options to params accepted by `_ogrinspect`
ogr_options = {k: v for k, v in options.items()
if k in get_func_args(_ogrinspect) and v is not None}
output = [s for s in _ogrinspect(ds, model_name, **ogr_options)]
if options['mapping']:
# Constructing the keyword arguments for `mapping`, and
# calling it on the data source.
kwargs = {'geom_name': options['geom_name'],
'layer_key': options['layer_key'],
'multi_geom': options['multi_geom'],
}
mapping_dict = mapping(ds, **kwargs)
# This extra legwork is so that the dictionary definition comes
# out in the same order as the fields in the model definition.
rev_mapping = {v: k for k, v in mapping_dict.items()}
output.extend(['', '# Auto-generated `LayerMapping` dictionary for %s model' % model_name,
'%s_mapping = {' % model_name.lower()])
output.extend(" '%s' : '%s'," % (
rev_mapping[ogr_fld], ogr_fld) for ogr_fld in ds[options['layer_key']].fields
)
output.extend([" '%s' : '%s'," % (options['geom_name'], mapping_dict[options['geom_name']]), '}'])
return '\n'.join(output) + '\n'
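# Illustrative invocation sketch (the shapefile name is a placeholder), combining
# options defined in add_arguments above:
#   python manage.py ogrinspect zipcode.shp Zipcode --srid=4326 --mapping --multi-geom
# This prints the generated model plus a `zipcode_mapping` dictionary suitable
# for use with LayerMapping.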
|
forevernull/incubator-airflow
|
refs/heads/master
|
airflow/example_dags/example_trigger_controller_dag.py
|
45
|
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example illustrates the use of the TriggerDagRunOperator. There are 2
entities at work in this scenario:
1. The Controller DAG - the DAG that conditionally executes the trigger
2. The Target DAG - DAG being triggered (in example_trigger_target_dag.py)
This example illustrates the following features:
1. A TriggerDagRunOperator that takes:
a. A python callable that decides whether or not to trigger the Target DAG
b. An optional params dict passed to the python callable to help in
evaluating whether or not to trigger the Target DAG
c. The id (name) of the Target DAG
d. The python callable can add contextual info to the DagRun created by
way of adding a pickleable payload (e.g. a dictionary of primitives). This
state is then made available to the Target DAG.
2. A Target DAG : c.f. example_trigger_target_dag.py
"""
from airflow import DAG
from airflow.operators.dagrun_operator import TriggerDagRunOperator
from datetime import datetime
import pprint
pp = pprint.PrettyPrinter(indent=4)
def conditionally_trigger(context, dag_run_obj):
"""This function decides whether or not to Trigger the remote DAG"""
c_p =context['params']['condition_param']
print("Controller DAG : conditionally_trigger = {}".format(c_p))
if context['params']['condition_param']:
dag_run_obj.payload = {'message': context['params']['message']}
pp.pprint(dag_run_obj.payload)
return dag_run_obj
# Define the DAG
dag = DAG(dag_id='example_trigger_controller_dag',
default_args={"owner": "airflow",
"start_date": datetime.now()},
schedule_interval='@once')
# Define the single task in this controller example DAG
trigger = TriggerDagRunOperator(task_id='test_trigger_dagrun',
trigger_dag_id="example_trigger_target_dag",
python_callable=conditionally_trigger,
params={'condition_param': True,
'message': 'Hello World'},
dag=dag)
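# Hedged usage note (not part of the original example): with schedule_interval
# set to '@once', the controller runs a single time after the DAG is unpaused.
# In this era of Airflow it could also be kicked off manually, e.g. with
# `airflow trigger_dag example_trigger_controller_dag`, assuming that CLI
# subcommand is available in your installation.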
|
ammubhave/dropboxfs
|
refs/heads/master
|
dropbox-python-sdk-2.2.0/build/lib.linux-x86_64-2.7/dropbox/session.py
|
8
|
"""
dropbox.session.DropboxSession is responsible for holding OAuth authentication
info (app key/secret, request key/secret, access key/secret). It knows how to
use all of this information to craft properly constructed requests to Dropbox.
A DropboxSession object must be passed to a dropbox.client.DropboxClient object upon
initialization.
"""
from __future__ import absolute_import
import random
import sys
import time
import urllib
try:
from urlparse import parse_qs
except ImportError:
# fall back for Python 2.5
from cgi import parse_qs
from . import rest
class OAuthToken(object):
"""
A class representing an OAuth token. Contains two fields: ``key`` and
``secret``.
"""
def __init__(self, key, secret):
self.key = key
self.secret = secret
class BaseSession(object):
API_VERSION = 1
API_HOST = "api.dropbox.com"
WEB_HOST = "www.dropbox.com"
API_CONTENT_HOST = "api-content.dropbox.com"
API_NOTIFICATION_HOST = "api-notify.dropbox.com"
def __init__(self, consumer_key, consumer_secret, access_type="auto", locale=None, rest_client=rest.RESTClient):
"""Initialize a DropboxSession object.
Your consumer key and secret are available
at https://www.dropbox.com/developers/apps
Args:
- ``access_type``: Either 'auto' (the default), 'dropbox', or
'app_folder'. You probably don't need to specify this and should
just use the default.
- ``locale``: A locale string ('en', 'pt_PT', etc.) [optional]
The locale setting will be used to translate any user-facing error
messages that the server generates. At this time Dropbox supports
'en', 'es', 'fr', 'de', and 'ja', though we will be supporting more
languages in the future. If you send a language the server doesn't
support, messages will remain in English. Look for these translated
messages in rest.ErrorResponse exceptions as e.user_error_msg.
"""
assert access_type in ['dropbox', 'app_folder', 'auto'], "expected access_type of 'auto', 'dropbox' or 'app_folder'"
self.consumer_creds = OAuthToken(consumer_key, consumer_secret)
self.token = None
self.request_token = None
self.root = 'sandbox' if access_type == 'app_folder' else access_type
self.locale = locale
self.rest_client = rest_client
def is_linked(self):
"""Return whether the DropboxSession has an access token attached."""
return bool(self.token)
def unlink(self):
"""Remove any attached access token from the DropboxSession."""
self.token = None
def build_path(self, target, params=None):
"""Build the path component for an API URL.
This method urlencodes the parameters, adds them
to the end of the target url, and puts a marker for the API
version in front.
Args:
- ``target``: A target url (e.g. '/files') to build upon.
- ``params``: A dictionary of parameters (name to value). [optional]
Returns:
- The path and parameters components of an API URL.
"""
if sys.version_info < (3,) and type(target) == unicode:
target = target.encode("utf8")
target_path = urllib.quote(target)
params = params or {}
params = params.copy()
if self.locale:
params['locale'] = self.locale
if params:
return "/%s%s?%s" % (self.API_VERSION, target_path, urllib.urlencode(params))
else:
return "/%s%s" % (self.API_VERSION, target_path)
def build_url(self, host, target, params=None):
"""Build an API URL.
This method adds scheme and hostname to the path
returned from build_path.
Args:
- ``target``: A target url (e.g. '/files') to build upon.
- ``params``: A dictionary of parameters (name to value). [optional]
Returns:
- The full API URL.
"""
return "https://%s%s" % (host, self.build_path(target, params))
class DropboxSession(BaseSession):
def set_token(self, access_token, access_token_secret):
"""Attach an access token to the DropboxSession.
Note that the access 'token' is made up of both a token string
and a secret string.
"""
self.token = OAuthToken(access_token, access_token_secret)
def set_request_token(self, request_token, request_token_secret):
"""Attach an request token to the DropboxSession.
Note that the request 'token' is made up of both a token string
and a secret string.
"""
self.request_token = OAuthToken(request_token, request_token_secret)
def build_authorize_url(self, request_token, oauth_callback=None):
"""Build a request token authorization URL.
After obtaining a request token, you'll need to send the user to
the URL returned from this function so that they can confirm that
they want to connect their account to your app.
Args:
- ``request_token``: A request token from obtain_request_token.
- ``oauth_callback``: A url to redirect back to with the authorized
request token.
Returns:
- An authorization URL for the given request token.
"""
params = {'oauth_token': request_token.key,
}
if oauth_callback:
params['oauth_callback'] = oauth_callback
return self.build_url(self.WEB_HOST, '/oauth/authorize', params)
def obtain_request_token(self):
"""Obtain a request token from the Dropbox API.
This is your first step in the OAuth process. You call this to get a
request_token from the Dropbox server that you can then use with
DropboxSession.build_authorize_url() to get the user to authorize it.
After it's authorized you use this token with
DropboxSession.obtain_access_token() to get an access token.
NOTE: You should only need to do this once for each user, and then you
can store the access token for that user for later operations.
Returns:
- An :py:class:`OAuthToken` object representing the
request token Dropbox assigned to this app. Also attaches the
request token as self.request_token.
"""
self.token = None # clear any token currently on the request
url = self.build_url(self.API_HOST, '/oauth/request_token')
headers, params = self.build_access_headers('POST', url)
response = self.rest_client.POST(url, headers=headers, params=params, raw_response=True)
self.request_token = self._parse_token(response.read())
return self.request_token
def obtain_access_token(self, request_token=None):
"""Obtain an access token for a user.
After you get a request token, and then send the user to the authorize
URL, you can use the authorized request token with this method to get the
access token to use for future operations. The access token is stored on
the session object.
Args:
- ``request_token``: A request token from obtain_request_token. [optional]
The request_token should have been authorized via the
authorization url from build_authorize_url. If you don't pass
a request_token, the fallback is self.request_token, which
will exist if you previously called obtain_request_token on this
DropboxSession instance.
Returns:
- An :py:class:`OAuthToken` object with fields ``key`` and ``secret``
representing the access token Dropbox assigned to this app and
user. Also attaches the access token as self.token.
"""
request_token = request_token or self.request_token
assert request_token, "No request_token available on the session. Please pass one."
url = self.build_url(self.API_HOST, '/oauth/access_token')
headers, params = self.build_access_headers('POST', url, request_token=request_token)
response = self.rest_client.POST(url, headers=headers, params=params, raw_response=True)
self.token = self._parse_token(response.read())
return self.token
def build_access_headers(self, method, resource_url, params=None, request_token=None):
"""Build OAuth access headers for a future request.
Args:
- ``method``: The HTTP method being used (e.g. 'GET' or 'POST').
- ``resource_url``: The full url the request will be made to.
- ``params``: A dictionary of parameters to add to what's already on the url.
Typically, this would consist of POST parameters.
Returns:
- A tuple of (header_dict, params) where header_dict is a dictionary
of header names and values appropriate for passing into dropbox.rest.RESTClient
and params is a dictionary like the one that was passed in, but augmented with
oauth-related parameters as appropriate.
"""
if params is None:
params = {}
else:
params = params.copy()
oauth_params = {
'oauth_consumer_key' : self.consumer_creds.key,
'oauth_timestamp' : self._generate_oauth_timestamp(),
'oauth_nonce' : self._generate_oauth_nonce(),
'oauth_version' : self._oauth_version(),
}
token = request_token if request_token is not None else self.token
if token:
oauth_params['oauth_token'] = token.key
self._oauth_sign_request(oauth_params, self.consumer_creds, token)
params.update(oauth_params)
return {}, params
@classmethod
def _oauth_sign_request(cls, params, consumer_pair, token_pair):
params.update({'oauth_signature_method' : 'PLAINTEXT',
'oauth_signature' : ('%s&%s' % (consumer_pair.secret, token_pair.secret)
if token_pair is not None else
'%s&' % (consumer_pair.secret,))})
@classmethod
def _generate_oauth_timestamp(cls):
return int(time.time())
@classmethod
def _generate_oauth_nonce(cls, length=8):
return ''.join([str(random.randint(0, 9)) for i in range(length)])
@classmethod
def _oauth_version(cls):
return '1.0'
@classmethod
def _parse_token(cls, s):
if not s:
raise ValueError("Invalid parameter string.")
params = parse_qs(s, keep_blank_values=False)
if not params:
raise ValueError("Invalid parameter string: %r" % s)
try:
key = params['oauth_token'][0]
except Exception:
raise ValueError("'oauth_token' not found in OAuth request.")
try:
secret = params['oauth_token_secret'][0]
except Exception:
raise ValueError("'oauth_token_secret' not found in "
"OAuth request.")
return OAuthToken(key, secret)
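# Hedged usage sketch (not part of the original SDK source); APP_KEY and
# APP_SECRET are placeholders for your own app credentials:
#
#   sess = DropboxSession(APP_KEY, APP_SECRET, access_type="auto")
#   request_token = sess.obtain_request_token()
#   url = sess.build_authorize_url(request_token)
#   # ...send the user to `url`; once they approve the app:
#   access_token = sess.obtain_access_token(request_token)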
# Don't use this class directly.
class DropboxOAuth2Session(BaseSession):
def __init__(self, oauth2_access_token, locale, rest_client=rest.RESTClient):
super(DropboxOAuth2Session, self).__init__("", "", "auto", locale=locale, rest_client=rest_client)
self.access_token = oauth2_access_token
def build_access_headers(self, method, resource_url, params=None, token=None):
assert token is None
headers = {"Authorization": "Bearer " + self.access_token}
return headers, params
|
mahak/ansible
|
refs/heads/devel
|
test/integration/targets/collections/collection_root_user/ansible_collections/testns/testcoll/plugins/modules/win_selfcontained.py
|
82
|
# docs for Windows module would go here; just ensure we don't accidentally load this instead of the .ps1
|
blackye/luscan-devel
|
refs/heads/master
|
thirdparty_libs/django/templatetags/tz.py
|
114
|
from datetime import datetime, tzinfo
try:
import pytz
except ImportError:
pytz = None
from django.template import Node
from django.template import TemplateSyntaxError, Library
from django.utils import six
from django.utils import timezone
register = Library()
# HACK: datetime is an old-style class, create a new-style equivalent
# so we can define additional attributes.
class datetimeobject(datetime, object):
pass
# Template filters
@register.filter
def localtime(value):
"""
Converts a datetime to local time in the active time zone.
This only makes sense within a {% localtime off %} block.
"""
return do_timezone(value, timezone.get_current_timezone())
@register.filter
def utc(value):
"""
Converts a datetime to UTC.
"""
return do_timezone(value, timezone.utc)
@register.filter('timezone')
def do_timezone(value, arg):
"""
Converts a datetime to local time in a given time zone.
The argument must be an instance of a tzinfo subclass or a time zone name.
If it is a time zone name, pytz is required.
Naive datetimes are assumed to be in local time in the default time zone.
"""
if not isinstance(value, datetime):
return ''
# Obtain a timezone-aware datetime
try:
if timezone.is_naive(value):
default_timezone = timezone.get_default_timezone()
value = timezone.make_aware(value, default_timezone)
# Filters must never raise exceptions, and pytz' exceptions inherit
# Exception directly, not a specific subclass. So catch everything.
except Exception:
return ''
# Obtain a tzinfo instance
if isinstance(arg, tzinfo):
tz = arg
elif isinstance(arg, six.string_types) and pytz is not None:
try:
tz = pytz.timezone(arg)
except pytz.UnknownTimeZoneError:
return ''
else:
return ''
result = timezone.localtime(value, tz)
# HACK: the convert_to_local_time flag will prevent
# automatic conversion of the value to local time.
result = datetimeobject(result.year, result.month, result.day,
result.hour, result.minute, result.second,
result.microsecond, result.tzinfo)
result.convert_to_local_time = False
return result
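# In a template this filter is used as, e.g., {{ value|timezone:"Europe/Paris" }}.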
# Template tags
class LocalTimeNode(Node):
"""
Template node class used by ``localtime_tag``.
"""
def __init__(self, nodelist, use_tz):
self.nodelist = nodelist
self.use_tz = use_tz
def render(self, context):
old_setting = context.use_tz
context.use_tz = self.use_tz
output = self.nodelist.render(context)
context.use_tz = old_setting
return output
class TimezoneNode(Node):
"""
Template node class used by ``timezone_tag``.
"""
def __init__(self, nodelist, tz):
self.nodelist = nodelist
self.tz = tz
def render(self, context):
with timezone.override(self.tz.resolve(context)):
output = self.nodelist.render(context)
return output
class GetCurrentTimezoneNode(Node):
"""
Template node class used by ``get_current_timezone_tag``.
"""
def __init__(self, variable):
self.variable = variable
def render(self, context):
context[self.variable] = timezone.get_current_timezone_name()
return ''
@register.tag('localtime')
def localtime_tag(parser, token):
"""
Forces or prevents conversion of datetime objects to local time,
regardless of the value of ``settings.USE_TZ``.
Sample usage::
{% localtime off %}{{ value_in_utc }}{% endlocaltime %}
"""
bits = token.split_contents()
if len(bits) == 1:
use_tz = True
elif len(bits) > 2 or bits[1] not in ('on', 'off'):
raise TemplateSyntaxError("%r argument should be 'on' or 'off'" %
bits[0])
else:
use_tz = bits[1] == 'on'
nodelist = parser.parse(('endlocaltime',))
parser.delete_first_token()
return LocalTimeNode(nodelist, use_tz)
@register.tag('timezone')
def timezone_tag(parser, token):
"""
Enables a given time zone just for this block.
The ``timezone`` argument must be an instance of a ``tzinfo`` subclass, a
time zone name, or ``None``. If it is a time zone name, pytz is required.
If it is ``None``, the default time zone is used within the block.
Sample usage::
{% timezone "Europe/Paris" %}
It is {{ now }} in Paris.
{% endtimezone %}
"""
bits = token.split_contents()
if len(bits) != 2:
raise TemplateSyntaxError("'%s' takes one argument (timezone)" %
bits[0])
tz = parser.compile_filter(bits[1])
nodelist = parser.parse(('endtimezone',))
parser.delete_first_token()
return TimezoneNode(nodelist, tz)
@register.tag("get_current_timezone")
def get_current_timezone_tag(parser, token):
"""
Stores the name of the current time zone in the context.
Usage::
{% get_current_timezone as TIME_ZONE %}
This will fetch the currently active time zone and put its name
into the ``TIME_ZONE`` context variable.
"""
args = token.contents.split()
if len(args) != 3 or args[1] != 'as':
raise TemplateSyntaxError("'get_current_timezone' requires "
"'as variable' (got %r)" % args)
return GetCurrentTimezoneNode(args[2])
|
ecobost/pipeline
|
refs/heads/master
|
python/pipeline/utils/enhancement.py
|
5
|
import numpy as np
from scipy import ndimage
def lcn(image, sigmas=(12, 12)):
""" Local contrast normalization.
Normalize each pixel using mean and stddev computed on a local neighborhood.
We use gaussian filters rather than uniform filters to compute the local mean and std
to soften the effect of edges. Essentially we are using a fuzzy local neighborhood.
The equivalent using a hard definition of neighborhood would be:
local_mean = ndimage.uniform_filter(image, size=(32, 32))
:param np.array image: Array with raw two-photon images.
:param tuple sigmas: List with sigmas (one per axis) to use for the gaussian filter.
Smaller values result in more localized neighborhoods. 15-30 microns should work fine
"""
local_mean = ndimage.gaussian_filter(image, sigmas)
local_var = ndimage.gaussian_filter(image ** 2, sigmas) - local_mean ** 2
local_std = np.sqrt(np.clip(local_var, a_min=0, a_max=None))
norm = (image - local_mean) / (local_std + 1e-7)
return norm
def sharpen_2pimage(image, laplace_sigma=0.7, low_percentile=3, high_percentile=99.9):
""" Apply a laplacian filter, clip pixel range and normalize.
:param np.array image: Array with raw two-photon images.
:param float laplace_sigma: Sigma of the gaussian used in the laplace filter.
:param float low_percentile, high_percentile: Percentiles at which to clip.
:returns: Array of same shape as input. Sharpened image.
"""
sharpened = image - ndimage.gaussian_laplace(image, laplace_sigma)
clipped = np.clip(sharpened, *np.percentile(sharpened, [low_percentile, high_percentile]))
norm = (clipped - clipped.mean()) / (clipped.max() - clipped.min() + 1e-7)
return norm
def create_correlation_image(scan):
""" Compute the correlation image for the given scan.
At each pixel, we compute the correlation (over time) with each of its eight
neighboring pixels and average them.
:param np.array scan: 3-dimensional scan (image_height, image_width, num_frames).
:returns: Correlation image. 2-dimensional array (image_height x image_width).
:rtype np.array
.. note:: Even though this code does not reuse the correlations between pixels for the
next iteration, it is as efficient in time (and slightly better in memory) than the
dynamic programming implementation below, probably due to vectorization.
"""
from itertools import product
# Get image dimensions
image_height, image_width, num_frames = scan.shape
# Compute deviations from the mean (in place)
mean_image = np.mean(scan, axis=-1, keepdims=True)
scan -= mean_image # in place
deviations = scan
# Calculate (unnormalized) standard deviation per pixel
stddev_image = np.empty([image_height, image_width])
for y, x in product(range(image_height), range(image_width)):
stddev_image[y, x] = np.sum(deviations[y, x] ** 2)
stddev_image = np.sqrt(stddev_image)
# we don't use np.sum(deviations**2, axis=-1) because it creates a copy of the scan
# Cut a 3 x 3 square around each pixel and compute their (mean) pair-wise correlation.
correlation_image = np.empty([image_height, image_width])
for y, x in product(range(image_height), range(image_width)):
yslice = slice(max(y - 1, 0), min(y + 2, image_height))
xslice = slice(max(x - 1, 0), min(x + 2, image_width))
numerator = np.inner(deviations[yslice, xslice], deviations[y, x])
correlations = numerator / stddev_image[yslice, xslice]
correlations[min(1, y), min(1, x)] = 0
correlation_image[y, x] = np.sum(correlations) / (correlations.size - 1)
correlation_image /= stddev_image
# Return scan back to original values
scan += mean_image
return correlation_image
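# Illustrative sketch (not part of the pipeline): a quick smoke test of the helpers
# above on synthetic data. Shapes and parameter values are arbitrary placeholders.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    image = rng.rand(64, 64).astype(np.float32)
    scan = rng.rand(32, 32, 50).astype(np.float32)  # image_height x image_width x num_frames

    normed = lcn(image, sigmas=(12, 12))
    sharpened = sharpen_2pimage(image)
    corr = create_correlation_image(scan)  # modifies scan in place but restores it before returning

    print(normed.shape, sharpened.shape, corr.shape)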
|
mydongistiny/external_chromium_org
|
refs/heads/benzo
|
mojo/public/tools/bindings/pylib/mojom/generate/data_tests.py
|
104
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
import data
import test_support
EXPECT_EQ = test_support.EXPECT_EQ
EXPECT_TRUE = test_support.EXPECT_TRUE
RunTest = test_support.RunTest
def DeepEquals(d1, d2):
if d1 == d2:
return True
if d1.__class__ != d2.__class__:
return False
if isinstance(d1, dict):
if set(d1.keys()) != set(d2.keys()):
return False
for key in d1.keys():
if not DeepEquals(d1[key], d2[key]):
return False
return True
if isinstance(d1, (list, tuple)):
if len(d1) != len(d2):
return False
for i in range(len(d1)):
if not DeepEquals(d1[i], d2[i]):
return False
return True
return False
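# For example, DeepEquals({'a': [1, {'b': 2}]}, {'a': [1, {'b': 2}]}) is True, while
# DeepEquals({'a': [1, 2]}, {'a': (1, 2)}) is False because the element classes differ.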
test_dict = {
'name': 'test',
'namespace': 'testspace',
'structs': [{
'name': 'teststruct',
'fields': [
{'name': 'testfield1', 'kind': 'i32'},
{'name': 'testfield2', 'kind': 'a:i32', 'ordinal': 42}]}],
'interfaces': [{
'name': 'Server',
'client': None,
'methods': [{
'name': 'Foo',
'parameters': [
{'name': 'foo', 'kind': 'i32'},
{'name': 'bar', 'kind': 'a:x:teststruct'}],
'ordinal': 42}]}]
}
def TestRead():
module = data.ModuleFromData(test_dict)
return test_support.TestTestModule(module)
def TestWrite():
module = test_support.BuildTestModule()
d = data.ModuleToData(module)
return EXPECT_TRUE(DeepEquals(test_dict, d))
def TestWriteRead():
module1 = test_support.BuildTestModule()
dict1 = data.ModuleToData(module1)
module2 = data.ModuleFromData(dict1)
return EXPECT_TRUE(test_support.ModulesAreEqual(module1, module2))
def Main(args):
errors = 0
errors += RunTest(TestWriteRead)
errors += RunTest(TestRead)
errors += RunTest(TestWrite)
return errors
if __name__ == '__main__':
sys.exit(Main(sys.argv[1:]))
|
amyvmiwei/kbengine
|
refs/heads/master
|
kbe/src/lib/python/Lib/test/test_email/test_inversion.py
|
120
|
"""Test the parser and generator are inverses.
Note that this is only strictly true if we are parsing RFC valid messages and
producing RFC valid messages.
"""
import io
import unittest
from email import policy, message_from_bytes
from email.generator import BytesGenerator
from test.test_email import TestEmailBase, parameterize
# This is like textwrap.dedent for bytes, except that it uses \r\n for the line
# separators on the rebuilt string.
def dedent(bstr):
lines = bstr.splitlines()
if not lines[0].strip():
raise ValueError("First line must contain text")
stripamt = len(lines[0]) - len(lines[0].lstrip())
return b'\r\n'.join(
[x[stripamt:] if len(x)>=stripamt else b''
for x in lines])
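# For example, dedent(b"  From: abc\n  To: xyz") strips the two-space indent from each
# line and rejoins the lines with b'\r\n', giving b'From: abc\r\nTo: xyz'.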
@parameterize
class TestInversion(TestEmailBase, unittest.TestCase):
def msg_as_input(self, msg):
m = message_from_bytes(msg, policy=policy.SMTP)
b = io.BytesIO()
g = BytesGenerator(b)
g.flatten(m)
self.assertEqual(b.getvalue(), msg)
# XXX: spaces are not preserved correctly here yet in the general case.
msg_params = {
'header_with_one_space_body': (dedent(b"""\
From: abc@xyz.com
X-Status:\x20
Subject: test
foo
"""),),
}
if __name__ == '__main__':
unittest.main()
|
pkats15/hdt_analyzer
|
refs/heads/master
|
django_test/django_venv/Lib/site-packages/pip/_vendor/html5lib/serializer/__init__.py
|
1731
|
from __future__ import absolute_import, division, unicode_literals
from .. import treewalkers
from .htmlserializer import HTMLSerializer
def serialize(input, tree="etree", format="html", encoding=None,
**serializer_opts):
# XXX: Should we cache this?
walker = treewalkers.getTreeWalker(tree)
if format == "html":
s = HTMLSerializer(**serializer_opts)
else:
raise ValueError("type must be html")
return s.render(walker(input), encoding)
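# Illustrative sketch (not part of the vendored package): typical use of serialize()
# above, assuming the top-level html5lib package is available for parsing.
if __name__ == "__main__":
    import html5lib
    document = html5lib.parse("<p>Hello, <b>world</b>")  # parses into the default 'etree' tree
    print(serialize(document, tree="etree", format="html"))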
|
odlgroup/odl
|
refs/heads/master
|
examples/solvers/nuclear_norm_minimization.py
|
2
|
"""Nuclear norm minimization using the Douglas-Rachford solver.
Solves the optimization problem
min_{x_1, x_2} ||x_1 - g_1||_2^2 + ||x_2 - g_2||_2^2 +
lam || [grad(x_1), grad(x_2)] ||_*
where ``grad`` is the spatial gradient, ``g`` the given noisy data and
``|| . ||_*`` is the nuclear norm.
The nuclear norm introduces a coupling between the channels, and hence we
expect that edges should coincide in the optimal solution.
"""
import odl
# Create space that the function should live in. Here, we want a vector valued
# function, so we create the tuple of two spaces.
space = odl.uniform_discr(0, 1, 100)
pspace = odl.ProductSpace(space, 2)
# Create the gradient operator on the set of vector-valued functions.
# We select pad_mode='order1' so that we have a Neumann-style boundary
# condition. Here we assume the gradient is continuous at the boundary.
gradient = odl.Gradient(space, pad_mode='order1')
pgradient = odl.DiagonalOperator(gradient, 2)
# Create the data. The first part is a linear function, the second is a step
# function at x=0.6
data = pspace.element([lambda x: x, lambda x: x > 0.6])
data.show('data')
# Create functionals for the data discrepancy (L2 squared) and for the
# regularizer (nuclear norm). The nuclear norm is defined on the range of
# the vectorial gradient, which is vector valued.
l2err = odl.solvers.L2NormSquared(pspace).translated(data)
nuc_norm = 0.02 * odl.solvers.NuclearNorm(pgradient.range)
# Assemble operators and functionals for the solver routine
lin_ops = [odl.IdentityOperator(pspace), pgradient]
g = [l2err, nuc_norm]
# The solver we want to use also takes an additional functional f which can be
# used to enforce bounds constraints and other prior information. Here we lack
# prior information so we set it to zero.
f = odl.solvers.ZeroFunctional(pspace)
# Create a callback that shows the current function value and also shows the
# iterate graphically every 20th step.
func = f + l2err + nuc_norm * pgradient
callback = (odl.solvers.CallbackPrint(func) &
odl.solvers.CallbackShow(step=20))
# Solve the problem. Here the parameters are chosen in order to ensure
# convergence, see the documentation for further information.
# We select the data as an initial guess.
x = data.copy()
odl.solvers.douglas_rachford_pd(x, f, g, lin_ops,
tau=1e-2, sigma=[1.0, 1e-3],
niter=2000, callback=callback)
x.show('Reconstruction', force_show=True)
|
hyperized/ansible
|
refs/heads/devel
|
lib/ansible/modules/cloud/centurylink/clc_publicip.py
|
47
|
#!/usr/bin/python
#
# Copyright (c) 2015 CenturyLink
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: clc_publicip
short_description: Add and Delete public ips on servers in CenturyLink Cloud.
description:
- An Ansible module to add or delete public ip addresses on an existing server or servers in CenturyLink Cloud.
version_added: "2.0"
options:
protocol:
description:
- The protocol that the public IP will listen for.
default: TCP
choices: ['TCP', 'UDP', 'ICMP']
ports:
description:
- A list of ports to expose. This is required when state is 'present'.
server_ids:
description:
- A list of servers to create public ips on.
required: True
state:
description:
- Determine whether to create or delete public IPs. If present, the module will not create a second public ip if one
already exists.
default: present
choices: ['present', 'absent']
wait:
description:
- Whether to wait for the tasks to finish before returning.
type: bool
default: 'yes'
requirements:
- python = 2.7
- requests >= 2.5.0
- clc-sdk
author: "CLC Runner (@clc-runner)"
notes:
- To use this module, it is required to set the below environment variables which enable access to the
Centurylink Cloud
- CLC_V2_API_USERNAME, the account login id for the centurylink cloud
- CLC_V2_API_PASSWORD, the account password for the centurylink cloud
- Alternatively, the module accepts the API token and account alias. The API token can be generated using the
CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login
- CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
- CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud
- Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment.
'''
EXAMPLES = '''
# Note - You must set the CLC_V2_API_USERNAME And CLC_V2_API_PASSWD Environment variables before running these examples
- name: Add Public IP to Server
hosts: localhost
gather_facts: False
connection: local
tasks:
- name: Create Public IP For Servers
clc_publicip:
protocol: TCP
ports:
- 80
server_ids:
- UC1TEST-SVR01
- UC1TEST-SVR02
state: present
register: clc
- name: debug
debug:
var: clc
- name: Delete Public IP from Server
hosts: localhost
gather_facts: False
connection: local
tasks:
- name: Create Public IP For Servers
clc_publicip:
server_ids:
- UC1TEST-SVR01
- UC1TEST-SVR02
state: absent
register: clc
- name: debug
debug:
var: clc
'''
RETURN = '''
server_ids:
description: The list of server ids that are changed
returned: success
type: list
sample:
[
"UC1TEST-SVR01",
"UC1TEST-SVR02"
]
'''
__version__ = '${version}'
import os
import traceback
from distutils.version import LooseVersion
REQUESTS_IMP_ERR = None
try:
import requests
except ImportError:
REQUESTS_IMP_ERR = traceback.format_exc()
REQUESTS_FOUND = False
else:
REQUESTS_FOUND = True
#
# Requires the clc-python-sdk.
# sudo pip install clc-sdk
#
CLC_IMP_ERR = None
try:
import clc as clc_sdk
from clc import CLCException
except ImportError:
CLC_IMP_ERR = traceback.format_exc()
CLC_FOUND = False
clc_sdk = None
else:
CLC_FOUND = True
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
class ClcPublicIp(object):
clc = clc_sdk
module = None
def __init__(self, module):
"""
Construct module
"""
self.module = module
if not CLC_FOUND:
self.module.fail_json(msg=missing_required_lib('clc-sdk'), exception=CLC_IMP_ERR)
if not REQUESTS_FOUND:
self.module.fail_json(msg=missing_required_lib('requests'), exception=REQUESTS_IMP_ERR)
if requests.__version__ and LooseVersion(requests.__version__) < LooseVersion('2.5.0'):
self.module.fail_json(
msg='requests library version should be >= 2.5.0')
self._set_user_agent(self.clc)
def process_request(self):
"""
Process the request - Main Code Path
:return: Returns with either an exit_json or fail_json
"""
self._set_clc_credentials_from_env()
params = self.module.params
server_ids = params['server_ids']
ports = params['ports']
protocol = params['protocol']
state = params['state']
if state == 'present':
changed, changed_server_ids, requests = self.ensure_public_ip_present(
server_ids=server_ids, protocol=protocol, ports=ports)
elif state == 'absent':
changed, changed_server_ids, requests = self.ensure_public_ip_absent(
server_ids=server_ids)
else:
return self.module.fail_json(msg="Unknown State: " + state)
self._wait_for_requests_to_complete(requests)
return self.module.exit_json(changed=changed,
server_ids=changed_server_ids)
@staticmethod
def _define_module_argument_spec():
"""
Define the argument spec for the ansible module
:return: argument spec dictionary
"""
argument_spec = dict(
server_ids=dict(type='list', required=True),
protocol=dict(default='TCP', choices=['TCP', 'UDP', 'ICMP']),
ports=dict(type='list'),
wait=dict(type='bool', default=True),
state=dict(default='present', choices=['present', 'absent']),
)
return argument_spec
def ensure_public_ip_present(self, server_ids, protocol, ports):
"""
Ensures the given server ids have a public ip available
:param server_ids: the list of server ids
:param protocol: the ip protocol
:param ports: the list of ports to expose
:return: (changed, changed_server_ids, results)
changed: A flag indicating if there is any change
changed_server_ids : the list of server ids that are changed
results: The result list from clc public ip call
"""
changed = False
results = []
changed_server_ids = []
servers = self._get_servers_from_clc(
server_ids,
'Failed to obtain server list from the CLC API')
servers_to_change = [
server for server in servers if len(
server.PublicIPs().public_ips) == 0]
ports_to_expose = [{'protocol': protocol, 'port': port}
for port in ports]
for server in servers_to_change:
if not self.module.check_mode:
result = self._add_publicip_to_server(server, ports_to_expose)
results.append(result)
changed_server_ids.append(server.id)
changed = True
return changed, changed_server_ids, results
def _add_publicip_to_server(self, server, ports_to_expose):
result = None
try:
result = server.PublicIPs().Add(ports_to_expose)
except CLCException as ex:
self.module.fail_json(msg='Failed to add public ip to the server : {0}. {1}'.format(
server.id, ex.response_text
))
return result
def ensure_public_ip_absent(self, server_ids):
"""
Ensures any public ips on the given server ids are removed
:param server_ids: the list of server ids
:return: (changed, changed_server_ids, results)
changed: A flag indicating if there is any change
changed_server_ids : the list of server ids that are changed
results: The result list from clc public ip call
"""
changed = False
results = []
changed_server_ids = []
servers = self._get_servers_from_clc(
server_ids,
'Failed to obtain server list from the CLC API')
servers_to_change = [
server for server in servers if len(
server.PublicIPs().public_ips) > 0]
for server in servers_to_change:
if not self.module.check_mode:
result = self._remove_publicip_from_server(server)
results.append(result)
changed_server_ids.append(server.id)
changed = True
return changed, changed_server_ids, results
def _remove_publicip_from_server(self, server):
result = None
try:
for ip_address in server.PublicIPs().public_ips:
result = ip_address.Delete()
except CLCException as ex:
self.module.fail_json(msg='Failed to remove public ip from the server : {0}. {1}'.format(
server.id, ex.response_text
))
return result
def _wait_for_requests_to_complete(self, requests_lst):
"""
Waits until the CLC requests are complete if the wait argument is True
:param requests_lst: The list of CLC request objects
:return: none
"""
if not self.module.params['wait']:
return
for request in requests_lst:
request.WaitUntilComplete()
for request_details in request.requests:
if request_details.Status() != 'succeeded':
self.module.fail_json(
msg='Unable to process public ip request')
def _set_clc_credentials_from_env(self):
"""
Set the CLC Credentials on the sdk by reading environment variables
:return: none
"""
env = os.environ
v2_api_token = env.get('CLC_V2_API_TOKEN', False)
v2_api_username = env.get('CLC_V2_API_USERNAME', False)
v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
clc_alias = env.get('CLC_ACCT_ALIAS', False)
api_url = env.get('CLC_V2_API_URL', False)
if api_url:
self.clc.defaults.ENDPOINT_URL_V2 = api_url
if v2_api_token and clc_alias:
self.clc._LOGIN_TOKEN_V2 = v2_api_token
self.clc._V2_ENABLED = True
self.clc.ALIAS = clc_alias
elif v2_api_username and v2_api_passwd:
self.clc.v2.SetCredentials(
api_username=v2_api_username,
api_passwd=v2_api_passwd)
else:
return self.module.fail_json(
msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
"environment variables")
def _get_servers_from_clc(self, server_ids, message):
"""
Gets the list of servers from the CLC API
"""
try:
return self.clc.v2.Servers(server_ids).servers
except CLCException as exception:
self.module.fail_json(msg=message + ': %s' % exception)
@staticmethod
def _set_user_agent(clc):
if hasattr(clc, 'SetRequestsSession'):
agent_string = "ClcAnsibleModule/" + __version__
ses = requests.Session()
ses.headers.update({"Api-Client": agent_string})
ses.headers['User-Agent'] += " " + agent_string
clc.SetRequestsSession(ses)
def main():
"""
The main function. Instantiates the module and calls process_request.
:return: none
"""
module = AnsibleModule(
argument_spec=ClcPublicIp._define_module_argument_spec(),
supports_check_mode=True
)
clc_public_ip = ClcPublicIp(module)
clc_public_ip.process_request()
if __name__ == '__main__':
main()
|
scpeters/catkin
|
refs/heads/indigo-devel
|
python/catkin/__init__.py
|
12133432
| |
pgmillon/ansible
|
refs/heads/devel
|
lib/ansible/modules/cloud/digital_ocean/digital_ocean_image_info.py
|
6
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Ansible Project
# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: digital_ocean_image_info
short_description: Gather information about DigitalOcean images
description:
- This module can be used to gather information about DigitalOcean provided images.
- These images can be of type C(distribution), C(application) or C(private).
- This module was called C(digital_ocean_image_facts) before Ansible 2.9. The usage did not change.
author: "Abhijeet Kasurde (@Akasurde)"
version_added: "2.6"
options:
image_type:
description:
- Specifies the type of image information to be retrieved.
- If set to C(application), then information is gathered about all application images.
- If set to C(distribution), then information is gathered about all distribution images.
- If set to C(private), then information is gathered about all private images.
- If not set to any of the above, then information is gathered about all images.
default: 'all'
choices: [ 'all', 'application', 'distribution', 'private' ]
required: false
requirements:
- "python >= 2.6"
extends_documentation_fragment: digital_ocean.documentation
'''
EXAMPLES = '''
- name: Gather information about all images
digital_ocean_image_info:
image_type: all
oauth_token: "{{ oauth_token }}"
- name: Gather information about application images
digital_ocean_image_info:
image_type: application
oauth_token: "{{ oauth_token }}"
- name: Gather information about distribution images
digital_ocean_image_info:
image_type: distribution
oauth_token: "{{ oauth_token }}"
- name: Get the distribution of the image with slug coreos-beta
digital_ocean_image_info:
register: resp_out
- set_fact:
distribution_name: "{{ item.distribution }}"
loop: "{{ resp_out.data|json_query(name) }}"
vars:
name: "[?slug=='coreos-beta']"
- debug: var=distribution_name
'''
RETURN = '''
data:
description: DigitalOcean image information
returned: success
type: list
sample: [
{
"created_at": "2018-02-02T07:11:43Z",
"distribution": "CoreOS",
"id": 31434061,
"min_disk_size": 20,
"name": "1662.1.0 (beta)",
"public": true,
"regions": [
"nyc1",
"sfo1",
"nyc2",
"ams2",
"sgp1",
"lon1",
"nyc3",
"ams3",
"fra1",
"tor1",
"sfo2",
"blr1"
],
"size_gigabytes": 0.42,
"slug": "coreos-beta",
"type": "snapshot"
},
]
'''
from traceback import format_exc
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.digital_ocean import DigitalOceanHelper
from ansible.module_utils._text import to_native
def core(module):
image_type = module.params['image_type']
rest = DigitalOceanHelper(module)
base_url = 'images?'
if image_type == 'distribution':
base_url += "type=distribution&"
elif image_type == 'application':
base_url += "type=application&"
elif image_type == 'private':
base_url += "private=true&"
images = rest.get_paginated_data(base_url=base_url, data_key_name='images')
module.exit_json(changed=False, data=images)
def main():
argument_spec = DigitalOceanHelper.digital_ocean_argument_spec()
argument_spec.update(
image_type=dict(type='str',
required=False,
choices=['all', 'application', 'distribution', 'private'],
default='all'
)
)
module = AnsibleModule(argument_spec=argument_spec)
if module._name == 'digital_ocean_image_facts':
module.deprecate("The 'digital_ocean_image_facts' module has been renamed to 'digital_ocean_image_info'", version='2.13')
try:
core(module)
except Exception as e:
module.fail_json(msg=to_native(e), exception=format_exc())
if __name__ == '__main__':
main()
|
cneill/designate
|
refs/heads/master
|
contrib/tempest/dns_schema/records.py
|
9
|
# Copyright 2014 Hewlett-Packard Development Company, L.P
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api_schema.dns import parameter_types
list_records = {
"status_code": [200],
"response_body": {
"type": "object",
"properties": {
"records": {
"type": "array",
"items": {
"type": "object",
"properties": {
"created_at": {"type": "string"},
"data": {
"anyOf": [parameter_types.access_ip_v4,
parameter_types.access_ip_v6]},
"description": {"type": "null"},
"domain_id": {"type": "string"},
"id": {"type": "string"},
"name": {"type": "string"},
"priority": {"type": "null"},
"ttl": {"type": "null"},
"type": {"type": "string"},
"updated_at": {
"anyOf": [{'type': 'string'}, {"type": "null"}]}
},
'required': ['id', 'name', 'type', 'data']
}
}
},
'required': ['records']
}
}
create_record = {
"status_code": [200],
"response_body": {
"type": "object",
"properties": {
"record": {
"type": "object",
"properties": {
"created_at": {"type": "string"},
"data": {
"anyOf": [parameter_types.access_ip_v4,
parameter_types.access_ip_v6]},
"description": {"type": "null"},
"domain_id": {"type": "string"},
"id": {"type": "string"},
"name": {"type": "string"},
"priority": {"type": "null"},
"ttl": {"type": "null"},
"type": {"type": "string"},
"updated_at": {"type": "null"}
},
"required": ['id', 'name', 'type', 'domain_id']
}
}
},
"required": ['record']
}
update_record = {
"status_code": [200],
"response_body": {
"type": "object",
"properties": {
"record": {
"type": "object",
"properties": {
"created_at": {"type": "string"},
"data": {
"anyOf": [parameter_types.access_ip_v4,
parameter_types.access_ip_v6]},
"description": {"type": "null"},
"domain_id": {"type": "string"},
"id": {"type": "string"},
"name": {"type": "string"},
"priority": {"type": "null"},
"ttl": {"type": "null"},
"type": {"type": "string"},
"updated_at": {"type": "string"}
},
"required": ['id', 'name', 'type', 'domain_id']
}
}
},
"required": ['record']
}
get_record = {
"status_code": [200],
"response_body": {
"type": "object",
"properties": {
"record": {
"type": "object",
"properties": {
"created_at": {"type": "string"},
"data": {
"anyOf": [parameter_types.access_ip_v4,
parameter_types.access_ip_v6]},
"description": {"type": "null"},
"domain_id": {"type": "string"},
"id": {"type": "string"},
"name": {"type": "string"},
"priority": {"type": "null"},
"ttl": {"type": "null"},
"type": {"type": "string"},
"updated_at": {
"anyOf": [{'type': 'string'}, {"type": "null"}]}
},
"required": ['id', 'name', 'type', 'domain_id']
}
}
},
"required": ['record']
}
delete_record = {
'status_code': [200],
}
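# These schemas are typically consumed by tempest's REST client response validation,
# which checks both the returned status code and the structure of the response body.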
|
geoenvo/geonode
|
refs/heads/master
|
geonode/contrib/dynamic/admin.py
|
36
|
from django.contrib.gis import admin
from geonode.contrib.dynamic.models import ModelDescription
for md in ModelDescription.objects.all():
TheModel, TheAdmin = md.get_django_model(with_admin=True)
admin.site.register(TheModel, TheAdmin)
|
seocam/django
|
refs/heads/master
|
tests/generic_views/test_dates.py
|
17
|
from __future__ import unicode_literals
import datetime
from django.core.exceptions import ImproperlyConfigured
from django.test import TestCase, override_settings, skipUnlessDBFeature
from django.test.utils import requires_tz_support
from django.utils import timezone
from .models import Book, BookSigning
def _make_books(n, base_date):
for i in range(n):
Book.objects.create(
name='Book %d' % i,
slug='book-%d' % i,
pages=100 + i,
pubdate=base_date - datetime.timedelta(days=i))
@override_settings(ROOT_URLCONF='generic_views.urls')
class ArchiveIndexViewTests(TestCase):
fixtures = ['generic-views-test-data.json']
def test_archive_view(self):
res = self.client.get('/dates/books/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['date_list']), list(Book.objects.dates('pubdate', 'year', 'DESC')))
self.assertEqual(list(res.context['latest']), list(Book.objects.all()))
self.assertTemplateUsed(res, 'generic_views/book_archive.html')
def test_archive_view_context_object_name(self):
res = self.client.get('/dates/books/context_object_name/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['date_list']), list(Book.objects.dates('pubdate', 'year', 'DESC')))
self.assertEqual(list(res.context['thingies']), list(Book.objects.all()))
self.assertNotIn('latest', res.context)
self.assertTemplateUsed(res, 'generic_views/book_archive.html')
def test_empty_archive_view(self):
Book.objects.all().delete()
res = self.client.get('/dates/books/')
self.assertEqual(res.status_code, 404)
def test_allow_empty_archive_view(self):
Book.objects.all().delete()
res = self.client.get('/dates/books/allow_empty/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['date_list']), [])
self.assertTemplateUsed(res, 'generic_views/book_archive.html')
def test_archive_view_template(self):
res = self.client.get('/dates/books/template_name/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['date_list']), list(Book.objects.dates('pubdate', 'year', 'DESC')))
self.assertEqual(list(res.context['latest']), list(Book.objects.all()))
self.assertTemplateUsed(res, 'generic_views/list.html')
def test_archive_view_template_suffix(self):
res = self.client.get('/dates/books/template_name_suffix/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['date_list']), list(Book.objects.dates('pubdate', 'year', 'DESC')))
self.assertEqual(list(res.context['latest']), list(Book.objects.all()))
self.assertTemplateUsed(res, 'generic_views/book_detail.html')
def test_archive_view_invalid(self):
self.assertRaises(ImproperlyConfigured, self.client.get, '/dates/books/invalid/')
def test_archive_view_by_month(self):
res = self.client.get('/dates/books/by_month/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['date_list']), list(Book.objects.dates('pubdate', 'month', 'DESC')))
def test_paginated_archive_view(self):
_make_books(20, base_date=datetime.date.today())
res = self.client.get('/dates/books/paginated/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['date_list']), list(Book.objects.dates('pubdate', 'year', 'DESC')))
self.assertEqual(list(res.context['latest']), list(Book.objects.all()[0:10]))
self.assertTemplateUsed(res, 'generic_views/book_archive.html')
res = self.client.get('/dates/books/paginated/?page=2')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['page_obj'].number, 2)
self.assertEqual(list(res.context['latest']), list(Book.objects.all()[10:20]))
def test_paginated_archive_view_does_not_load_entire_table(self):
# Regression test for #18087
_make_books(20, base_date=datetime.date.today())
# 1 query for years list + 1 query for books
with self.assertNumQueries(2):
self.client.get('/dates/books/')
# same as above + 1 query to test if books exist + 1 query to count them
with self.assertNumQueries(4):
self.client.get('/dates/books/paginated/')
def test_no_duplicate_query(self):
# Regression test for #18354
with self.assertNumQueries(2):
self.client.get('/dates/books/reverse/')
def test_datetime_archive_view(self):
BookSigning.objects.create(event_date=datetime.datetime(2008, 4, 2, 12, 0))
res = self.client.get('/dates/booksignings/')
self.assertEqual(res.status_code, 200)
@requires_tz_support
@skipUnlessDBFeature('has_zoneinfo_database')
@override_settings(USE_TZ=True, TIME_ZONE='Africa/Nairobi')
def test_aware_datetime_archive_view(self):
BookSigning.objects.create(event_date=datetime.datetime(2008, 4, 2, 12, 0, tzinfo=timezone.utc))
res = self.client.get('/dates/booksignings/')
self.assertEqual(res.status_code, 200)
def test_date_list_order(self):
"""date_list should be sorted descending in index"""
_make_books(5, base_date=datetime.date(2011, 12, 25))
res = self.client.get('/dates/books/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['date_list']), list(reversed(sorted(res.context['date_list']))))
def test_archive_view_custom_sorting(self):
Book.objects.create(name="Zebras for Dummies", pages=600, pubdate=datetime.date(2007, 5, 1))
res = self.client.get('/dates/books/sortedbyname/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['date_list']), list(Book.objects.dates('pubdate', 'year', 'DESC')))
self.assertEqual(list(res.context['latest']), list(Book.objects.order_by('name').all()))
self.assertTemplateUsed(res, 'generic_views/book_archive.html')
def test_archive_view_custom_sorting_dec(self):
Book.objects.create(name="Zebras for Dummies", pages=600, pubdate=datetime.date(2007, 5, 1))
res = self.client.get('/dates/books/sortedbynamedec/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['date_list']), list(Book.objects.dates('pubdate', 'year', 'DESC')))
self.assertEqual(list(res.context['latest']), list(Book.objects.order_by('-name').all()))
self.assertTemplateUsed(res, 'generic_views/book_archive.html')
@override_settings(ROOT_URLCONF='generic_views.urls')
class YearArchiveViewTests(TestCase):
fixtures = ['generic-views-test-data.json']
def test_year_view(self):
res = self.client.get('/dates/books/2008/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['date_list']), [datetime.date(2008, 10, 1)])
self.assertEqual(res.context['year'], datetime.date(2008, 1, 1))
self.assertTemplateUsed(res, 'generic_views/book_archive_year.html')
# Since allow_empty=False, next/prev years must be valid (#7164)
self.assertEqual(res.context['next_year'], None)
self.assertEqual(res.context['previous_year'], datetime.date(2006, 1, 1))
def test_year_view_make_object_list(self):
res = self.client.get('/dates/books/2006/make_object_list/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['date_list']), [datetime.date(2006, 5, 1)])
self.assertEqual(list(res.context['book_list']), list(Book.objects.filter(pubdate__year=2006)))
self.assertEqual(list(res.context['object_list']), list(Book.objects.filter(pubdate__year=2006)))
self.assertTemplateUsed(res, 'generic_views/book_archive_year.html')
def test_year_view_empty(self):
res = self.client.get('/dates/books/1999/')
self.assertEqual(res.status_code, 404)
res = self.client.get('/dates/books/1999/allow_empty/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['date_list']), [])
self.assertEqual(list(res.context['book_list']), [])
# Since allow_empty=True, next/prev are allowed to be empty years (#7164)
self.assertEqual(res.context['next_year'], datetime.date(2000, 1, 1))
self.assertEqual(res.context['previous_year'], datetime.date(1998, 1, 1))
def test_year_view_allow_future(self):
# Create a new book in the future
year = datetime.date.today().year + 1
Book.objects.create(name="The New New Testement", pages=600, pubdate=datetime.date(year, 1, 1))
res = self.client.get('/dates/books/%s/' % year)
self.assertEqual(res.status_code, 404)
res = self.client.get('/dates/books/%s/allow_empty/' % year)
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['book_list']), [])
res = self.client.get('/dates/books/%s/allow_future/' % year)
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['date_list']), [datetime.date(year, 1, 1)])
def test_year_view_paginated(self):
res = self.client.get('/dates/books/2006/paginated/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['book_list']), list(Book.objects.filter(pubdate__year=2006)))
self.assertEqual(list(res.context['object_list']), list(Book.objects.filter(pubdate__year=2006)))
self.assertTemplateUsed(res, 'generic_views/book_archive_year.html')
def test_year_view_custom_sort_order(self):
# Zebras comes after Dreaming by name, but before on '-pubdate' which is the default sorting
Book.objects.create(name="Zebras for Dummies", pages=600, pubdate=datetime.date(2006, 9, 1))
res = self.client.get('/dates/books/2006/sortedbyname/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['date_list']), [datetime.date(2006, 5, 1), datetime.date(2006, 9, 1)])
self.assertEqual(list(res.context['book_list']), list(Book.objects.filter(pubdate__year=2006).order_by('name')))
self.assertEqual(list(res.context['object_list']), list(Book.objects.filter(pubdate__year=2006).order_by('name')))
self.assertTemplateUsed(res, 'generic_views/book_archive_year.html')
def test_year_view_two_custom_sort_orders(self):
Book.objects.create(name="Zebras for Dummies", pages=300, pubdate=datetime.date(2006, 9, 1))
Book.objects.create(name="Hunting Hippos", pages=400, pubdate=datetime.date(2006, 3, 1))
res = self.client.get('/dates/books/2006/sortedbypageandnamedec/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['date_list']), [datetime.date(2006, 3, 1), datetime.date(2006, 5, 1), datetime.date(2006, 9, 1)])
self.assertEqual(list(res.context['book_list']), list(Book.objects.filter(pubdate__year=2006).order_by('pages', '-name')))
self.assertEqual(list(res.context['object_list']), list(Book.objects.filter(pubdate__year=2006).order_by('pages', '-name')))
self.assertTemplateUsed(res, 'generic_views/book_archive_year.html')
def test_year_view_invalid_pattern(self):
res = self.client.get('/dates/books/no_year/')
self.assertEqual(res.status_code, 404)
def test_no_duplicate_query(self):
# Regression test for #18354
with self.assertNumQueries(4):
self.client.get('/dates/books/2008/reverse/')
def test_datetime_year_view(self):
BookSigning.objects.create(event_date=datetime.datetime(2008, 4, 2, 12, 0))
res = self.client.get('/dates/booksignings/2008/')
self.assertEqual(res.status_code, 200)
@skipUnlessDBFeature('has_zoneinfo_database')
@override_settings(USE_TZ=True, TIME_ZONE='Africa/Nairobi')
def test_aware_datetime_year_view(self):
BookSigning.objects.create(event_date=datetime.datetime(2008, 4, 2, 12, 0, tzinfo=timezone.utc))
res = self.client.get('/dates/booksignings/2008/')
self.assertEqual(res.status_code, 200)
def test_date_list_order(self):
"""date_list should be sorted ascending in year view"""
_make_books(10, base_date=datetime.date(2011, 12, 25))
res = self.client.get('/dates/books/2011/')
self.assertEqual(list(res.context['date_list']), list(sorted(res.context['date_list'])))
@override_settings(ROOT_URLCONF='generic_views.urls')
class MonthArchiveViewTests(TestCase):
fixtures = ['generic-views-test-data.json']
def test_month_view(self):
res = self.client.get('/dates/books/2008/oct/')
self.assertEqual(res.status_code, 200)
self.assertTemplateUsed(res, 'generic_views/book_archive_month.html')
self.assertEqual(list(res.context['date_list']), [datetime.date(2008, 10, 1)])
self.assertEqual(list(res.context['book_list']),
list(Book.objects.filter(pubdate=datetime.date(2008, 10, 1))))
self.assertEqual(res.context['month'], datetime.date(2008, 10, 1))
# Since allow_empty=False, next/prev months must be valid (#7164)
self.assertEqual(res.context['next_month'], None)
self.assertEqual(res.context['previous_month'], datetime.date(2006, 5, 1))
def test_month_view_allow_empty(self):
# allow_empty = False, empty month
res = self.client.get('/dates/books/2000/jan/')
self.assertEqual(res.status_code, 404)
# allow_empty = True, empty month
res = self.client.get('/dates/books/2000/jan/allow_empty/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['date_list']), [])
self.assertEqual(list(res.context['book_list']), [])
self.assertEqual(res.context['month'], datetime.date(2000, 1, 1))
# Since allow_empty=True, next/prev are allowed to be empty months (#7164)
self.assertEqual(res.context['next_month'], datetime.date(2000, 2, 1))
self.assertEqual(res.context['previous_month'], datetime.date(1999, 12, 1))
# allow_empty but not allow_future: next_month should be empty (#7164)
url = datetime.date.today().strftime('/dates/books/%Y/%b/allow_empty/').lower()
res = self.client.get(url)
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['next_month'], None)
def test_month_view_allow_future(self):
future = (datetime.date.today() + datetime.timedelta(days=60)).replace(day=1)
urlbit = future.strftime('%Y/%b').lower()
b = Book.objects.create(name="The New New Testement", pages=600, pubdate=future)
# allow_future = False, future month
res = self.client.get('/dates/books/%s/' % urlbit)
self.assertEqual(res.status_code, 404)
# allow_future = True, valid future month
res = self.client.get('/dates/books/%s/allow_future/' % urlbit)
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['date_list'][0], b.pubdate)
self.assertEqual(list(res.context['book_list']), [b])
self.assertEqual(res.context['month'], future)
# Since allow_future = True but not allow_empty, next/prev are not
# allowed to be empty months (#7164)
self.assertEqual(res.context['next_month'], None)
self.assertEqual(res.context['previous_month'], datetime.date(2008, 10, 1))
# allow_future, but not allow_empty, with a current month. So next
# should be in the future (yup, #7164, again)
res = self.client.get('/dates/books/2008/oct/allow_future/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['next_month'], future)
self.assertEqual(res.context['previous_month'], datetime.date(2006, 5, 1))
def test_month_view_paginated(self):
res = self.client.get('/dates/books/2008/oct/paginated/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['book_list']), list(Book.objects.filter(pubdate__year=2008, pubdate__month=10)))
self.assertEqual(list(res.context['object_list']), list(Book.objects.filter(pubdate__year=2008, pubdate__month=10)))
self.assertTemplateUsed(res, 'generic_views/book_archive_month.html')
def test_custom_month_format(self):
res = self.client.get('/dates/books/2008/10/')
self.assertEqual(res.status_code, 200)
def test_month_view_invalid_pattern(self):
res = self.client.get('/dates/books/2007/no_month/')
self.assertEqual(res.status_code, 404)
def test_previous_month_without_content(self):
"Content can exist on any day of the previous month. Refs #14711"
self.pubdate_list = [
datetime.date(2010, month, day)
for month, day in ((9, 1), (10, 2), (11, 3))
]
for pubdate in self.pubdate_list:
name = str(pubdate)
Book.objects.create(name=name, slug=name, pages=100, pubdate=pubdate)
res = self.client.get('/dates/books/2010/nov/allow_empty/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['previous_month'], datetime.date(2010, 10, 1))
# The following test demonstrates the bug
res = self.client.get('/dates/books/2010/nov/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['previous_month'], datetime.date(2010, 10, 1))
# The bug does not occur here because a Book with pubdate of Sep 1 exists
res = self.client.get('/dates/books/2010/oct/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['previous_month'], datetime.date(2010, 9, 1))
def test_datetime_month_view(self):
BookSigning.objects.create(event_date=datetime.datetime(2008, 2, 1, 12, 0))
BookSigning.objects.create(event_date=datetime.datetime(2008, 4, 2, 12, 0))
BookSigning.objects.create(event_date=datetime.datetime(2008, 6, 3, 12, 0))
res = self.client.get('/dates/booksignings/2008/apr/')
self.assertEqual(res.status_code, 200)
@skipUnlessDBFeature('has_zoneinfo_database')
@override_settings(USE_TZ=True, TIME_ZONE='Africa/Nairobi')
def test_aware_datetime_month_view(self):
BookSigning.objects.create(event_date=datetime.datetime(2008, 2, 1, 12, 0, tzinfo=timezone.utc))
BookSigning.objects.create(event_date=datetime.datetime(2008, 4, 2, 12, 0, tzinfo=timezone.utc))
BookSigning.objects.create(event_date=datetime.datetime(2008, 6, 3, 12, 0, tzinfo=timezone.utc))
res = self.client.get('/dates/booksignings/2008/apr/')
self.assertEqual(res.status_code, 200)
def test_date_list_order(self):
"""date_list should be sorted ascending in month view"""
_make_books(10, base_date=datetime.date(2011, 12, 25))
res = self.client.get('/dates/books/2011/dec/')
self.assertEqual(list(res.context['date_list']), list(sorted(res.context['date_list'])))
@override_settings(ROOT_URLCONF='generic_views.urls')
class WeekArchiveViewTests(TestCase):
fixtures = ['generic-views-test-data.json']
def test_week_view(self):
res = self.client.get('/dates/books/2008/week/39/')
self.assertEqual(res.status_code, 200)
self.assertTemplateUsed(res, 'generic_views/book_archive_week.html')
self.assertEqual(res.context['book_list'][0], Book.objects.get(pubdate=datetime.date(2008, 10, 1)))
self.assertEqual(res.context['week'], datetime.date(2008, 9, 28))
# Since allow_empty=False, next/prev weeks must be valid
self.assertEqual(res.context['next_week'], None)
self.assertEqual(res.context['previous_week'], datetime.date(2006, 4, 30))
def test_week_view_allow_empty(self):
# allow_empty = False, empty week
res = self.client.get('/dates/books/2008/week/12/')
self.assertEqual(res.status_code, 404)
# allow_empty = True, empty month
res = self.client.get('/dates/books/2008/week/12/allow_empty/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['book_list']), [])
self.assertEqual(res.context['week'], datetime.date(2008, 3, 23))
# Since allow_empty=True, next/prev are allowed to be empty weeks
self.assertEqual(res.context['next_week'], datetime.date(2008, 3, 30))
self.assertEqual(res.context['previous_week'], datetime.date(2008, 3, 16))
# allow_empty but not allow_future: next_week should be empty
url = datetime.date.today().strftime('/dates/books/%Y/week/%U/allow_empty/').lower()
res = self.client.get(url)
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['next_week'], None)
def test_week_view_allow_future(self):
# January 7th always falls in week 1, given Python's definition of week numbers
future = datetime.date(datetime.date.today().year + 1, 1, 7)
future_sunday = future - datetime.timedelta(days=(future.weekday() + 1) % 7)
b = Book.objects.create(name="The New New Testement", pages=600, pubdate=future)
res = self.client.get('/dates/books/%s/week/1/' % future.year)
self.assertEqual(res.status_code, 404)
res = self.client.get('/dates/books/%s/week/1/allow_future/' % future.year)
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['book_list']), [b])
self.assertEqual(res.context['week'], future_sunday)
# Since allow_future = True but not allow_empty, next/prev are not
# allowed to be empty weeks
self.assertEqual(res.context['next_week'], None)
self.assertEqual(res.context['previous_week'], datetime.date(2008, 9, 28))
# allow_future, but not allow_empty, with a current week. So next
# should be in the future
res = self.client.get('/dates/books/2008/week/39/allow_future/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['next_week'], future_sunday)
self.assertEqual(res.context['previous_week'], datetime.date(2006, 4, 30))
def test_week_view_paginated(self):
week_start = datetime.date(2008, 9, 28)
week_end = week_start + datetime.timedelta(days=7)
res = self.client.get('/dates/books/2008/week/39/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['book_list']), list(Book.objects.filter(pubdate__gte=week_start, pubdate__lt=week_end)))
self.assertEqual(list(res.context['object_list']), list(Book.objects.filter(pubdate__gte=week_start, pubdate__lt=week_end)))
self.assertTemplateUsed(res, 'generic_views/book_archive_week.html')
def test_week_view_invalid_pattern(self):
res = self.client.get('/dates/books/2007/week/no_week/')
self.assertEqual(res.status_code, 404)
def test_week_start_Monday(self):
# Regression for #14752
res = self.client.get('/dates/books/2008/week/39/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['week'], datetime.date(2008, 9, 28))
res = self.client.get('/dates/books/2008/week/39/monday/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['week'], datetime.date(2008, 9, 29))
def test_datetime_week_view(self):
BookSigning.objects.create(event_date=datetime.datetime(2008, 4, 2, 12, 0))
res = self.client.get('/dates/booksignings/2008/week/13/')
self.assertEqual(res.status_code, 200)
@override_settings(USE_TZ=True, TIME_ZONE='Africa/Nairobi')
def test_aware_datetime_week_view(self):
BookSigning.objects.create(event_date=datetime.datetime(2008, 4, 2, 12, 0, tzinfo=timezone.utc))
res = self.client.get('/dates/booksignings/2008/week/13/')
self.assertEqual(res.status_code, 200)
@override_settings(ROOT_URLCONF='generic_views.urls')
class DayArchiveViewTests(TestCase):
fixtures = ['generic-views-test-data.json']
def test_day_view(self):
res = self.client.get('/dates/books/2008/oct/01/')
self.assertEqual(res.status_code, 200)
self.assertTemplateUsed(res, 'generic_views/book_archive_day.html')
self.assertEqual(list(res.context['book_list']),
list(Book.objects.filter(pubdate=datetime.date(2008, 10, 1))))
self.assertEqual(res.context['day'], datetime.date(2008, 10, 1))
# Since allow_empty=False, next/prev days must be valid.
self.assertEqual(res.context['next_day'], None)
self.assertEqual(res.context['previous_day'], datetime.date(2006, 5, 1))
def test_day_view_allow_empty(self):
# allow_empty = False, empty month
res = self.client.get('/dates/books/2000/jan/1/')
self.assertEqual(res.status_code, 404)
# allow_empty = True, empty month
res = self.client.get('/dates/books/2000/jan/1/allow_empty/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['book_list']), [])
self.assertEqual(res.context['day'], datetime.date(2000, 1, 1))
# Since it's allow empty, next/prev are allowed to be empty months (#7164)
self.assertEqual(res.context['next_day'], datetime.date(2000, 1, 2))
self.assertEqual(res.context['previous_day'], datetime.date(1999, 12, 31))
# allow_empty but not allow_future: next_month should be empty (#7164)
url = datetime.date.today().strftime('/dates/books/%Y/%b/%d/allow_empty/').lower()
res = self.client.get(url)
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['next_day'], None)
def test_day_view_allow_future(self):
future = (datetime.date.today() + datetime.timedelta(days=60))
urlbit = future.strftime('%Y/%b/%d').lower()
b = Book.objects.create(name="The New New Testement", pages=600, pubdate=future)
# allow_future = False, future month
res = self.client.get('/dates/books/%s/' % urlbit)
self.assertEqual(res.status_code, 404)
# allow_future = True, valid future month
res = self.client.get('/dates/books/%s/allow_future/' % urlbit)
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['book_list']), [b])
self.assertEqual(res.context['day'], future)
# allow_future but not allow_empty, next/prev must be valid
self.assertEqual(res.context['next_day'], None)
self.assertEqual(res.context['previous_day'], datetime.date(2008, 10, 1))
# allow_future, but not allow_empty, with a current month.
res = self.client.get('/dates/books/2008/oct/01/allow_future/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['next_day'], future)
self.assertEqual(res.context['previous_day'], datetime.date(2006, 5, 1))
# allow_future for yesterday, next_day is today (#17192)
today = datetime.date.today()
yesterday = today - datetime.timedelta(days=1)
res = self.client.get('/dates/books/%s/allow_empty_and_future/'
% yesterday.strftime('%Y/%b/%d').lower())
self.assertEqual(res.context['next_day'], today)
def test_day_view_paginated(self):
res = self.client.get('/dates/books/2008/oct/1/')
self.assertEqual(res.status_code, 200)
self.assertEqual(list(res.context['book_list']), list(Book.objects.filter(pubdate__year=2008, pubdate__month=10, pubdate__day=1)))
self.assertEqual(list(res.context['object_list']), list(Book.objects.filter(pubdate__year=2008, pubdate__month=10, pubdate__day=1)))
self.assertTemplateUsed(res, 'generic_views/book_archive_day.html')
def test_next_prev_context(self):
res = self.client.get('/dates/books/2008/oct/01/')
self.assertEqual(res.content, b"Archive for Oct. 1, 2008. Previous day is May 1, 2006\n")
def test_custom_month_format(self):
res = self.client.get('/dates/books/2008/10/01/')
self.assertEqual(res.status_code, 200)
def test_day_view_invalid_pattern(self):
res = self.client.get('/dates/books/2007/oct/no_day/')
self.assertEqual(res.status_code, 404)
def test_today_view(self):
res = self.client.get('/dates/books/today/')
self.assertEqual(res.status_code, 404)
res = self.client.get('/dates/books/today/allow_empty/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['day'], datetime.date.today())
def test_datetime_day_view(self):
BookSigning.objects.create(event_date=datetime.datetime(2008, 4, 2, 12, 0))
res = self.client.get('/dates/booksignings/2008/apr/2/')
self.assertEqual(res.status_code, 200)
@requires_tz_support
@override_settings(USE_TZ=True, TIME_ZONE='Africa/Nairobi')
def test_aware_datetime_day_view(self):
bs = BookSigning.objects.create(event_date=datetime.datetime(2008, 4, 2, 12, 0, tzinfo=timezone.utc))
res = self.client.get('/dates/booksignings/2008/apr/2/')
self.assertEqual(res.status_code, 200)
# 2008-04-01T22:00:00+00:00 (event date) is 2008-04-02T01:00:00+03:00 in Nairobi, so it still falls on April 2 locally
bs.event_date = datetime.datetime(2008, 4, 1, 22, 0, tzinfo=timezone.utc)
bs.save()
res = self.client.get('/dates/booksignings/2008/apr/2/')
self.assertEqual(res.status_code, 200)
# 2008-04-02T22:00:00+00:00 (event date) is 2008-04-03T01:00:00+03:00 in Nairobi, so it no longer falls on April 2 locally
bs.event_date = datetime.datetime(2008, 4, 2, 22, 0, tzinfo=timezone.utc)
bs.save()
res = self.client.get('/dates/booksignings/2008/apr/2/')
self.assertEqual(res.status_code, 404)
@override_settings(ROOT_URLCONF='generic_views.urls')
class DateDetailViewTests(TestCase):
fixtures = ['generic-views-test-data.json']
def test_date_detail_by_pk(self):
res = self.client.get('/dates/books/2008/oct/01/1/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['object'], Book.objects.get(pk=1))
self.assertEqual(res.context['book'], Book.objects.get(pk=1))
self.assertTemplateUsed(res, 'generic_views/book_detail.html')
def test_date_detail_by_slug(self):
res = self.client.get('/dates/books/2006/may/01/byslug/dreaming-in-code/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['book'], Book.objects.get(slug='dreaming-in-code'))
def test_date_detail_custom_month_format(self):
res = self.client.get('/dates/books/2008/10/01/1/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['book'], Book.objects.get(pk=1))
def test_date_detail_allow_future(self):
future = (datetime.date.today() + datetime.timedelta(days=60))
urlbit = future.strftime('%Y/%b/%d').lower()
b = Book.objects.create(name="The New New Testement", slug="new-new", pages=600, pubdate=future)
res = self.client.get('/dates/books/%s/new-new/' % urlbit)
self.assertEqual(res.status_code, 404)
res = self.client.get('/dates/books/%s/%s/allow_future/' % (urlbit, b.id))
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['book'], b)
self.assertTemplateUsed(res, 'generic_views/book_detail.html')
def test_invalid_url(self):
self.assertRaises(AttributeError, self.client.get, "/dates/books/2008/oct/01/nopk/")
def test_get_object_custom_queryset(self):
"""
Ensure that custom querysets are used when provided to
BaseDateDetailView.get_object()
Refs #16918.
"""
res = self.client.get(
'/dates/books/get_object_custom_queryset/2006/may/01/2/')
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['object'], Book.objects.get(pk=2))
self.assertEqual(res.context['book'], Book.objects.get(pk=2))
self.assertTemplateUsed(res, 'generic_views/book_detail.html')
res = self.client.get(
'/dates/books/get_object_custom_queryset/2008/oct/01/1/')
self.assertEqual(res.status_code, 404)
def test_get_object_custom_queryset_numqueries(self):
with self.assertNumQueries(1):
self.client.get('/dates/books/get_object_custom_queryset/2006/may/01/2/')
def test_datetime_date_detail(self):
bs = BookSigning.objects.create(event_date=datetime.datetime(2008, 4, 2, 12, 0))
res = self.client.get('/dates/booksignings/2008/apr/2/%d/' % bs.pk)
self.assertEqual(res.status_code, 200)
@requires_tz_support
@override_settings(USE_TZ=True, TIME_ZONE='Africa/Nairobi')
def test_aware_datetime_date_detail(self):
bs = BookSigning.objects.create(event_date=datetime.datetime(2008, 4, 2, 12, 0, tzinfo=timezone.utc))
res = self.client.get('/dates/booksignings/2008/apr/2/%d/' % bs.pk)
self.assertEqual(res.status_code, 200)
# 2008-04-01T22:00:00+00:00 (event date) is 2008-04-02T01:00:00+03:00 in Nairobi, so it still falls on April 2 locally
bs.event_date = datetime.datetime(2008, 4, 1, 22, 0, tzinfo=timezone.utc)
bs.save()
res = self.client.get('/dates/booksignings/2008/apr/2/%d/' % bs.pk)
self.assertEqual(res.status_code, 200)
# 2008-04-02T22:00:00+00:00 (event date) is 2008-04-03T01:00:00+03:00 in Nairobi, so it no longer falls on April 2 locally
bs.event_date = datetime.datetime(2008, 4, 2, 22, 0, tzinfo=timezone.utc)
bs.save()
res = self.client.get('/dates/booksignings/2008/apr/2/%d/' % bs.pk)
self.assertEqual(res.status_code, 404)
|
xiaoxiamii/scikit-learn
|
refs/heads/master
|
examples/exercises/plot_iris_exercise.py
|
323
|
"""
================================
SVM Exercise
================================
A tutorial exercise for using different SVM kernels.
This exercise is used in the :ref:`using_kernels_tut` part of the
:ref:`supervised_learning_tut` section of the :ref:`stat_learn_tut_index`.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets, svm
iris = datasets.load_iris()
X = iris.data
y = iris.target
X = X[y != 0, :2]
y = y[y != 0]
n_sample = len(X)
np.random.seed(0)
order = np.random.permutation(n_sample)
X = X[order]
y = y[order].astype(float)
# use an integer split point; slicing with a float is an error in modern numpy
n_train = int(.9 * n_sample)
X_train = X[:n_train]
y_train = y[:n_train]
X_test = X[n_train:]
y_test = y[n_train:]
# fit the model
for fig_num, kernel in enumerate(('linear', 'rbf', 'poly')):
clf = svm.SVC(kernel=kernel, gamma=10)
clf.fit(X_train, y_train)
plt.figure(fig_num)
plt.clf()
plt.scatter(X[:, 0], X[:, 1], c=y, zorder=10, cmap=plt.cm.Paired)
# Circle out the test data
plt.scatter(X_test[:, 0], X_test[:, 1], s=80, facecolors='none', zorder=10)
plt.axis('tight')
x_min = X[:, 0].min()
x_max = X[:, 0].max()
y_min = X[:, 1].min()
y_max = X[:, 1].max()
XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]
Z = clf.decision_function(np.c_[XX.ravel(), YY.ravel()])
# Put the result into a color plot
Z = Z.reshape(XX.shape)
plt.pcolormesh(XX, YY, Z > 0, cmap=plt.cm.Paired)
plt.contour(XX, YY, Z, colors=['k', 'k', 'k'], linestyles=['--', '-', '--'],
levels=[-.5, 0, .5])
plt.title(kernel)
plt.show()
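The exercise above only plots the decision boundaries. As a small follow-up sketch (reusing the X_train/y_train/X_test/y_test split defined in the script above; this is not part of the original example), the three kernels can also be compared on held-out accuracy:
# Illustrative follow-up: compare held-out accuracy of the three kernels
# on the same 90/10 split built in the script above.
from sklearn import svm

for kernel in ('linear', 'rbf', 'poly'):
    clf = svm.SVC(kernel=kernel, gamma=10)
    clf.fit(X_train, y_train)
    # score() returns the mean accuracy on the held-out samples
    print('%-6s kernel: test accuracy = %.2f' % (kernel, clf.score(X_test, y_test)))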
|
btallman/incubator-airflow
|
refs/heads/master
|
scripts/perf/scheduler_ops_metrics.py
|
30
|
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime
import logging
import pandas as pd
import sys
from airflow import configuration, settings
from airflow.jobs import SchedulerJob
from airflow.models import DagBag, DagModel, DagRun, TaskInstance
from airflow.utils.state import State
SUBDIR = 'scripts/perf/dags'
DAG_IDS = ['perf_dag_1', 'perf_dag_2']
MAX_RUNTIME_SECS = 6
class SchedulerMetricsJob(SchedulerJob):
"""
This class extends SchedulerJob to instrument the execution performance of
task instances contained in each DAG. We want to know if any DAG
is starved of resources, and this will be reflected in the stats printed
out at the end of the test run. The following metrics will be instrumented
for each task instance (dag_id, task_id, execution_date) tuple:
1. Queuing delay - time from starting the executor until the task
instance is added to the executor queue.
2. Start delay - time from starting the executor until the task instance
starts execution.
3. Land time - time from starting the executor until the task instance
completes.
4. Duration - time taken to execute the task instance.
The DAGs implement bash operators that call the system wait command. This
is representative of typical operators run on Airflow - queries that are
run on remote systems and spend the majority of their time on I/O wait.
To Run:
$ python scripts/perf/scheduler_ops_metrics.py
"""
__mapper_args__ = {
'polymorphic_identity': 'SchedulerMetricsJob'
}
def print_stats(self):
"""
Print operational metrics for the scheduler test.
"""
session = settings.Session()
TI = TaskInstance
tis = (
session
.query(TI)
.filter(TI.dag_id.in_(DAG_IDS))
.all()
)
successful_tis = [ti for ti in tis if ti.state == State.SUCCESS]  # a list, so len() works on Python 3
ti_perf = [(ti.dag_id, ti.task_id, ti.execution_date,
(ti.queued_dttm - self.start_date).total_seconds(),
(ti.start_date - self.start_date).total_seconds(),
(ti.end_date - self.start_date).total_seconds(),
ti.duration) for ti in successful_tis]
ti_perf_df = pd.DataFrame(ti_perf, columns=['dag_id', 'task_id',
'execution_date',
'queue_delay',
'start_delay', 'land_time',
'duration'])
print('Performance Results')
print('###################')
for dag_id in DAG_IDS:
print('DAG {}'.format(dag_id))
print(ti_perf_df[ti_perf_df['dag_id'] == dag_id])
print('###################')
if len(tis) > len(successful_tis):
print("WARNING!! The following task instances haven't completed")
print(pd.DataFrame([(ti.dag_id, ti.task_id, ti.execution_date, ti.state)
for ti in filter(lambda x: x.state != State.SUCCESS, tis)],
columns=['dag_id', 'task_id', 'execution_date', 'state']))
session.commit()
def heartbeat(self):
"""
Override the scheduler heartbeat to determine when the test is complete
"""
super(SchedulerMetricsJob, self).heartbeat()
session = settings.Session()
# Get all the relevant task instances
TI = TaskInstance
successful_tis = (
session
.query(TI)
.filter(TI.dag_id.in_(DAG_IDS))
.filter(TI.state.in_([State.SUCCESS]))
.all()
)
session.commit()
dagbag = DagBag(SUBDIR)
dags = [dagbag.dags[dag_id] for dag_id in DAG_IDS]
# the tasks in perf_dag_1 and perf_dag_2 have a daily schedule interval.
num_task_instances = sum([(datetime.today() - task.start_date).days
for dag in dags for task in dag.tasks])
if (len(successful_tis) == num_task_instances or
(datetime.now()-self.start_date).total_seconds() >
MAX_RUNTIME_SECS):
if (len(successful_tis) == num_task_instances):
self.logger.info("All tasks processed! Printing stats.")
else:
self.logger.info("Test timeout reached. "
"Printing available stats.")
self.print_stats()
set_dags_paused_state(True)
sys.exit()
def clear_dag_runs():
"""
Remove any existing DAG runs for the perf test DAGs.
"""
session = settings.Session()
drs = session.query(DagRun).filter(
DagRun.dag_id.in_(DAG_IDS),
).all()
for dr in drs:
logging.info('Deleting DagRun :: {}'.format(dr))
session.delete(dr)
def clear_dag_task_instances():
"""
Remove any existing task instances for the perf test DAGs.
"""
session = settings.Session()
TI = TaskInstance
tis = (
session
.query(TI)
.filter(TI.dag_id.in_(DAG_IDS))
.all()
)
for ti in tis:
logging.info('Deleting TaskInstance :: {}'.format(ti))
session.delete(ti)
session.commit()
def set_dags_paused_state(is_paused):
"""
Toggle the pause state of the DAGs in the test.
"""
session = settings.Session()
dms = session.query(DagModel).filter(
DagModel.dag_id.in_(DAG_IDS))
for dm in dms:
logging.info('Setting DAG :: {} is_paused={}'.format(dm, is_paused))
dm.is_paused = is_paused
session.commit()
def main():
configuration.load_test_config()
set_dags_paused_state(False)
clear_dag_runs()
clear_dag_task_instances()
job = SchedulerMetricsJob(dag_ids=DAG_IDS, subdir=SUBDIR)
job.run()
if __name__ == "__main__":
main()
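The four metrics listed in the SchedulerMetricsJob docstring are all simple differences against the executor start time, exactly as computed in print_stats(). A minimal sketch of that arithmetic with invented timestamps (not real TaskInstance rows):
# Sketch of the metric arithmetic used in print_stats(), with made-up timestamps.
from datetime import datetime

executor_start = datetime(2016, 1, 1, 0, 0, 0)   # plays the role of self.start_date
queued_dttm = datetime(2016, 1, 1, 0, 0, 5)      # ti.queued_dttm
start_date = datetime(2016, 1, 1, 0, 0, 8)       # ti.start_date
end_date = datetime(2016, 1, 1, 0, 0, 20)        # ti.end_date

queue_delay = (queued_dttm - executor_start).total_seconds()  # 5.0
start_delay = (start_date - executor_start).total_seconds()   # 8.0
land_time = (end_date - executor_start).total_seconds()       # 20.0
duration = (end_date - start_date).total_seconds()            # 12.0
print(queue_delay, start_delay, land_time, duration)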
|
MQQiang/kbengine
|
refs/heads/master
|
kbe/src/lib/python/Lib/pydoc_data/__init__.py
|
12133432
| |
jabesq/home-assistant
|
refs/heads/dev
|
homeassistant/components/cloud/http_api.py
|
1
|
"""The HTTP api to control the cloud integration."""
import asyncio
from functools import wraps
import logging
import attr
import aiohttp
import async_timeout
import voluptuous as vol
from homeassistant.core import callback
from homeassistant.components.http import HomeAssistantView
from homeassistant.components.http.data_validator import (
RequestDataValidator)
from homeassistant.components import websocket_api
from homeassistant.components.websocket_api import const as ws_const
from homeassistant.components.alexa import (
entities as alexa_entities,
errors as alexa_errors,
)
from homeassistant.components.google_assistant import helpers as google_helpers
from .const import (
DOMAIN, REQUEST_TIMEOUT, PREF_ENABLE_ALEXA, PREF_ENABLE_GOOGLE,
PREF_GOOGLE_SECURE_DEVICES_PIN, InvalidTrustedNetworks,
InvalidTrustedProxies, PREF_ALEXA_REPORT_STATE, RequireRelink)
_LOGGER = logging.getLogger(__name__)
WS_TYPE_STATUS = 'cloud/status'
SCHEMA_WS_STATUS = websocket_api.BASE_COMMAND_MESSAGE_SCHEMA.extend({
vol.Required('type'): WS_TYPE_STATUS,
})
WS_TYPE_SUBSCRIPTION = 'cloud/subscription'
SCHEMA_WS_SUBSCRIPTION = websocket_api.BASE_COMMAND_MESSAGE_SCHEMA.extend({
vol.Required('type'): WS_TYPE_SUBSCRIPTION,
})
WS_TYPE_HOOK_CREATE = 'cloud/cloudhook/create'
SCHEMA_WS_HOOK_CREATE = websocket_api.BASE_COMMAND_MESSAGE_SCHEMA.extend({
vol.Required('type'): WS_TYPE_HOOK_CREATE,
vol.Required('webhook_id'): str
})
WS_TYPE_HOOK_DELETE = 'cloud/cloudhook/delete'
SCHEMA_WS_HOOK_DELETE = websocket_api.BASE_COMMAND_MESSAGE_SCHEMA.extend({
vol.Required('type'): WS_TYPE_HOOK_DELETE,
vol.Required('webhook_id'): str
})
_CLOUD_ERRORS = {
InvalidTrustedNetworks:
(500, 'Remote UI not compatible with 127.0.0.1/::1'
' as a trusted network.'),
InvalidTrustedProxies:
(500, 'Remote UI not compatible with 127.0.0.1/::1'
' as trusted proxies.'),
}
async def async_setup(hass):
"""Initialize the HTTP API."""
hass.components.websocket_api.async_register_command(
WS_TYPE_STATUS, websocket_cloud_status,
SCHEMA_WS_STATUS
)
hass.components.websocket_api.async_register_command(
WS_TYPE_SUBSCRIPTION, websocket_subscription,
SCHEMA_WS_SUBSCRIPTION
)
hass.components.websocket_api.async_register_command(
websocket_update_prefs)
hass.components.websocket_api.async_register_command(
WS_TYPE_HOOK_CREATE, websocket_hook_create,
SCHEMA_WS_HOOK_CREATE
)
hass.components.websocket_api.async_register_command(
WS_TYPE_HOOK_DELETE, websocket_hook_delete,
SCHEMA_WS_HOOK_DELETE
)
hass.components.websocket_api.async_register_command(
websocket_remote_connect)
hass.components.websocket_api.async_register_command(
websocket_remote_disconnect)
hass.components.websocket_api.async_register_command(
google_assistant_list)
hass.components.websocket_api.async_register_command(
google_assistant_update)
hass.components.websocket_api.async_register_command(alexa_list)
hass.components.websocket_api.async_register_command(alexa_update)
hass.components.websocket_api.async_register_command(alexa_sync)
hass.http.register_view(GoogleActionsSyncView)
hass.http.register_view(CloudLoginView)
hass.http.register_view(CloudLogoutView)
hass.http.register_view(CloudRegisterView)
hass.http.register_view(CloudResendConfirmView)
hass.http.register_view(CloudForgotPasswordView)
from hass_nabucasa import auth
_CLOUD_ERRORS.update({
auth.UserNotFound:
(400, "User does not exist."),
auth.UserNotConfirmed:
(400, 'Email not confirmed.'),
auth.UserExists:
(400, 'An account with the given email already exists.'),
auth.Unauthenticated:
(401, 'Authentication failed.'),
auth.PasswordChangeRequired:
(400, 'Password change required.'),
asyncio.TimeoutError:
(502, 'Unable to reach the Home Assistant cloud.'),
aiohttp.ClientError:
(500, 'Error making internal request'),
})
def _handle_cloud_errors(handler):
"""Webview decorator to handle auth errors."""
@wraps(handler)
async def error_handler(view, request, *args, **kwargs):
"""Handle exceptions that raise from the wrapped request handler."""
try:
result = await handler(view, request, *args, **kwargs)
return result
except Exception as err: # pylint: disable=broad-except
status, msg = _process_cloud_exception(err, request.path)
return view.json_message(
msg, status_code=status,
message_code=err.__class__.__name__.lower())
return error_handler
def _ws_handle_cloud_errors(handler):
"""Websocket decorator to handle auth errors."""
@wraps(handler)
async def error_handler(hass, connection, msg):
"""Handle exceptions that raise from the wrapped handler."""
try:
return await handler(hass, connection, msg)
except Exception as err: # pylint: disable=broad-except
err_status, err_msg = _process_cloud_exception(err, msg['type'])
connection.send_error(msg['id'], err_status, err_msg)
return error_handler
def _process_cloud_exception(exc, where):
"""Process a cloud exception."""
err_info = _CLOUD_ERRORS.get(exc.__class__)
if err_info is None:
_LOGGER.exception(
"Unexpected error processing request for %s", where)
err_info = (502, 'Unexpected error: {}'.format(exc))
return err_info
class GoogleActionsSyncView(HomeAssistantView):
"""Trigger a Google Actions Smart Home Sync."""
url = '/api/cloud/google_actions/sync'
name = 'api:cloud:google_actions/sync'
@_handle_cloud_errors
async def post(self, request):
"""Trigger a Google Actions sync."""
hass = request.app['hass']
cloud = hass.data[DOMAIN]
websession = hass.helpers.aiohttp_client.async_get_clientsession()
with async_timeout.timeout(REQUEST_TIMEOUT):
await hass.async_add_job(cloud.auth.check_token)
with async_timeout.timeout(REQUEST_TIMEOUT):
req = await websession.post(
cloud.google_actions_sync_url, headers={
'authorization': cloud.id_token
})
return self.json({}, status_code=req.status)
class CloudLoginView(HomeAssistantView):
"""Login to Home Assistant cloud."""
url = '/api/cloud/login'
name = 'api:cloud:login'
@_handle_cloud_errors
@RequestDataValidator(vol.Schema({
vol.Required('email'): str,
vol.Required('password'): str,
}))
async def post(self, request, data):
"""Handle login request."""
hass = request.app['hass']
cloud = hass.data[DOMAIN]
with async_timeout.timeout(REQUEST_TIMEOUT):
await hass.async_add_job(cloud.auth.login, data['email'],
data['password'])
hass.async_add_job(cloud.iot.connect)
return self.json({'success': True})
class CloudLogoutView(HomeAssistantView):
"""Log out of the Home Assistant cloud."""
url = '/api/cloud/logout'
name = 'api:cloud:logout'
@_handle_cloud_errors
async def post(self, request):
"""Handle logout request."""
hass = request.app['hass']
cloud = hass.data[DOMAIN]
with async_timeout.timeout(REQUEST_TIMEOUT):
await cloud.logout()
return self.json_message('ok')
class CloudRegisterView(HomeAssistantView):
"""Register on the Home Assistant cloud."""
url = '/api/cloud/register'
name = 'api:cloud:register'
@_handle_cloud_errors
@RequestDataValidator(vol.Schema({
vol.Required('email'): str,
vol.Required('password'): vol.All(str, vol.Length(min=6)),
}))
async def post(self, request, data):
"""Handle registration request."""
hass = request.app['hass']
cloud = hass.data[DOMAIN]
with async_timeout.timeout(REQUEST_TIMEOUT):
await hass.async_add_job(
cloud.auth.register, data['email'], data['password'])
return self.json_message('ok')
class CloudResendConfirmView(HomeAssistantView):
"""Resend email confirmation code."""
url = '/api/cloud/resend_confirm'
name = 'api:cloud:resend_confirm'
@_handle_cloud_errors
@RequestDataValidator(vol.Schema({
vol.Required('email'): str,
}))
async def post(self, request, data):
"""Handle resending confirm email code request."""
hass = request.app['hass']
cloud = hass.data[DOMAIN]
with async_timeout.timeout(REQUEST_TIMEOUT):
await hass.async_add_job(
cloud.auth.resend_email_confirm, data['email'])
return self.json_message('ok')
class CloudForgotPasswordView(HomeAssistantView):
"""View to start Forgot Password flow.."""
url = '/api/cloud/forgot_password'
name = 'api:cloud:forgot_password'
@_handle_cloud_errors
@RequestDataValidator(vol.Schema({
vol.Required('email'): str,
}))
async def post(self, request, data):
"""Handle forgot password request."""
hass = request.app['hass']
cloud = hass.data[DOMAIN]
with async_timeout.timeout(REQUEST_TIMEOUT):
await hass.async_add_job(
cloud.auth.forgot_password, data['email'])
return self.json_message('ok')
@callback
def websocket_cloud_status(hass, connection, msg):
"""Handle request for account info.
Async friendly.
"""
cloud = hass.data[DOMAIN]
connection.send_message(
websocket_api.result_message(msg['id'], _account_data(cloud)))
def _require_cloud_login(handler):
"""Websocket decorator that requires cloud to be logged in."""
@wraps(handler)
def with_cloud_auth(hass, connection, msg):
"""Require to be logged into the cloud."""
cloud = hass.data[DOMAIN]
if not cloud.is_logged_in:
connection.send_message(websocket_api.error_message(
msg['id'], 'not_logged_in',
'You need to be logged in to the cloud.'))
return
handler(hass, connection, msg)
return with_cloud_auth
@_require_cloud_login
@websocket_api.async_response
async def websocket_subscription(hass, connection, msg):
"""Handle request for account info."""
from hass_nabucasa.const import STATE_DISCONNECTED
cloud = hass.data[DOMAIN]
with async_timeout.timeout(REQUEST_TIMEOUT):
response = await cloud.fetch_subscription_info()
if response.status != 200:
connection.send_message(websocket_api.error_message(
msg['id'], 'request_failed', 'Failed to request subscription'))
return
data = await response.json()
# Check if a user is subscribed but local info is outdated
# In that case, let's refresh and reconnect
if data.get('provider') and not cloud.is_connected:
_LOGGER.debug(
"Found disconnected account with valid subscriotion, connecting")
await hass.async_add_executor_job(cloud.auth.renew_access_token)
# Cancel reconnect in progress
if cloud.iot.state != STATE_DISCONNECTED:
await cloud.iot.disconnect()
hass.async_create_task(cloud.iot.connect())
connection.send_message(websocket_api.result_message(msg['id'], data))
@_require_cloud_login
@websocket_api.async_response
@websocket_api.websocket_command({
vol.Required('type'): 'cloud/update_prefs',
vol.Optional(PREF_ENABLE_GOOGLE): bool,
vol.Optional(PREF_ENABLE_ALEXA): bool,
vol.Optional(PREF_ALEXA_REPORT_STATE): bool,
vol.Optional(PREF_GOOGLE_SECURE_DEVICES_PIN): vol.Any(None, str),
})
async def websocket_update_prefs(hass, connection, msg):
"""Handle request for account info."""
cloud = hass.data[DOMAIN]
changes = dict(msg)
changes.pop('id')
changes.pop('type')
# If we turn alexa linking on, validate that we can fetch access token
if changes.get(PREF_ALEXA_REPORT_STATE):
try:
with async_timeout.timeout(10):
await cloud.client.alexa_config.async_get_access_token()
except asyncio.TimeoutError:
connection.send_error(msg['id'], 'alexa_timeout',
'Timeout validating Alexa access token.')
return
except (alexa_errors.NoTokenAvailable, RequireRelink):
connection.send_error(
msg['id'], 'alexa_relink',
'Please go to the Alexa app and re-link the Home Assistant '
'skill and then try to enable state reporting.'
)
return
await cloud.client.prefs.async_update(**changes)
connection.send_message(websocket_api.result_message(msg['id']))
@_require_cloud_login
@websocket_api.async_response
@_ws_handle_cloud_errors
async def websocket_hook_create(hass, connection, msg):
"""Handle request for account info."""
cloud = hass.data[DOMAIN]
hook = await cloud.cloudhooks.async_create(msg['webhook_id'], False)
connection.send_message(websocket_api.result_message(msg['id'], hook))
@_require_cloud_login
@websocket_api.async_response
@_ws_handle_cloud_errors
async def websocket_hook_delete(hass, connection, msg):
"""Handle request for account info."""
cloud = hass.data[DOMAIN]
await cloud.cloudhooks.async_delete(msg['webhook_id'])
connection.send_message(websocket_api.result_message(msg['id']))
def _account_data(cloud):
"""Generate the auth data JSON response."""
from hass_nabucasa.const import STATE_DISCONNECTED
if not cloud.is_logged_in:
return {
'logged_in': False,
'cloud': STATE_DISCONNECTED,
}
claims = cloud.claims
client = cloud.client
remote = cloud.remote
# Load remote certificate
if remote.certificate:
certificate = attr.asdict(remote.certificate)
else:
certificate = None
return {
'logged_in': True,
'email': claims['email'],
'cloud': cloud.iot.state,
'prefs': client.prefs.as_dict(),
'google_entities': client.google_user_config['filter'].config,
'alexa_entities': client.alexa_user_config['filter'].config,
'remote_domain': remote.instance_domain,
'remote_connected': remote.is_connected,
'remote_certificate': certificate,
}
@websocket_api.require_admin
@_require_cloud_login
@websocket_api.async_response
@_ws_handle_cloud_errors
@websocket_api.websocket_command({
'type': 'cloud/remote/connect'
})
async def websocket_remote_connect(hass, connection, msg):
"""Handle request for connect remote."""
cloud = hass.data[DOMAIN]
await cloud.client.prefs.async_update(remote_enabled=True)
await cloud.remote.connect()
connection.send_result(msg['id'], _account_data(cloud))
@websocket_api.require_admin
@_require_cloud_login
@websocket_api.async_response
@_ws_handle_cloud_errors
@websocket_api.websocket_command({
'type': 'cloud/remote/disconnect'
})
async def websocket_remote_disconnect(hass, connection, msg):
"""Handle request for disconnect remote."""
cloud = hass.data[DOMAIN]
await cloud.client.prefs.async_update(remote_enabled=False)
await cloud.remote.disconnect()
connection.send_result(msg['id'], _account_data(cloud))
@websocket_api.require_admin
@_require_cloud_login
@websocket_api.async_response
@_ws_handle_cloud_errors
@websocket_api.websocket_command({
'type': 'cloud/google_assistant/entities'
})
async def google_assistant_list(hass, connection, msg):
"""List all google assistant entities."""
cloud = hass.data[DOMAIN]
entities = google_helpers.async_get_entities(
hass, cloud.client.google_config
)
result = []
for entity in entities:
result.append({
'entity_id': entity.entity_id,
'traits': [trait.name for trait in entity.traits()],
'might_2fa': entity.might_2fa(),
})
connection.send_result(msg['id'], result)
@websocket_api.require_admin
@_require_cloud_login
@websocket_api.async_response
@_ws_handle_cloud_errors
@websocket_api.websocket_command({
'type': 'cloud/google_assistant/entities/update',
'entity_id': str,
vol.Optional('should_expose'): bool,
vol.Optional('override_name'): str,
vol.Optional('aliases'): [str],
vol.Optional('disable_2fa'): bool,
})
async def google_assistant_update(hass, connection, msg):
"""Update google assistant config."""
cloud = hass.data[DOMAIN]
changes = dict(msg)
changes.pop('type')
changes.pop('id')
await cloud.client.prefs.async_update_google_entity_config(**changes)
connection.send_result(
msg['id'],
cloud.client.prefs.google_entity_configs.get(msg['entity_id']))
@websocket_api.require_admin
@_require_cloud_login
@websocket_api.async_response
@_ws_handle_cloud_errors
@websocket_api.websocket_command({
'type': 'cloud/alexa/entities'
})
async def alexa_list(hass, connection, msg):
"""List all alexa entities."""
cloud = hass.data[DOMAIN]
entities = alexa_entities.async_get_entities(
hass, cloud.client.alexa_config
)
result = []
for entity in entities:
result.append({
'entity_id': entity.entity_id,
'display_categories': entity.default_display_categories(),
'interfaces': [ifc.name() for ifc in entity.interfaces()],
})
connection.send_result(msg['id'], result)
@websocket_api.require_admin
@_require_cloud_login
@websocket_api.async_response
@_ws_handle_cloud_errors
@websocket_api.websocket_command({
'type': 'cloud/alexa/entities/update',
'entity_id': str,
vol.Optional('should_expose'): bool,
})
async def alexa_update(hass, connection, msg):
"""Update alexa entity config."""
cloud = hass.data[DOMAIN]
changes = dict(msg)
changes.pop('type')
changes.pop('id')
await cloud.client.prefs.async_update_alexa_entity_config(**changes)
connection.send_result(
msg['id'],
cloud.client.prefs.alexa_entity_configs.get(msg['entity_id']))
@websocket_api.require_admin
@_require_cloud_login
@websocket_api.async_response
@websocket_api.websocket_command({
'type': 'cloud/alexa/sync',
})
async def alexa_sync(hass, connection, msg):
"""Sync with Alexa."""
cloud = hass.data[DOMAIN]
with async_timeout.timeout(10):
try:
success = await cloud.client.alexa_config.async_sync_entities()
except alexa_errors.NoTokenAvailable:
connection.send_error(
msg['id'], 'alexa_relink',
'Please go to the Alexa app and re-link the Home Assistant '
'skill.'
)
return
if success:
connection.send_result(msg['id'])
else:
connection.send_error(
msg['id'], ws_const.ERR_UNKNOWN_ERROR, 'Unknown error')
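All of the error handling above funnels through one idea: a dict keyed by exception class that maps to an HTTP status and message, with a generic 502 fallback, consulted by _process_cloud_exception and applied by both decorators. A framework-free sketch of that pattern follows; the class and helper names here are invented for illustration and are not Home Assistant APIs.
# Minimal sketch of the exception-to-response mapping used by _process_cloud_exception.
# UserNotFound, ERRORS and handle() are invented for this illustration.
class UserNotFound(Exception):
    pass

ERRORS = {
    UserNotFound: (400, "User does not exist."),
}

def handle(func):
    def wrapper(*args, **kwargs):
        try:
            return 200, func(*args, **kwargs)
        except Exception as err:  # broad on purpose, mirroring the decorators above
            # unknown exceptions fall back to a generic 502, like the code above
            return ERRORS.get(err.__class__, (502, 'Unexpected error: {}'.format(err)))
    return wrapper

@handle
def login(email):
    raise UserNotFound()

print(login("someone@example.com"))  # -> (400, 'User does not exist.')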
|
kawamon/hue
|
refs/heads/master
|
desktop/core/ext-py/SQLAlchemy-1.3.17/examples/join_conditions/__init__.py
|
7
|
"""Examples of various :func:`.orm.relationship` configurations,
which make use of the ``primaryjoin`` argument to compose special types
of join conditions.
.. autosource::
"""
|
olafdietsche/scrapy
|
refs/heads/master
|
tests/test_spiderloader/__init__.py
|
107
|
import sys
import os
import shutil
from zope.interface.verify import verifyObject
from twisted.trial import unittest
# ugly hack to avoid cyclic imports of scrapy.spiders when running this test
# alone
from scrapy.interfaces import ISpiderLoader
from scrapy.spiderloader import SpiderLoader
from scrapy.settings import Settings
from scrapy.http import Request
module_dir = os.path.dirname(os.path.abspath(__file__))
class SpiderLoaderTest(unittest.TestCase):
def setUp(self):
orig_spiders_dir = os.path.join(module_dir, 'test_spiders')
self.tmpdir = self.mktemp()
os.mkdir(self.tmpdir)
self.spiders_dir = os.path.join(self.tmpdir, 'test_spiders_xxx')
shutil.copytree(orig_spiders_dir, self.spiders_dir)
sys.path.append(self.tmpdir)
settings = Settings({'SPIDER_MODULES': ['test_spiders_xxx']})
self.spider_loader = SpiderLoader.from_settings(settings)
def tearDown(self):
del self.spider_loader
del sys.modules['test_spiders_xxx']
sys.path.remove(self.tmpdir)
def test_interface(self):
verifyObject(ISpiderLoader, self.spider_loader)
def test_list(self):
self.assertEqual(set(self.spider_loader.list()),
set(['spider1', 'spider2', 'spider3']))
def test_load(self):
spider1 = self.spider_loader.load("spider1")
self.assertEqual(spider1.__name__, 'Spider1')
def test_find_by_request(self):
self.assertEqual(self.spider_loader.find_by_request(Request('http://scrapy1.org/test')),
['spider1'])
self.assertEqual(self.spider_loader.find_by_request(Request('http://scrapy2.org/test')),
['spider2'])
self.assertEqual(set(self.spider_loader.find_by_request(Request('http://scrapy3.org/test'))),
set(['spider1', 'spider2']))
self.assertEqual(self.spider_loader.find_by_request(Request('http://scrapy999.org/test')),
[])
self.assertEqual(self.spider_loader.find_by_request(Request('http://spider3.com')),
[])
self.assertEqual(self.spider_loader.find_by_request(Request('http://spider3.com/onlythis')),
['spider3'])
def test_load_spider_module(self):
module = 'tests.test_spiderloader.test_spiders.spider1'
settings = Settings({'SPIDER_MODULES': [module]})
self.spider_loader = SpiderLoader.from_settings(settings)
assert len(self.spider_loader._spiders) == 1
def test_load_spider_module_multiple(self):
prefix = 'tests.test_spiderloader.test_spiders.'
module = ','.join(prefix + s for s in ('spider1', 'spider2'))
settings = Settings({'SPIDER_MODULES': module})
self.spider_loader = SpiderLoader.from_settings(settings)
assert len(self.spider_loader._spiders) == 2
def test_load_base_spider(self):
module = 'tests.test_spiderloader.test_spiders.spider0'
settings = Settings({'SPIDER_MODULES': [module]})
self.spider_loader = SpiderLoader.from_settings(settings)
assert len(self.spider_loader._spiders) == 0
|
rotofly/odoo
|
refs/heads/master
|
addons/product_extended/wizard/__init__.py
|
374
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import wizard_price
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
ssssam/rdflib
|
refs/heads/master
|
rdflib/plugins/parsers/hturtle.py
|
24
|
# -*- coding: utf-8 -*-
"""
Extraction parser for RDF embedded verbatim in HTML or XML files. This is based
on:
* The specification on embedding turtle into html:
http://www.w3.org/TR/turtle/#in-html
For SVG (and currently SVG only) the method also extracts embedded RDF/XML
data, per the SVG specification.
License: W3C Software License,
http://www.w3.org/Consortium/Legal/copyright-software
Author: Ivan Herman
Copyright: W3C
"""
from rdflib.parser import Parser
from .pyRdfa import pyRdfa, Options
from .pyRdfa.state import ExecutionContext
from .pyRdfa.embeddedRDF import handle_embeddedRDF
from .structureddata import _get_orig_source, _check_error
try:
import html5lib
assert html5lib
html5lib = True
except ImportError:
import warnings
warnings.warn(
'html5lib not found! RDFa and Microdata parsers ' +
'will not be available.')
html5lib = False
class HTurtle(pyRdfa):
"""
Bastardizing the RDFa 1.1 parser to do hturtle extraction
"""
def __init__(self, options=None, base="", media_type=""):
pyRdfa.__init__(self, options=options, base=base,
media_type=media_type, rdfa_version="1.1")
def graph_from_DOM(self, dom, graph, pgraph=None):
"""
Stealing the parsing function from the original class, to do
turtle extraction only
"""
def copyGraph(tog, fromg):
for t in fromg:
tog.add(t)
for k, ns in fromg.namespaces():
tog.bind(k, ns)
def _process_one_node(node, graph, state):
if handle_embeddedRDF(node, graph, state):
# we got an RDF content that has been extracted into Graph;
# the recursion should stop
return
else:
# recurse through all the child elements of the current node
for n in node.childNodes:
if n.nodeType == node.ELEMENT_NODE:
_process_one_node(n, graph, state)
topElement = dom.documentElement
state = ExecutionContext(topElement, graph, base=self.base,
options=self.options, rdfa_version="1.1")
_process_one_node(topElement, graph, state)
if pgraph is not None:
copyGraph(pgraph, self.options.processor_graph.graph)
# This is the parser interface as it would look when called from the rest of
# RDFLib
class HTurtleParser(Parser):
def parse(self, source, graph, pgraph=None, media_type=""):
"""
@param source: one of the input sources that the RDFLib package defined
@type source: InputSource class instance
@param graph: target graph for the triples; output graph, in RDFa spec.
parlance
@type graph: RDFLib Graph
@keyword media_type: explicit setting of the preferred media type
(a.k.a. content type) of the RDFa source. None means the content
type of the HTTP result is used, or a guess is made based on the
suffix of a file
@type media_type: string
"""
if html5lib is False:
raise ImportError(
'html5lib is not installed, cannot ' +
'use RDFa and Microdata parsers.')
(baseURI, orig_source) = _get_orig_source(source)
self._process(
graph, pgraph, baseURI, orig_source, media_type=media_type)
def _process(self, graph, pgraph, baseURI, orig_source, media_type=""):
self.options = Options(output_processor_graph=None,
embedded_rdf=True,
vocab_expansion=False,
vocab_cache=False)
if media_type is None:
media_type = ""
processor = HTurtle(
self.options, base=baseURI, media_type=media_type)
processor.graph_from_source(
orig_source, graph=graph, pgraph=None, rdfOutput=False)
# get possible error triples to raise exceptions
_check_error(graph)
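As a usage sketch (not part of the module above): the Turtle-in-HTML mechanism this parser targets is a ``<script type="text/turtle">`` block, and assuming html5lib is installed and the parser is registered under the plugin name "hturtle" in the installed rdflib, it can be driven through Graph.parse:
# Hedged usage sketch; the plugin name "hturtle" is an assumption, check your rdflib version.
from rdflib import Graph

html = """<html><body>
<script type="text/turtle">
@prefix dc: <http://purl.org/dc/terms/> .
<http://example.org/doc> dc:title "Embedded turtle example" .
</script>
</body></html>"""

g = Graph()
g.parse(data=html, format="hturtle", publicID="http://example.org/doc")
for s, p, o in g:
    print(s, p, o)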
|
ff94315/hiwifi-openwrt-HC5661-HC5761
|
refs/heads/master
|
staging_dir/host/lib/python2.7/lib-tk/test/test_tkinter/__init__.py
|
12133432
| |
jeroenj/CouchPotatoServer
|
refs/heads/master
|
couchpotato/core/media/movie/providers/metadata/__init__.py
|
12133432
|