| repo_name (string, 5-100 chars) | ref (string, 12-67 chars) | path (string, 4-244 chars) | copies (string, 1-8 chars) | content (string, 0-1.05M chars, nullable) |
|---|---|---|---|---|
explora26/zephyr
|
refs/heads/master
|
scripts/gen_idt.py
|
2
|
#!/usr/bin/env python3
#
# Copyright (c) 2017 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
import argparse
import sys
import struct
import os
import elftools
from distutils.version import LooseVersion
from elftools.elf.elffile import ELFFile
from elftools.elf.sections import SymbolTableSection
if LooseVersion(elftools.__version__) < LooseVersion('0.24'):
sys.stderr.write("pyelftools is out of date, need version 0.24 or later\n")
sys.exit(1)
# This will never change, first selector in the GDT after the null selector
KERNEL_CODE_SEG = 0x08
# These exception vectors push an error code onto the stack.
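# (vectors 8, 10-14 and 17: double fault, invalid TSS, segment not present,
# stack-segment fault, general protection fault, page fault, alignment check)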
ERR_CODE_VECTORS = [8, 10, 11, 12, 13, 14, 17]
def debug(text):
if not args.verbose:
return
sys.stdout.write(os.path.basename(sys.argv[0]) + ": " + text + "\n")
def error(text):
sys.stderr.write(os.path.basename(sys.argv[0]) + ": " + text + "\n")
sys.exit(1)
# See Section 6.11 of the Intel Architecture Software Developer's Manual
gate_desc_format = "<HHBBH"
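# Fields packed by gate_desc_format: offset bits 15:0, segment (or TSS)
# selector, a reserved zero byte, the type/attribute byte (gate type, DPL and
# present bit), and offset bits 31:16.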
def create_irq_gate(handler, dpl):
present = 1
gate_type = 0xE # 32-bit interrupt gate
type_attr = gate_type | (dpl << 5) | (present << 7)
offset_hi = handler >> 16
offset_lo = handler & 0xFFFF
data = struct.pack(gate_desc_format, offset_lo, KERNEL_CODE_SEG, 0,
type_attr, offset_hi)
return data
def create_task_gate(tss, dpl):
present = 1
gate_type = 0x5 # 32-bit task gate
type_attr = gate_type | (dpl << 5) | (present << 7)
data = struct.pack(gate_desc_format, 0, tss, 0, type_attr, 0)
return data
def create_idt_binary(idt_config, filename):
with open(filename, "wb") as fp:
for handler, tss, dpl in idt_config:
if handler and tss:
error("entry specifies both handler function and tss")
if not handler and not tss:
error("entry does not specify either handler or tss")
if handler:
data = create_irq_gate(handler, dpl)
else:
data = create_task_gate(tss, dpl)
fp.write(data)
map_fmt = "<B"
def create_irq_vec_map_binary(irq_vec_map, filename):
with open(filename, "wb") as fp:
for i in irq_vec_map:
fp.write(struct.pack(map_fmt, i))
def priority_range(prio):
# Priority levels are represented as groups of 16 vectors within the IDT
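    # e.g. priority 0 covers vectors 32-47, priority 1 covers 48-63, and so on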
base = 32 + (prio * 16)
return range(base, base + 16)
def update_irq_vec_map(irq_vec_map, irq, vector, max_irq):
# No IRQ associated; exception or software interrupt
if irq == -1:
return
if irq >= max_irq:
error("irq %d specified, but CONFIG_MAX_IRQ_LINES is %d" %
(irq, max_irq))
# This table will never have values less than 32 since those are for
# exceptions; 0 means unconfigured
if irq_vec_map[irq] != 0:
error("multiple vector assignments for interrupt line %d", irq)
debug("assign IRQ %d to vector %d" % (irq, vector))
irq_vec_map[irq] = vector
def setup_idt(spur_code, spur_nocode, intlist, max_vec, max_irq):
irq_vec_map = [0 for i in range(max_irq)]
vectors = [None for i in range(max_vec)]
# Pass 1: sanity check and set up hard-coded interrupt vectors
for handler, irq, prio, vec, dpl, tss in intlist:
if vec == -1:
if prio == -1:
error("entry does not specify vector or priority level")
continue
if vec >= max_vec:
error("Vector %d specified, but size of IDT is only %d vectors" %
(vec, max_vec))
if vectors[vec] is not None:
error("Multiple assignments for vector %d" % vec)
vectors[vec] = (handler, tss, dpl)
update_irq_vec_map(irq_vec_map, irq, vec, max_irq)
# Pass 2: set up priority-based interrupt vectors
for handler, irq, prio, vec, dpl, tss in intlist:
if vec != -1:
continue
for vi in priority_range(prio):
if vi >= max_vec:
break
if vectors[vi] is None:
vec = vi
break
if vec == -1:
error("can't find a free vector in priority level %d" % prio)
vectors[vec] = (handler, tss, dpl)
update_irq_vec_map(irq_vec_map, irq, vec, max_irq)
# Pass 3: fill in unused vectors with spurious handler at dpl=0
for i in range(max_vec):
if vectors[i] is not None:
continue
if i in ERR_CODE_VECTORS:
handler = spur_code
else:
handler = spur_nocode
vectors[i] = (handler, 0, 0)
return vectors, irq_vec_map
def get_symbols(obj):
for section in obj.iter_sections():
if isinstance(section, SymbolTableSection):
return {sym.name: sym.entry.st_value
for sym in section.iter_symbols()}
raise LookupError("Could not find symbol table")
# struct genidt_header_s {
# uint32_t spurious_addr;
# uint32_t spurious_no_error_addr;
# int32_t num_entries;
# };
intlist_header_fmt = "<II"
# struct genidt_entry_s {
# uint32_t isr;
# int32_t irq;
# int32_t priority;
# int32_t vector_id;
# int32_t dpl;
# int32_t tss;
# };
intlist_entry_fmt = "<Iiiiii"
def get_intlist(elf):
intdata = elf.get_section_by_name("intList").data()
header_sz = struct.calcsize(intlist_header_fmt)
header = struct.unpack_from(intlist_header_fmt, intdata, 0)
intdata = intdata[header_sz:]
spurious_code = header[0]
spurious_nocode = header[1]
debug("spurious handler (code) : %s" % hex(header[0]))
debug("spurious handler (no code) : %s" % hex(header[1]))
intlist = [i for i in
struct.iter_unpack(intlist_entry_fmt, intdata)]
debug("Configured interrupt routing")
debug("handler irq pri vec dpl")
debug("--------------------------")
for irq in intlist:
debug("{0:<10} {1:<3} {2:<3} {3:<3} {4:<2}".format(
hex(irq[0]),
"-" if irq[1] == -1 else irq[1],
"-" if irq[2] == -1 else irq[2],
"-" if irq[3] == -1 else irq[3],
irq[4]))
return (spurious_code, spurious_nocode, intlist)
def parse_args():
global args
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument("-m", "--vector-map", required=True,
help="Output file mapping IRQ lines to IDT vectors")
parser.add_argument("-o", "--output-idt", required=True,
help="Output file containing IDT binary")
parser.add_argument("-a", "--output-vectors-alloc", required=False,
help="Output file indicating allocated vectors")
parser.add_argument("-k", "--kernel", required=True,
help="Zephyr kernel image")
parser.add_argument("-v", "--verbose", action="store_true",
help="Print extra debugging information")
args = parser.parse_args()
if "VERBOSE" in os.environ:
args.verbose = 1
def create_irq_vectors_allocated(vectors, spur_code, spur_nocode, filename):
    # Construct a bitfield over all the IDT vectors, where bit n set to 1
    # means that vector is free: it has one of the two spurious interrupt
    # handlers installed and is available for runtime installation of
    # interrupts.
num_chars = (len(vectors) + 7) // 8
vbits = [0 for i in range(num_chars)]
for i in range(len(vectors)):
handler, _, _ = vectors[i]
if handler != spur_code and handler != spur_nocode:
continue
vbit_index = i // 8
vbit_val = 1 << (i % 8)
vbits[vbit_index] = vbits[vbit_index] | vbit_val
with open(filename, "wb") as fp:
for char in vbits:
fp.write(struct.pack("<B", char))
def main():
parse_args()
with open(args.kernel, "rb") as fp:
kernel = ELFFile(fp)
syms = get_symbols(kernel)
spur_code, spur_nocode, intlist = get_intlist(kernel)
max_irq = syms["CONFIG_MAX_IRQ_LINES"]
max_vec = syms["CONFIG_IDT_NUM_VECTORS"]
vectors, irq_vec_map = setup_idt(spur_code, spur_nocode, intlist, max_vec,
max_irq)
create_idt_binary(vectors, args.output_idt)
create_irq_vec_map_binary(irq_vec_map, args.vector_map)
if args.output_vectors_alloc:
create_irq_vectors_allocated(vectors, spur_code, spur_nocode,
args.output_vectors_alloc)
if __name__ == "__main__":
main()
|
Lilykos/inspire-next
|
refs/heads/master
|
inspire/modules/harvester/tasks/world_scientific.py
|
2
|
# -*- coding: utf-8 -*-
#
## This file is part of INSPIRE.
## Copyright (C) 2014, 2015 CERN.
##
## INSPIRE is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## INSPIRE is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with INSPIRE. If not, see <http://www.gnu.org/licenses/>.
##
## In applying this license, CERN does not waive the privileges and immunities
## granted to it by virtue of its status as an Intergovernmental Organization
## or submit itself to any jurisdiction.
from os.path import (
join,
basename,
)
from functools import wraps
from datetime import datetime
from zipfile import BadZipfile
from flask import render_template
from harvestingkit.world_scientific_package import WorldScientific
from invenio.ext.email import send_email
from invenio.base.globals import cfg
from inspire.modules.harvester.utils import (
unzip,
ftp_upload,
ftp_download_files,
get_storage_path,
get_netrc
)
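# Each task below is a factory: calling it with its configuration returns the
# actual workflow task, a callable taking (obj, eng) that the workflow engine
# executes.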
def get_files_from_ftp(server, source_folder, target_folder):
"""Get all files in given folder on FTP server to target folder.
The paths to the retrieved files are put in data["downloaded_files"].
"""
@wraps(get_files_from_ftp)
def _get_files_from_ftp(obj, eng):
target_folder_full = get_storage_path(suffix=target_folder)
obj.data['all_files'], obj.data['new_files'] = ftp_download_files(
source_folder,
target_folder_full,
server=server,
netrc_file=get_netrc()
)
obj.log.info("{0} new files downloaded, in total {1} files".format(
len(obj.data["new_files"]),
len(obj.data["all_files"])
))
return _get_files_from_ftp
def unzip_files(target_folder):
"""Unzip new files in data["new_files"] to target location.
All extracted files are stored in data["extracted_files"].
"""
@wraps(unzip_files)
def _unzip_files(obj, eng):
target_folder_full = get_storage_path(suffix=target_folder)
filenames = obj.data.get('all_files', list())
extracted_files = []
for filename in filenames:
try:
extracted_files.extend(unzip(filename, target_folder_full))
except BadZipfile as e:
obj.log.error("Error unzipping file {0}: {1}".format(
filename,
e
))
obj.data['extracted_files'] = extracted_files
obj.log.debug("{0} new files extracted".format(
len(obj.data["extracted_files"])
))
return _unzip_files
def convert_files(target_folder):
"""Convert files in data["extracted_files"] to MARCXML."""
@wraps(convert_files)
def _convert_files(obj, eng):
from invenio.modules.knowledge.api import get_kb_mappings
mappings = dict(
map(
lambda item: (item['key'], item['value']),
get_kb_mappings('JOURNALS')
)
)
ws = WorldScientific(mappings)
target_folder_full = get_storage_path(suffix=target_folder)
args = obj.extra_data['args']
to_date = args.get("to_date") or datetime.now().strftime('%Y-%m-%d')
from_date = args.get("from_date") or '1900-01-01'
insert_files = []
filenames = obj.data['extracted_files']
for filename in filenames:
date = ws.get_date(filename)
if from_date <= date <= to_date:
marc = ws.get_record(filename)
if marc:
filename = basename(filename)
filename = join(target_folder_full, filename)
insert_files.append(filename)
with open(filename, 'w') as outfile:
outfile.write(marc)
obj.log.info("Converted {0} articles between {1} to {2}".format(
len(insert_files),
from_date,
to_date
))
obj.data['insert'] = insert_files
obj.data["result_path"] = target_folder_full
obj.log.debug("Saved converted files to {0}".format(target_folder_full))
obj.log.debug("{0} files to add".format(
len(obj.data["insert"]),
))
return _convert_files
def create_collection(obj, eng):
"""Squash all the insert records into batch collections."""
args = obj.extra_data['args']
to_date = args.get("to_date") or datetime.now().strftime('%Y-%m-%d')
from_date = args.get("from_date")
date = "_".join([d for d in [from_date, to_date] if d])
obj.data['collections'] = {}
final_filename = join(
obj.data.get("result_path", cfg.get("HARVESTER_STORAGE_PREFIX")),
"world_scientific-{0}.{1}.xml".format(date, "insert")
)
files = obj.data.get("insert", list())
if files:
with open(final_filename, 'w') as outfile:
outfile.write('<collection>\n')
for filename in files:
                try:
                    with open(filename) as infile:
                        outfile.write(infile.read())
                except IOError:
                    obj.log.error('Unable to locate the file {0}'.format(filename))
outfile.write('\n</collection>')
obj.data['collections'].update({"insert": final_filename})
obj.log.debug("{0} files ready for upload:\n{1}".format(
len(obj.data["collections"]),
"\n".join([f for f in obj.data["collections"].values()])
))
def put_files_to_ftp(server):
"""Upload files in data["collections"] to given FTP server."""
@wraps(put_files_to_ftp)
def _put_files_to_ftp(obj, eng):
collections = obj.data.get('collections', dict())
for filename in collections.values():
if cfg.get("PRODUCTION_MODE"):
ftp_upload(
filename,
server=server,
netrc_file=get_netrc(),
)
obj.log.info("Uploaded {0} to {1}".format(filename, server))
else:
obj.log.info("(pretend) Uploaded to {0} to {1}".format(filename, server))
return _put_files_to_ftp
def report_via_email(recipients, template):
"""Report about completed uploads to recipients."""
@wraps(report_via_email)
def _report_via_email(obj, eng):
collections = obj.data.get('collections', dict())
files_uploaded = []
for update_type, filename in collections.items():
count = len(obj.data.get(update_type, list()))
files_uploaded.append((basename(filename), count))
harvesting_date = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
context = {
"object": obj,
"files_uploaded": files_uploaded,
"args": obj.extra_data.get("args", dict()),
"harvesting_date": harvesting_date
}
body = render_template(
template,
**context
)
subject = "{0} harvest results: {1}".format(
context.get("args").get("workflow"),
harvesting_date
)
send_email(fromaddr=cfg.get("CFG_SITE_SUPPORT_EMAIL"),
toaddr=recipients,
subject=subject,
content=body)
return _report_via_email
|
yongtang/tensorflow
|
refs/heads/master
|
tensorflow/python/feature_column/sequence_feature_column.py
|
13
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""This API defines FeatureColumn for sequential input.
NOTE: This API is a work in progress and will likely be changing frequently.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from tensorflow.python.feature_column import feature_column_v2 as fc
from tensorflow.python.feature_column import utils as fc_utils
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.util.tf_export import tf_export
# pylint: disable=protected-access
def concatenate_context_input(context_input, sequence_input):
"""Replicates `context_input` across all timesteps of `sequence_input`.
Expands dimension 1 of `context_input` then tiles it `sequence_length` times.
This value is appended to `sequence_input` on dimension 2 and the result is
returned.
Args:
context_input: A `Tensor` of dtype `float32` and shape `[batch_size, d1]`.
sequence_input: A `Tensor` of dtype `float32` and shape `[batch_size,
padded_length, d0]`.
Returns:
A `Tensor` of dtype `float32` and shape `[batch_size, padded_length,
d0 + d1]`.
Raises:
ValueError: If `sequence_input` does not have rank 3 or `context_input` does
not have rank 2.
"""
seq_rank_check = check_ops.assert_rank(
sequence_input,
3,
message='sequence_input must have rank 3',
data=[array_ops.shape(sequence_input)])
seq_type_check = check_ops.assert_type(
sequence_input,
dtypes.float32,
message='sequence_input must have dtype float32; got {}.'.format(
sequence_input.dtype))
ctx_rank_check = check_ops.assert_rank(
context_input,
2,
message='context_input must have rank 2',
data=[array_ops.shape(context_input)])
ctx_type_check = check_ops.assert_type(
context_input,
dtypes.float32,
message='context_input must have dtype float32; got {}.'.format(
context_input.dtype))
with ops.control_dependencies(
[seq_rank_check, seq_type_check, ctx_rank_check, ctx_type_check]):
padded_length = array_ops.shape(sequence_input)[1]
tiled_context_input = array_ops.tile(
array_ops.expand_dims(context_input, 1),
array_ops.concat([[1], [padded_length], [1]], 0))
return array_ops.concat([sequence_input, tiled_context_input], 2)
@tf_export('feature_column.sequence_categorical_column_with_identity')
def sequence_categorical_column_with_identity(
key, num_buckets, default_value=None):
"""Returns a feature column that represents sequences of integers.
Pass this to `embedding_column` or `indicator_column` to convert sequence
categorical data into dense representation for input to sequence NN, such as
RNN.
Example:
```python
watches = sequence_categorical_column_with_identity(
'watches', num_buckets=1000)
watches_embedding = embedding_column(watches, dimension=10)
columns = [watches_embedding]
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
sequence_feature_layer = SequenceFeatures(columns)
sequence_input, sequence_length = sequence_feature_layer(features)
sequence_length_mask = tf.sequence_mask(sequence_length)
rnn_cell = tf.keras.layers.SimpleRNNCell(hidden_size)
rnn_layer = tf.keras.layers.RNN(rnn_cell)
outputs, state = rnn_layer(sequence_input, mask=sequence_length_mask)
```
Args:
key: A unique string identifying the input feature.
num_buckets: Range of inputs. Namely, inputs are expected to be in the
range `[0, num_buckets)`.
default_value: If `None`, this column's graph operations will fail for
out-of-range inputs. Otherwise, this value must be in the range
`[0, num_buckets)`, and will replace out-of-range inputs.
Returns:
A `SequenceCategoricalColumn`.
Raises:
ValueError: if `num_buckets` is less than one.
ValueError: if `default_value` is not in range `[0, num_buckets)`.
"""
return fc.SequenceCategoricalColumn(
fc.categorical_column_with_identity(
key=key,
num_buckets=num_buckets,
default_value=default_value))
@tf_export('feature_column.sequence_categorical_column_with_hash_bucket')
def sequence_categorical_column_with_hash_bucket(
key, hash_bucket_size, dtype=dtypes.string):
"""A sequence of categorical terms where ids are set by hashing.
Pass this to `embedding_column` or `indicator_column` to convert sequence
categorical data into dense representation for input to sequence NN, such as
RNN.
Example:
```python
tokens = sequence_categorical_column_with_hash_bucket(
'tokens', hash_bucket_size=1000)
tokens_embedding = embedding_column(tokens, dimension=10)
columns = [tokens_embedding]
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
sequence_feature_layer = SequenceFeatures(columns)
sequence_input, sequence_length = sequence_feature_layer(features)
sequence_length_mask = tf.sequence_mask(sequence_length)
rnn_cell = tf.keras.layers.SimpleRNNCell(hidden_size)
rnn_layer = tf.keras.layers.RNN(rnn_cell)
outputs, state = rnn_layer(sequence_input, mask=sequence_length_mask)
```
Args:
key: A unique string identifying the input feature.
hash_bucket_size: An int > 1. The number of buckets.
dtype: The type of features. Only string and integer types are supported.
Returns:
A `SequenceCategoricalColumn`.
Raises:
ValueError: `hash_bucket_size` is not greater than 1.
ValueError: `dtype` is neither string nor integer.
"""
return fc.SequenceCategoricalColumn(
fc.categorical_column_with_hash_bucket(
key=key,
hash_bucket_size=hash_bucket_size,
dtype=dtype))
@tf_export('feature_column.sequence_categorical_column_with_vocabulary_file')
def sequence_categorical_column_with_vocabulary_file(
key, vocabulary_file, vocabulary_size=None, num_oov_buckets=0,
default_value=None, dtype=dtypes.string):
"""A sequence of categorical terms where ids use a vocabulary file.
Pass this to `embedding_column` or `indicator_column` to convert sequence
categorical data into dense representation for input to sequence NN, such as
RNN.
Example:
```python
states = sequence_categorical_column_with_vocabulary_file(
key='states', vocabulary_file='/us/states.txt', vocabulary_size=50,
num_oov_buckets=5)
states_embedding = embedding_column(states, dimension=10)
columns = [states_embedding]
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
sequence_feature_layer = SequenceFeatures(columns)
sequence_input, sequence_length = sequence_feature_layer(features)
sequence_length_mask = tf.sequence_mask(sequence_length)
rnn_cell = tf.keras.layers.SimpleRNNCell(hidden_size)
rnn_layer = tf.keras.layers.RNN(rnn_cell)
outputs, state = rnn_layer(sequence_input, mask=sequence_length_mask)
```
Args:
key: A unique string identifying the input feature.
vocabulary_file: The vocabulary file name.
    vocabulary_size: Number of elements in the vocabulary. This must be no
      greater than the length of `vocabulary_file`; if it is less, later values
      are ignored. If `None`, it is set to the length of `vocabulary_file`.
num_oov_buckets: Non-negative integer, the number of out-of-vocabulary
buckets. All out-of-vocabulary inputs will be assigned IDs in the range
`[vocabulary_size, vocabulary_size+num_oov_buckets)` based on a hash of
the input value. A positive `num_oov_buckets` can not be specified with
`default_value`.
default_value: The integer ID value to return for out-of-vocabulary feature
values, defaults to `-1`. This can not be specified with a positive
`num_oov_buckets`.
dtype: The type of features. Only string and integer types are supported.
Returns:
A `SequenceCategoricalColumn`.
Raises:
ValueError: `vocabulary_file` is missing or cannot be opened.
ValueError: `vocabulary_size` is missing or < 1.
ValueError: `num_oov_buckets` is a negative integer.
ValueError: `num_oov_buckets` and `default_value` are both specified.
ValueError: `dtype` is neither string nor integer.
"""
return fc.SequenceCategoricalColumn(
fc.categorical_column_with_vocabulary_file(
key=key,
vocabulary_file=vocabulary_file,
vocabulary_size=vocabulary_size,
num_oov_buckets=num_oov_buckets,
default_value=default_value,
dtype=dtype))
@tf_export('feature_column.sequence_categorical_column_with_vocabulary_list')
def sequence_categorical_column_with_vocabulary_list(
key, vocabulary_list, dtype=None, default_value=-1, num_oov_buckets=0):
"""A sequence of categorical terms where ids use an in-memory list.
Pass this to `embedding_column` or `indicator_column` to convert sequence
categorical data into dense representation for input to sequence NN, such as
RNN.
Example:
```python
colors = sequence_categorical_column_with_vocabulary_list(
key='colors', vocabulary_list=('R', 'G', 'B', 'Y'),
num_oov_buckets=2)
colors_embedding = embedding_column(colors, dimension=3)
columns = [colors_embedding]
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
sequence_feature_layer = SequenceFeatures(columns)
sequence_input, sequence_length = sequence_feature_layer(features)
sequence_length_mask = tf.sequence_mask(sequence_length)
rnn_cell = tf.keras.layers.SimpleRNNCell(hidden_size)
rnn_layer = tf.keras.layers.RNN(rnn_cell)
outputs, state = rnn_layer(sequence_input, mask=sequence_length_mask)
```
Args:
key: A unique string identifying the input feature.
vocabulary_list: An ordered iterable defining the vocabulary. Each feature
is mapped to the index of its value (if present) in `vocabulary_list`.
Must be castable to `dtype`.
dtype: The type of features. Only string and integer types are supported.
If `None`, it will be inferred from `vocabulary_list`.
default_value: The integer ID value to return for out-of-vocabulary feature
values, defaults to `-1`. This can not be specified with a positive
`num_oov_buckets`.
num_oov_buckets: Non-negative integer, the number of out-of-vocabulary
buckets. All out-of-vocabulary inputs will be assigned IDs in the range
`[len(vocabulary_list), len(vocabulary_list)+num_oov_buckets)` based on a
hash of the input value. A positive `num_oov_buckets` can not be specified
with `default_value`.
Returns:
A `SequenceCategoricalColumn`.
Raises:
ValueError: if `vocabulary_list` is empty, or contains duplicate keys.
ValueError: `num_oov_buckets` is a negative integer.
ValueError: `num_oov_buckets` and `default_value` are both specified.
ValueError: if `dtype` is not integer or string.
"""
return fc.SequenceCategoricalColumn(
fc.categorical_column_with_vocabulary_list(
key=key,
vocabulary_list=vocabulary_list,
dtype=dtype,
default_value=default_value,
num_oov_buckets=num_oov_buckets))
@tf_export('feature_column.sequence_numeric_column')
def sequence_numeric_column(
key,
shape=(1,),
default_value=0.,
dtype=dtypes.float32,
normalizer_fn=None):
"""Returns a feature column that represents sequences of numeric data.
Example:
```python
temperature = sequence_numeric_column('temperature')
columns = [temperature]
features = tf.io.parse_example(..., features=make_parse_example_spec(columns))
sequence_feature_layer = SequenceFeatures(columns)
sequence_input, sequence_length = sequence_feature_layer(features)
sequence_length_mask = tf.sequence_mask(sequence_length)
rnn_cell = tf.keras.layers.SimpleRNNCell(hidden_size)
rnn_layer = tf.keras.layers.RNN(rnn_cell)
outputs, state = rnn_layer(sequence_input, mask=sequence_length_mask)
```
Args:
key: A unique string identifying the input features.
shape: The shape of the input data per sequence id. E.g. if `shape=(2,)`,
each example must contain `2 * sequence_length` values.
default_value: A single value compatible with `dtype` that is used for
padding the sparse data into a dense `Tensor`.
dtype: The type of values.
normalizer_fn: If not `None`, a function that can be used to normalize the
value of the tensor after `default_value` is applied for parsing.
Normalizer function takes the input `Tensor` as its argument, and returns
      the output `Tensor` (e.g. `lambda x: (x - 3.0) / 4.2`). Please note that
      even though the most common use case of this function is normalization, it
      can be used for any kind of TensorFlow transformation.
Returns:
A `SequenceNumericColumn`.
Raises:
TypeError: if any dimension in shape is not an int.
ValueError: if any dimension in shape is not a positive integer.
ValueError: if `dtype` is not convertible to `tf.float32`.
"""
shape = fc._check_shape(shape=shape, key=key)
if not (dtype.is_integer or dtype.is_floating):
raise ValueError('dtype must be convertible to float. '
'dtype: {}, key: {}'.format(dtype, key))
if normalizer_fn is not None and not callable(normalizer_fn):
raise TypeError(
'normalizer_fn must be a callable. Given: {}'.format(normalizer_fn))
return SequenceNumericColumn(
key,
shape=shape,
default_value=default_value,
dtype=dtype,
normalizer_fn=normalizer_fn)
def _assert_all_equal_and_return(tensors, name=None):
"""Asserts that all tensors are equal and returns the first one."""
with ops.name_scope(name, 'assert_all_equal', values=tensors):
if len(tensors) == 1:
return tensors[0]
assert_equal_ops = []
for t in tensors[1:]:
assert_equal_ops.append(check_ops.assert_equal(tensors[0], t))
with ops.control_dependencies(assert_equal_ops):
return array_ops.identity(tensors[0])
class SequenceNumericColumn(
fc.SequenceDenseColumn,
collections.namedtuple(
'SequenceNumericColumn',
('key', 'shape', 'default_value', 'dtype', 'normalizer_fn'))):
"""Represents sequences of numeric data."""
@property
def _is_v2_column(self):
return True
@property
def name(self):
"""See `FeatureColumn` base class."""
return self.key
@property
def parse_example_spec(self):
"""See `FeatureColumn` base class."""
return {self.key: parsing_ops.VarLenFeature(self.dtype)}
def transform_feature(self, transformation_cache, state_manager):
"""See `FeatureColumn` base class.
In this case, we apply the `normalizer_fn` to the input tensor.
Args:
transformation_cache: A `FeatureTransformationCache` object to access
features.
state_manager: A `StateManager` to create / access resources such as
lookup tables.
Returns:
Normalized input tensor.
"""
input_tensor = transformation_cache.get(self.key, state_manager)
if self.normalizer_fn is not None:
input_tensor = self.normalizer_fn(input_tensor)
return input_tensor
@property
def variable_shape(self):
"""Returns a `TensorShape` representing the shape of sequence input."""
return tensor_shape.TensorShape(self.shape)
def get_sequence_dense_tensor(self, transformation_cache, state_manager):
"""Returns a `TensorSequenceLengthPair`.
Args:
transformation_cache: A `FeatureTransformationCache` object to access
features.
state_manager: A `StateManager` to create / access resources such as
lookup tables.
"""
sp_tensor = transformation_cache.get(self, state_manager)
dense_tensor = sparse_ops.sparse_tensor_to_dense(
sp_tensor, default_value=self.default_value)
# Reshape into [batch_size, T, variable_shape].
dense_shape = array_ops.concat(
[array_ops.shape(dense_tensor)[:1], [-1], self.variable_shape],
axis=0)
dense_tensor = array_ops.reshape(dense_tensor, shape=dense_shape)
# Get the number of timesteps per example
# For the 2D case, the raw values are grouped according to num_elements;
# for the 3D case, the grouping happens in the third dimension, and
# sequence length is not affected.
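    # E.g. with shape=(2,), a 2D row holding 8 raw values becomes a
    # [batch_size, 4, 2] dense tensor and the sequence length for that row is 4.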
if sp_tensor.shape.ndims == 2:
num_elements = self.variable_shape.num_elements()
else:
num_elements = 1
seq_length = fc_utils.sequence_length_from_sparse_tensor(
sp_tensor, num_elements=num_elements)
return fc.SequenceDenseColumn.TensorSequenceLengthPair(
dense_tensor=dense_tensor, sequence_length=seq_length)
@property
def parents(self):
"""See 'FeatureColumn` base class."""
return [self.key]
def get_config(self):
"""See 'FeatureColumn` base class."""
config = dict(zip(self._fields, self))
config['dtype'] = self.dtype.name
return config
@classmethod
def from_config(cls, config, custom_objects=None, columns_by_name=None):
"""See 'FeatureColumn` base class."""
fc._check_config_keys(config, cls._fields)
kwargs = fc._standardize_and_copy_config(config)
kwargs['dtype'] = dtypes.as_dtype(config['dtype'])
return cls(**kwargs)
# pylint: enable=protected-access
|
saurabh6790/medsyn-app
|
refs/heads/master
|
manufacturing/doctype/bom/bom.py
|
30
|
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import webnotes
from webnotes.utils import cint, cstr, flt, now, nowdate
from webnotes.model.doc import addchild
from webnotes.model.bean import getlist
from webnotes.model.code import get_obj
from webnotes import msgprint, _
class DocType:
def __init__(self, doc, doclist=[]):
self.doc = doc
self.doclist = doclist
def autoname(self):
last_name = webnotes.conn.sql("""select max(name) from `tabBOM`
where name like "BOM/%s/%%" """ % cstr(self.doc.item).replace('"', '\\"'))
if last_name:
idx = cint(cstr(last_name[0][0]).split('/')[-1].split('-')[0]) + 1
else:
idx = 1
self.doc.name = 'BOM/' + self.doc.item + ('/%.3i' % idx)
def validate(self):
self.clear_operations()
self.validate_main_item()
from utilities.transaction_base import validate_uom_is_integer
validate_uom_is_integer(self.doclist, "stock_uom", "qty")
self.validate_operations()
self.validate_materials()
self.set_bom_material_details()
self.calculate_cost()
def on_update(self):
self.check_recursion()
self.update_exploded_items()
self.doc.save()
def on_submit(self):
self.manage_default_bom()
def on_cancel(self):
webnotes.conn.set(self.doc, "is_active", 0)
webnotes.conn.set(self.doc, "is_default", 0)
# check if used in any other bom
self.validate_bom_links()
self.manage_default_bom()
def on_update_after_submit(self):
self.validate_bom_links()
self.manage_default_bom()
def get_item_det(self, item_code):
item = webnotes.conn.sql("""select name, is_asset_item, is_purchase_item,
docstatus, description, is_sub_contracted_item, stock_uom, default_bom,
last_purchase_rate, standard_rate, is_manufactured_item
from `tabItem` where name=%s""", item_code, as_dict = 1)
return item
def validate_rm_item(self, item):
if item[0]['name'] == self.doc.item:
msgprint("Item_code: %s in materials tab cannot be same as FG Item",
item[0]['name'], raise_exception=1)
if not item or item[0]['docstatus'] == 2:
msgprint("Item %s does not exist in system" % item[0]['item_code'], raise_exception = 1)
def set_bom_material_details(self):
for item in self.doclist.get({"parentfield": "bom_materials"}):
ret = self.get_bom_material_detail({"item_code": item.item_code, "bom_no": item.bom_no,
"qty": item.qty})
for r in ret:
if not item.fields.get(r):
item.fields[r] = ret[r]
def get_bom_material_detail(self, args=None):
""" Get raw material details like uom, desc and rate"""
if not args:
args = webnotes.form_dict.get('args')
if isinstance(args, basestring):
import json
args = json.loads(args)
item = self.get_item_det(args['item_code'])
self.validate_rm_item(item)
args['bom_no'] = args['bom_no'] or item and cstr(item[0]['default_bom']) or ''
args.update(item[0])
rate = self.get_rm_rate(args)
ret_item = {
'description' : item and args['description'] or '',
'stock_uom' : item and args['stock_uom'] or '',
'bom_no' : args['bom_no'],
'rate' : rate
}
return ret_item
def get_rm_rate(self, arg):
""" Get raw material rate as per selected method, if bom exists takes bom cost """
rate = 0
if arg['bom_no']:
rate = self.get_bom_unitcost(arg['bom_no'])
elif arg and (arg['is_purchase_item'] == 'Yes' or arg['is_sub_contracted_item'] == 'Yes'):
if self.doc.rm_cost_as_per == 'Valuation Rate':
rate = self.get_valuation_rate(arg)
elif self.doc.rm_cost_as_per == 'Last Purchase Rate':
rate = arg['last_purchase_rate']
elif self.doc.rm_cost_as_per == "Price List":
if not self.doc.buying_price_list:
webnotes.throw(_("Please select Price List"))
rate = webnotes.conn.get_value("Item Price", {"price_list": self.doc.buying_price_list,
"item_code": arg["item_code"]}, "ref_rate") or 0
elif self.doc.rm_cost_as_per == 'Standard Rate':
rate = arg['standard_rate']
return rate
def update_cost(self):
for d in self.doclist.get({"parentfield": "bom_materials"}):
d.rate = self.get_bom_material_detail({
'item_code': d.item_code,
'bom_no': d.bom_no,
'qty': d.qty
})["rate"]
if self.doc.docstatus == 0:
webnotes.bean(self.doclist).save()
elif self.doc.docstatus == 1:
self.calculate_cost()
self.update_exploded_items()
webnotes.bean(self.doclist).update_after_submit()
def get_bom_unitcost(self, bom_no):
bom = webnotes.conn.sql("""select name, total_cost/quantity as unit_cost from `tabBOM`
where is_active = 1 and name = %s""", bom_no, as_dict=1)
return bom and bom[0]['unit_cost'] or 0
def get_valuation_rate(self, args):
""" Get average valuation rate of relevant warehouses
as per valuation method (MAR/FIFO)
as on costing date
"""
from stock.utils import get_incoming_rate
dt = self.doc.costing_date or nowdate()
time = self.doc.costing_date == nowdate() and now().split()[1] or '23:59'
warehouse = webnotes.conn.sql("select warehouse from `tabBin` where item_code = %s", args['item_code'])
rate = []
for wh in warehouse:
r = get_incoming_rate({
"item_code": args.get("item_code"),
"warehouse": wh[0],
"posting_date": dt,
"posting_time": time,
"qty": args.get("qty") or 0
})
if r:
rate.append(r)
return rate and flt(sum(rate))/len(rate) or 0
def manage_default_bom(self):
""" Uncheck others if current one is selected as default,
update default bom in item master
"""
if self.doc.is_default and self.doc.is_active:
from webnotes.model.utils import set_default
set_default(self.doc, "item")
webnotes.conn.set_value("Item", self.doc.item, "default_bom", self.doc.name)
else:
if not self.doc.is_active:
webnotes.conn.set(self.doc, "is_default", 0)
webnotes.conn.sql("update `tabItem` set default_bom = null where name = %s and default_bom = %s",
(self.doc.item, self.doc.name))
def clear_operations(self):
if not self.doc.with_operations:
self.doclist = self.doc.clear_table(self.doclist, 'bom_operations')
for d in self.doclist.get({"parentfield": "bom_materials"}):
d.operation_no = None
def validate_main_item(self):
""" Validate main FG item"""
item = self.get_item_det(self.doc.item)
if not item:
msgprint("Item %s does not exists in the system or expired." %
self.doc.item, raise_exception = 1)
elif item[0]['is_manufactured_item'] != 'Yes' \
and item[0]['is_sub_contracted_item'] != 'Yes':
msgprint("""As Item: %s is not a manufactured / sub-contracted item, \
you can not make BOM for it""" % self.doc.item, raise_exception = 1)
else:
ret = webnotes.conn.get_value("Item", self.doc.item, ["description", "stock_uom"])
self.doc.description = ret[0]
self.doc.uom = ret[1]
def validate_operations(self):
""" Check duplicate operation no"""
self.op = []
for d in getlist(self.doclist, 'bom_operations'):
if cstr(d.operation_no) in self.op:
msgprint("Operation no: %s is repeated in Operations Table" %
d.operation_no, raise_exception=1)
else:
# add operation in op list
self.op.append(cstr(d.operation_no))
def validate_materials(self):
""" Validate raw material entries """
check_list = []
for m in getlist(self.doclist, 'bom_materials'):
# check if operation no not in op table
if self.doc.with_operations and cstr(m.operation_no) not in self.op:
msgprint("""Operation no: %s against item: %s at row no: %s \
is not present at Operations table""" %
(m.operation_no, m.item_code, m.idx), raise_exception = 1)
item = self.get_item_det(m.item_code)
if item[0]['is_manufactured_item'] == 'Yes':
if not m.bom_no:
msgprint("Please enter BOM No aginst item: %s at row no: %s" %
(m.item_code, m.idx), raise_exception=1)
else:
self.validate_bom_no(m.item_code, m.bom_no, m.idx)
elif m.bom_no:
msgprint("""As Item %s is not a manufactured / sub-contracted item, \
you can not enter BOM against it (Row No: %s).""" %
(m.item_code, m.idx), raise_exception = 1)
if flt(m.qty) <= 0:
msgprint("Please enter qty against raw material: %s at row no: %s" %
(m.item_code, m.idx), raise_exception = 1)
self.check_if_item_repeated(m.item_code, m.operation_no, check_list)
def validate_bom_no(self, item, bom_no, idx):
"""Validate BOM No of sub-contracted items"""
bom = webnotes.conn.sql("""select name from `tabBOM` where name = %s and item = %s
and is_active=1 and docstatus=1""",
(bom_no, item), as_dict =1)
if not bom:
msgprint("""Incorrect BOM No: %s against item: %s at row no: %s.
It may be inactive or not submitted or does not belong to this item.""" %
(bom_no, item, idx), raise_exception = 1)
def check_if_item_repeated(self, item, op, check_list):
if [cstr(item), cstr(op)] in check_list:
msgprint(_("Item") + " %s " % (item,) + _("has been entered atleast twice")
+ (cstr(op) and _(" against same operation") or ""), raise_exception=1)
else:
check_list.append([cstr(item), cstr(op)])
def check_recursion(self):
""" Check whether recursion occurs in any bom"""
check_list = [['parent', 'bom_no', 'parent'], ['bom_no', 'parent', 'child']]
for d in check_list:
bom_list, count = [self.doc.name], 0
while (len(bom_list) > count ):
boms = webnotes.conn.sql(" select %s from `tabBOM Item` where %s = '%s' " %
(d[0], d[1], cstr(bom_list[count])))
count = count + 1
for b in boms:
if b[0] == self.doc.name:
msgprint("""Recursion Occured => '%s' cannot be '%s' of '%s'.
""" % (cstr(b[0]), cstr(d[2]), self.doc.name), raise_exception = 1)
if b[0]:
bom_list.append(b[0])
def update_cost_and_exploded_items(self, bom_list=[]):
bom_list = self.traverse_tree(bom_list)
for bom in bom_list:
bom_obj = get_obj("BOM", bom, with_children=1)
bom_obj.on_update()
return bom_list
def traverse_tree(self, bom_list=[]):
def _get_children(bom_no):
return [cstr(d[0]) for d in webnotes.conn.sql("""select bom_no from `tabBOM Item`
where parent = %s and ifnull(bom_no, '') != ''""", bom_no)]
count = 0
if self.doc.name not in bom_list:
bom_list.append(self.doc.name)
while(count < len(bom_list)):
for child_bom in _get_children(bom_list[count]):
if child_bom not in bom_list:
bom_list.append(child_bom)
count += 1
bom_list.reverse()
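		# Reverse so that child BOMs come before their parents; callers such as
		# update_cost_and_exploded_items can then update costs bottom-up.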
return bom_list
def calculate_cost(self):
"""Calculate bom totals"""
self.calculate_op_cost()
self.calculate_rm_cost()
self.doc.total_cost = self.doc.raw_material_cost + self.doc.operating_cost
def calculate_op_cost(self):
"""Update workstation rate and calculates totals"""
total_op_cost = 0
for d in getlist(self.doclist, 'bom_operations'):
if d.workstation and not d.hour_rate:
d.hour_rate = webnotes.conn.get_value("Workstation", d.workstation, "hour_rate")
if d.hour_rate and d.time_in_mins:
d.operating_cost = flt(d.hour_rate) * flt(d.time_in_mins) / 60.0
total_op_cost += flt(d.operating_cost)
self.doc.operating_cost = total_op_cost
def calculate_rm_cost(self):
"""Fetch RM rate as per today's valuation rate and calculate totals"""
total_rm_cost = 0
for d in getlist(self.doclist, 'bom_materials'):
if d.bom_no:
d.rate = self.get_bom_unitcost(d.bom_no)
d.amount = flt(d.rate) * flt(d.qty)
d.qty_consumed_per_unit = flt(d.qty) / flt(self.doc.quantity)
total_rm_cost += d.amount
self.doc.raw_material_cost = total_rm_cost
def update_exploded_items(self):
""" Update Flat BOM, following will be correct data"""
self.get_exploded_items()
self.add_exploded_items()
def get_exploded_items(self):
""" Get all raw materials including items from child bom"""
self.cur_exploded_items = {}
for d in getlist(self.doclist, 'bom_materials'):
if d.bom_no:
self.get_child_exploded_items(d.bom_no, d.qty)
else:
self.add_to_cur_exploded_items(webnotes._dict({
'item_code' : d.item_code,
'description' : d.description,
'stock_uom' : d.stock_uom,
'qty' : flt(d.qty),
'rate' : flt(d.rate),
}))
def add_to_cur_exploded_items(self, args):
if self.cur_exploded_items.get(args.item_code):
self.cur_exploded_items[args.item_code]["qty"] += args.qty
else:
self.cur_exploded_items[args.item_code] = args
def get_child_exploded_items(self, bom_no, qty):
""" Add all items from Flat BOM of child BOM"""
child_fb_items = webnotes.conn.sql("""select item_code, description, stock_uom, qty, rate,
qty_consumed_per_unit from `tabBOM Explosion Item`
where parent = %s and docstatus = 1""", bom_no, as_dict = 1)
for d in child_fb_items:
self.add_to_cur_exploded_items(webnotes._dict({
'item_code' : d['item_code'],
'description' : d['description'],
'stock_uom' : d['stock_uom'],
'qty' : flt(d['qty_consumed_per_unit'])*qty,
'rate' : flt(d['rate']),
}))
def add_exploded_items(self):
"Add items to Flat BOM table"
self.doclist = self.doc.clear_table(self.doclist, 'flat_bom_details', 1)
for d in self.cur_exploded_items:
ch = addchild(self.doc, 'flat_bom_details', 'BOM Explosion Item', self.doclist)
for i in self.cur_exploded_items[d].keys():
ch.fields[i] = self.cur_exploded_items[d][i]
ch.amount = flt(ch.qty) * flt(ch.rate)
ch.qty_consumed_per_unit = flt(ch.qty) / flt(self.doc.quantity)
ch.docstatus = self.doc.docstatus
ch.save(1)
def get_parent_bom_list(self, bom_no):
p_bom = webnotes.conn.sql("select parent from `tabBOM Item` where bom_no = '%s'" % bom_no)
return p_bom and [i[0] for i in p_bom] or []
def validate_bom_links(self):
if not self.doc.is_active:
act_pbom = webnotes.conn.sql("""select distinct bom_item.parent from `tabBOM Item` bom_item
where bom_item.bom_no = %s and bom_item.docstatus = 1
and exists (select * from `tabBOM` where name = bom_item.parent
and docstatus = 1 and is_active = 1)""", self.doc.name)
if act_pbom and act_pbom[0][0]:
action = self.doc.docstatus < 2 and _("deactivate") or _("cancel")
msgprint(_("Cannot ") + action + _(": It is linked to other active BOM(s)"),
raise_exception=1)
def get_bom_items_as_dict(bom, qty=1, fetch_exploded=1):
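	"""Return a dict keyed by item_code of the items (exploded child items by
	default) required to build the given quantity of the BOM."""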
item_dict = {}
query = """select
bom_item.item_code,
item.item_name,
ifnull(sum(bom_item.qty_consumed_per_unit),0) * %(qty)s as qty,
item.description,
item.stock_uom,
item.default_warehouse,
item.purchase_account as expense_account,
item.cost_center
from
`tab%(table)s` bom_item, `tabItem` item
where
bom_item.docstatus < 2
and bom_item.parent = "%(bom)s"
and item.name = bom_item.item_code
%(conditions)s
group by item_code, stock_uom"""
if fetch_exploded:
items = webnotes.conn.sql(query % {
"qty": qty,
"table": "BOM Explosion Item",
"bom": bom,
"conditions": """and ifnull(item.is_pro_applicable, 'No') = 'No'
and ifnull(item.is_sub_contracted_item, 'No') = 'No' """
}, as_dict=True)
else:
items = webnotes.conn.sql(query % {
"qty": qty,
"table": "BOM Item",
"bom": bom,
"conditions": ""
}, as_dict=True)
# make unique
for item in items:
if item_dict.has_key(item.item_code):
item_dict[item.item_code]["qty"] += flt(item.qty)
else:
item_dict[item.item_code] = item
return item_dict
@webnotes.whitelist()
def get_bom_items(bom, qty=1, fetch_exploded=1):
items = get_bom_items_as_dict(bom, qty, fetch_exploded).values()
items.sort(lambda a, b: a.item_code > b.item_code and 1 or -1)
return items
|
sankhesh/VTK
|
refs/heads/master
|
ThirdParty/Twisted/twisted/internet/test/test_gtkreactor.py
|
28
|
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests to ensure all attributes of L{twisted.internet.gtkreactor} are
deprecated.
"""
import sys
from twisted.trial.unittest import TestCase
class GtkReactorDeprecation(TestCase):
"""
Tests to ensure all attributes of L{twisted.internet.gtkreactor} are
deprecated.
"""
class StubGTK:
class GDK:
INPUT_READ = None
def input_add(self, *params):
pass
class StubPyGTK:
def require(self, something):
pass
def setUp(self):
"""
Create a stub for the module 'gtk' if it does not exist, so that it can
be imported without errors or warnings.
"""
self.mods = sys.modules.copy()
sys.modules['gtk'] = self.StubGTK()
sys.modules['pygtk'] = self.StubPyGTK()
def tearDown(self):
"""
Return sys.modules to the way it was before the test.
"""
sys.modules.clear()
sys.modules.update(self.mods)
def lookForDeprecationWarning(self, testmethod, attributeName):
warningsShown = self.flushWarnings([testmethod])
self.assertEqual(len(warningsShown), 1)
self.assertIs(warningsShown[0]['category'], DeprecationWarning)
self.assertEqual(
warningsShown[0]['message'],
"twisted.internet.gtkreactor." + attributeName + " "
"was deprecated in Twisted 10.1.0: All new applications should be "
"written with gtk 2.x, which is supported by "
"twisted.internet.gtk2reactor.")
def test_gtkReactor(self):
"""
Test deprecation of L{gtkreactor.GtkReactor}
"""
from twisted.internet import gtkreactor
        gtkreactor.GtkReactor()
self.lookForDeprecationWarning(self.test_gtkReactor, "GtkReactor")
def test_portableGtkReactor(self):
"""
        Test deprecation of L{gtkreactor.PortableGtkReactor}
"""
from twisted.internet import gtkreactor
gtkreactor.PortableGtkReactor()
self.lookForDeprecationWarning(self.test_portableGtkReactor,
"PortableGtkReactor")
def test_install(self):
"""
Test deprecation of L{gtkreactor.install}
"""
from twisted.internet import gtkreactor
self.assertRaises(AssertionError, gtkreactor.install)
self.lookForDeprecationWarning(self.test_install, "install")
def test_portableInstall(self):
"""
Test deprecation of L{gtkreactor.portableInstall}
"""
from twisted.internet import gtkreactor
self.assertRaises(AssertionError, gtkreactor.portableInstall)
self.lookForDeprecationWarning(self.test_portableInstall,
"portableInstall")
|
mhnatiuk/phd_sociology_of_religion
|
refs/heads/master
|
scrapper/build/scrapy/scrapy/xlib/tx/__init__.py
|
161
|
from scrapy import twisted_version
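# Use Twisted's own HTTP/1.1 client when it is new enough (> 13.0.0), fall back
# to the bundled copy for Twisted >= 11.1.0, and otherwise expose stubs that
# raise NotSupported.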
if twisted_version > (13, 0, 0):
from twisted.web import client
from twisted.internet import endpoints
elif twisted_version >= (11, 1, 0):
from . import client, endpoints
else:
from scrapy.exceptions import NotSupported
class _Mocked(object):
def __init__(self, *args, **kw):
raise NotSupported('HTTP1.1 not supported')
class _Mock(object):
def __getattr__(self, name):
return _Mocked
client = endpoints = _Mock()
Agent = client.Agent
ProxyAgent = client.ProxyAgent
ResponseDone = client.ResponseDone
ResponseFailed = client.ResponseFailed
HTTPConnectionPool = client.HTTPConnectionPool
TCP4ClientEndpoint = endpoints.TCP4ClientEndpoint
|
mhdella/scikit-learn
|
refs/heads/master
|
sklearn/covariance/tests/test_graph_lasso.py
|
272
|
""" Test the graph_lasso module.
"""
import sys
import numpy as np
from scipy import linalg
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_less
from sklearn.covariance import (graph_lasso, GraphLasso, GraphLassoCV,
empirical_covariance)
from sklearn.datasets.samples_generator import make_sparse_spd_matrix
from sklearn.externals.six.moves import StringIO
from sklearn.utils import check_random_state
from sklearn import datasets
def test_graph_lasso(random_state=0):
# Sample data from a sparse multivariate normal
dim = 20
n_samples = 100
random_state = check_random_state(random_state)
prec = make_sparse_spd_matrix(dim, alpha=.95,
random_state=random_state)
cov = linalg.inv(prec)
X = random_state.multivariate_normal(np.zeros(dim), cov, size=n_samples)
emp_cov = empirical_covariance(X)
for alpha in (0., .1, .25):
covs = dict()
icovs = dict()
for method in ('cd', 'lars'):
cov_, icov_, costs = graph_lasso(emp_cov, alpha=alpha, mode=method,
return_costs=True)
covs[method] = cov_
icovs[method] = icov_
costs, dual_gap = np.array(costs).T
# Check that the costs always decrease (doesn't hold if alpha == 0)
if not alpha == 0:
assert_array_less(np.diff(costs), 0)
# Check that the 2 approaches give similar results
assert_array_almost_equal(covs['cd'], covs['lars'], decimal=4)
assert_array_almost_equal(icovs['cd'], icovs['lars'], decimal=4)
# Smoke test the estimator
model = GraphLasso(alpha=.25).fit(X)
model.score(X)
assert_array_almost_equal(model.covariance_, covs['cd'], decimal=4)
assert_array_almost_equal(model.covariance_, covs['lars'], decimal=4)
# For a centered matrix, assume_centered could be chosen True or False
# Check that this returns indeed the same result for centered data
Z = X - X.mean(0)
precs = list()
for assume_centered in (False, True):
prec_ = GraphLasso(assume_centered=assume_centered).fit(Z).precision_
precs.append(prec_)
assert_array_almost_equal(precs[0], precs[1])
def test_graph_lasso_iris():
# Hard-coded solution from R glasso package for alpha=1.0
# The iris datasets in R and sklearn do not match in a few places, these
# values are for the sklearn version
cov_R = np.array([
[0.68112222, 0.0, 0.2651911, 0.02467558],
[0.00, 0.1867507, 0.0, 0.00],
[0.26519111, 0.0, 3.0924249, 0.28774489],
[0.02467558, 0.0, 0.2877449, 0.57853156]
])
icov_R = np.array([
[1.5188780, 0.0, -0.1302515, 0.0],
[0.0, 5.354733, 0.0, 0.0],
[-0.1302515, 0.0, 0.3502322, -0.1686399],
[0.0, 0.0, -0.1686399, 1.8123908]
])
X = datasets.load_iris().data
emp_cov = empirical_covariance(X)
for method in ('cd', 'lars'):
cov, icov = graph_lasso(emp_cov, alpha=1.0, return_costs=False,
mode=method)
assert_array_almost_equal(cov, cov_R)
assert_array_almost_equal(icov, icov_R)
def test_graph_lasso_iris_singular():
# Small subset of rows to test the rank-deficient case
# Need to choose samples such that none of the variances are zero
indices = np.arange(10, 13)
# Hard-coded solution from R glasso package for alpha=0.01
cov_R = np.array([
[0.08, 0.056666662595, 0.00229729713223, 0.00153153142149],
[0.056666662595, 0.082222222222, 0.00333333333333, 0.00222222222222],
[0.002297297132, 0.003333333333, 0.00666666666667, 0.00009009009009],
[0.001531531421, 0.002222222222, 0.00009009009009, 0.00222222222222]
])
icov_R = np.array([
[24.42244057, -16.831679593, 0.0, 0.0],
[-16.83168201, 24.351841681, -6.206896552, -12.5],
[0.0, -6.206896171, 153.103448276, 0.0],
[0.0, -12.499999143, 0.0, 462.5]
])
X = datasets.load_iris().data[indices, :]
emp_cov = empirical_covariance(X)
for method in ('cd', 'lars'):
cov, icov = graph_lasso(emp_cov, alpha=0.01, return_costs=False,
mode=method)
assert_array_almost_equal(cov, cov_R, decimal=5)
assert_array_almost_equal(icov, icov_R, decimal=5)
def test_graph_lasso_cv(random_state=1):
# Sample data from a sparse multivariate normal
dim = 5
n_samples = 6
random_state = check_random_state(random_state)
prec = make_sparse_spd_matrix(dim, alpha=.96,
random_state=random_state)
cov = linalg.inv(prec)
X = random_state.multivariate_normal(np.zeros(dim), cov, size=n_samples)
# Capture stdout, to smoke test the verbose mode
orig_stdout = sys.stdout
try:
sys.stdout = StringIO()
# We need verbose very high so that Parallel prints on stdout
GraphLassoCV(verbose=100, alphas=5, tol=1e-1).fit(X)
finally:
sys.stdout = orig_stdout
# Smoke test with specified alphas
GraphLassoCV(alphas=[0.8, 0.5], tol=1e-1, n_jobs=1).fit(X)
|
luiscarlosgph/nas
|
refs/heads/master
|
env/lib/python2.7/site-packages/django/conf/locale/el/formats.py
|
49
|
# -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'd E Y'
TIME_FORMAT = 'g:i:s A'
# DATETIME_FORMAT =
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j F'
SHORT_DATE_FORMAT = 'd M Y'
# SHORT_DATETIME_FORMAT =
# FIRST_DAY_OF_WEEK =
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
# DATE_INPUT_FORMATS =
# TIME_INPUT_FORMATS =
# DATETIME_INPUT_FORMATS =
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
# NUMBER_GROUPING =
|
xkmato/ureport
|
refs/heads/master
|
ureport/countries/management/commands/__init__.py
|
14
|
__author__ = 'norkans7'
|
glwu/python-for-android
|
refs/heads/master
|
python3-alpha/python3-src/Lib/test/test_setcomps.py
|
201
|
doctests = """
########### Tests mostly copied from test_listcomps.py ############
Test simple loop with conditional
>>> sum({i*i for i in range(100) if i&1 == 1})
166650
Test simple case
>>> {2*y + x + 1 for x in (0,) for y in (1,)}
{3}
Test simple nesting
>>> list(sorted({(i,j) for i in range(3) for j in range(4)}))
[(0, 0), (0, 1), (0, 2), (0, 3), (1, 0), (1, 1), (1, 2), (1, 3), (2, 0), (2, 1), (2, 2), (2, 3)]
Test nesting with the inner expression dependent on the outer
>>> list(sorted({(i,j) for i in range(4) for j in range(i)}))
[(1, 0), (2, 0), (2, 1), (3, 0), (3, 1), (3, 2)]
Make sure the induction variable is not exposed
>>> i = 20
>>> sum({i*i for i in range(100)})
328350
>>> i
20
Verify that syntax errors are raised for setcomps used as lvalues
>>> {y for y in (1,2)} = 10 # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
SyntaxError: ...
>>> {y for y in (1,2)} += 10 # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
SyntaxError: ...
Make a nested set comprehension that acts like set(range())
>>> def srange(n):
... return {i for i in range(n)}
>>> list(sorted(srange(10)))
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
Same again, only as a lambda expression instead of a function definition
>>> lrange = lambda n: {i for i in range(n)}
>>> list(sorted(lrange(10)))
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
Generators can call other generators:
>>> def grange(n):
... for x in {i for i in range(n)}:
... yield x
>>> list(sorted(grange(5)))
[0, 1, 2, 3, 4]
Make sure that None is a valid return value
>>> {None for i in range(10)}
{None}
########### Tests for various scoping corner cases ############
Return lambdas that use the iteration variable as a default argument
>>> items = {(lambda i=i: i) for i in range(5)}
>>> {x() for x in items} == set(range(5))
True
Same again, only this time as a closure variable
>>> items = {(lambda: i) for i in range(5)}
>>> {x() for x in items}
{4}
Another way to test that the iteration variable is local to the list comp
>>> items = {(lambda: i) for i in range(5)}
>>> i = 20
>>> {x() for x in items}
{4}
And confirm that a closure can jump over the list comp scope
>>> items = {(lambda: y) for i in range(5)}
>>> y = 2
>>> {x() for x in items}
{2}
We also repeat each of the above scoping tests inside a function
>>> def test_func():
... items = {(lambda i=i: i) for i in range(5)}
... return {x() for x in items}
>>> test_func() == set(range(5))
True
>>> def test_func():
... items = {(lambda: i) for i in range(5)}
... return {x() for x in items}
>>> test_func()
{4}
>>> def test_func():
... items = {(lambda: i) for i in range(5)}
... i = 20
... return {x() for x in items}
>>> test_func()
{4}
>>> def test_func():
... items = {(lambda: y) for i in range(5)}
... y = 2
... return {x() for x in items}
>>> test_func()
{2}
"""
__test__ = {'doctests' : doctests}
def test_main(verbose=None):
import sys
from test import support
from test import test_setcomps
support.run_doctest(test_setcomps, verbose)
# verify reference counting
if verbose and hasattr(sys, "gettotalrefcount"):
import gc
counts = [None] * 5
for i in range(len(counts)):
support.run_doctest(test_setcomps, verbose)
gc.collect()
counts[i] = sys.gettotalrefcount()
print(counts)
if __name__ == "__main__":
test_main(verbose=True)
|
ssuthiku/linux
|
refs/heads/master
|
tools/perf/scripts/python/sctop.py
|
1996
|
# system call top
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Periodically displays system-wide system call totals, broken down by
# syscall. If a [comm] arg is specified, only syscalls called by
# [comm] are displayed. If an [interval] arg is specified, the display
# will be refreshed every [interval] seconds. The default interval is
# 3 seconds.
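# As a quick illustration (process name and interval are arbitrary):
#   perf script -s sctop.py firefox 5
# shows syscall totals for "firefox" only, refreshed every 5 seconds, while
# omitting both arguments falls back to the system-wide 3-second default.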
import os, sys, thread, time
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
usage = "perf script -s sctop.py [comm] [interval]\n";
for_comm = None
default_interval = 3
interval = default_interval
if len(sys.argv) > 3:
sys.exit(usage)
if len(sys.argv) > 2:
for_comm = sys.argv[1]
interval = int(sys.argv[2])
elif len(sys.argv) > 1:
try:
interval = int(sys.argv[1])
except ValueError:
for_comm = sys.argv[1]
interval = default_interval
syscalls = autodict()
def trace_begin():
thread.start_new_thread(print_syscall_totals, (interval,))
pass
def raw_syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, id, args):
if for_comm is not None:
if common_comm != for_comm:
return
try:
syscalls[id] += 1
except TypeError:
syscalls[id] = 1
def syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, args):
raw_syscalls__sys_enter(**locals())
def print_syscall_totals(interval):
while 1:
clear_term()
if for_comm is not None:
print "\nsyscall events for %s:\n\n" % (for_comm),
else:
print "\nsyscall events:\n\n",
print "%-40s %10s\n" % ("event", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"----------"),
for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
reverse = True):
try:
print "%-40s %10d\n" % (syscall_name(id), val),
except TypeError:
pass
syscalls.clear()
time.sleep(interval)
|
ptpt/taoblog
|
refs/heads/master
|
taoblog/application.py
|
1
|
__all__ = ['application']
import os
import tempfile
from flask import Flask
from .views import configure_app
from .models import bind_engine
DEBUG = False
DATABASE_ECHO = False
DATABASE_URI = 'sqlite:///%s' % tempfile.mkstemp()[1]
POST_PERPAGE = 8
POST_API_PERPAGE = 20
POST_FEED_PERPAGE = 20
# no one is admin by default
ADMIN_EMAIL = None
BLOG_TITLE = 'Taoblog'
SECRET_KEY = os.urandom(24)
LOCALE = 'en'
application = Flask('taoblog')
application.config.from_object(__name__)
# load config from the file specified by the env var
application.config.from_envvar('TAOBLOG_CONFIG_PATH', silent=True)
# init database
bind_engine(application.config['DATABASE_URI'],
application.config['DATABASE_ECHO'])
# configure application
configure_app(application)
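# As an illustration, a hypothetical file pointed to by TAOBLOG_CONFIG_PATH
# could hold plain assignments such as:
#   DEBUG = True
#   BLOG_TITLE = 'My Blog'
# from_envvar() loads it after the defaults above, so its values take precedence.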
|
trakerr-com/trakerr-python
|
refs/heads/master
|
trakerr_client/models/stack_trace_lines.py
|
3
|
# coding: utf-8
"""
Trakerr API
Get your application events and errors to Trakerr via the *Trakerr API*.
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pprint import pformat
from six import iteritems
import re
class StackTraceLines(list):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self):
"""
StackTraceLines - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
super(StackTraceLines, self).__init__()
self.swagger_types = {
}
self.attribute_map = {
}
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
|
palmtree5/Red-DiscordBot
|
refs/heads/V3/py39
|
redbot/cogs/trivia/log.py
|
4
|
"""Log for Trivia cog."""
import logging
__all__ = ["LOG"]
LOG = logging.getLogger("red.trivia")
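# Other modules in the cog can then log through this object, e.g.
# LOG.info("Trivia session started")  (message text is illustrative).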
|
bealdav/OpenUpgrade
|
refs/heads/8.0
|
openerp/addons/base/workflow/workflow.py
|
33
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (C) 2004-2014 OpenERP S.A. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
import openerp.workflow
class workflow(osv.osv):
_name = "workflow"
_table = "wkf"
_order = "name"
_columns = {
'name': fields.char('Name', size=64, required=True),
'osv': fields.char('Resource Object', size=64, required=True,select=True),
'on_create': fields.boolean('On Create', select=True),
'activities': fields.one2many('workflow.activity', 'wkf_id', 'Activities'),
}
_defaults = {
'on_create': lambda *a: True
}
def write(self, cr, user, ids, vals, context=None):
if not context:
context={}
openerp.workflow.clear_cache(cr, user)
return super(workflow, self).write(cr, user, ids, vals, context=context)
def get_active_workitems(self, cr, uid, res, res_id, context=None):
cr.execute('select * from wkf where osv=%s limit 1',(res,))
wkfinfo = cr.dictfetchone()
workitems = []
if wkfinfo:
cr.execute('SELECT id FROM wkf_instance \
WHERE res_id=%s AND wkf_id=%s \
ORDER BY state LIMIT 1',
(res_id, wkfinfo['id']))
inst_id = cr.fetchone()
cr.execute('select act_id,count(*) from wkf_workitem where inst_id=%s group by act_id', (inst_id,))
workitems = dict(cr.fetchall())
return {'wkf': wkfinfo, 'workitems': workitems}
def create(self, cr, user, vals, context=None):
if not context:
context={}
openerp.workflow.clear_cache(cr, user)
return super(workflow, self).create(cr, user, vals, context=context)
workflow()
class wkf_activity(osv.osv):
_name = "workflow.activity"
_table = "wkf_activity"
_order = "name"
_columns = {
'name': fields.char('Name', size=64, required=True),
'wkf_id': fields.many2one('workflow', 'Workflow', required=True, select=True, ondelete='cascade'),
'split_mode': fields.selection([('XOR', 'Xor'), ('OR','Or'), ('AND','And')], 'Split Mode', size=3, required=True),
'join_mode': fields.selection([('XOR', 'Xor'), ('AND', 'And')], 'Join Mode', size=3, required=True),
'kind': fields.selection([('dummy', 'Dummy'), ('function', 'Function'), ('subflow', 'Subflow'), ('stopall', 'Stop All')], 'Kind', size=64, required=True),
'action': fields.text('Python Action'),
'action_id': fields.many2one('ir.actions.server', 'Server Action', ondelete='set null'),
'flow_start': fields.boolean('Flow Start'),
'flow_stop': fields.boolean('Flow Stop'),
'subflow_id': fields.many2one('workflow', 'Subflow'),
'signal_send': fields.char('Signal (subflow.*)', size=32),
'out_transitions': fields.one2many('workflow.transition', 'act_from', 'Outgoing Transitions'),
'in_transitions': fields.one2many('workflow.transition', 'act_to', 'Incoming Transitions'),
}
_defaults = {
'kind': lambda *a: 'dummy',
'join_mode': lambda *a: 'XOR',
'split_mode': lambda *a: 'XOR',
}
def unlink(self, cr, uid, ids, context=None):
if context is None: context = {}
if not context.get('_force_unlink') and self.pool.get('workflow.workitem').search(cr, uid, [('act_id', 'in', ids)]):
raise osv.except_osv(_('Operation Forbidden'),
_('Please make sure no workitems refer to an activity before deleting it!'))
super(wkf_activity, self).unlink(cr, uid, ids, context=context)
wkf_activity()
class wkf_transition(osv.osv):
_table = "wkf_transition"
_name = "workflow.transition"
_rec_name = 'signal'
_columns = {
'trigger_model': fields.char('Trigger Object', size=128),
'trigger_expr_id': fields.char('Trigger Expression', size=128),
'signal': fields.char('Signal (Button Name)', size=64,
help="When the operation of transition comes from a button pressed in the client form, "\
"signal tests the name of the pressed button. If signal is NULL, no button is necessary to validate this transition."),
'group_id': fields.many2one('res.groups', 'Group Required',
help="The group that a user must have to be authorized to validate this transition."),
'condition': fields.char('Condition', required=True, size=128,
help="Expression to be satisfied if we want the transition done."),
'act_from': fields.many2one('workflow.activity', 'Source Activity', required=True, select=True, ondelete='cascade',
help="Source activity. When this activity is over, the condition is tested to determine if we can start the ACT_TO activity."),
'act_to': fields.many2one('workflow.activity', 'Destination Activity', required=True, select=True, ondelete='cascade',
help="The destination activity."),
'wkf_id': fields.related('act_from','wkf_id', type='many2one', relation='workflow', string='Workflow', select=True),
}
_defaults = {
'condition': lambda *a: 'True',
}
def name_get(self, cr, uid, ids, context=None):
return [(line.id, (line.act_from.name) + '+' + (line.act_to.name)) if line.signal == False else (line.id, line.signal) for line in self.browse(cr, uid, ids, context=context)]
def name_search(self, cr, user, name, args=None, operator='ilike', context=None, limit=100):
if args is None:
args = []
if name:
ids = self.search(cr, user, ['|',('act_from', operator, name),('act_to', operator, name)] + args, limit=limit)
return self.name_get(cr, user, ids, context=context)
return super(wkf_transition, self).name_search(cr, user, name, args=args, operator=operator, context=context, limit=limit)
wkf_transition()
class wkf_instance(osv.osv):
_table = "wkf_instance"
_name = "workflow.instance"
_rec_name = 'res_type'
_log_access = False
_columns = {
'uid': fields.integer('User'), # FIXME no constraint??
'wkf_id': fields.many2one('workflow', 'Workflow', ondelete='cascade', select=True),
'res_id': fields.integer('Resource ID'),
'res_type': fields.char('Resource Object', size=64),
'state': fields.char('Status', size=32),
'transition_ids': fields.many2many('workflow.transition', 'wkf_witm_trans', 'inst_id', 'trans_id'),
}
def _auto_init(self, cr, context=None):
super(wkf_instance, self)._auto_init(cr, context)
cr.execute('SELECT indexname FROM pg_indexes WHERE indexname = \'wkf_instance_res_type_res_id_state_index\'')
if not cr.fetchone():
cr.execute('CREATE INDEX wkf_instance_res_type_res_id_state_index ON wkf_instance (res_type, res_id, state)')
cr.execute('SELECT indexname FROM pg_indexes WHERE indexname = \'wkf_instance_res_id_wkf_id_index\'')
if not cr.fetchone():
cr.execute('CREATE INDEX wkf_instance_res_id_wkf_id_index ON wkf_instance (res_id, wkf_id)')
wkf_instance()
class wkf_workitem(osv.osv):
_table = "wkf_workitem"
_name = "workflow.workitem"
_log_access = False
_rec_name = 'state'
_columns = {
'act_id': fields.many2one('workflow.activity', 'Activity', required=True, ondelete="cascade", select=True),
'wkf_id': fields.related('act_id','wkf_id', type='many2one', relation='workflow', string='Workflow'),
'subflow_id': fields.many2one('workflow.instance', 'Subflow', ondelete="cascade", select=True),
'inst_id': fields.many2one('workflow.instance', 'Instance', required=True, ondelete="cascade", select=True),
'state': fields.char('Status', size=64, select=True),
}
wkf_workitem()
class wkf_triggers(osv.osv):
_table = "wkf_triggers"
_name = "workflow.triggers"
_log_access = False
_columns = {
'res_id': fields.integer('Resource ID', size=128),
'model': fields.char('Object', size=128),
'instance_id': fields.many2one('workflow.instance', 'Destination Instance', ondelete="cascade"),
'workitem_id': fields.many2one('workflow.workitem', 'Workitem', required=True, ondelete="cascade"),
}
def _auto_init(self, cr, context=None):
super(wkf_triggers, self)._auto_init(cr, context)
cr.execute('SELECT indexname FROM pg_indexes WHERE indexname = \'wkf_triggers_res_id_model_index\'')
if not cr.fetchone():
cr.execute('CREATE INDEX wkf_triggers_res_id_model_index ON wkf_triggers (res_id, model)')
wkf_triggers()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
ondra-novak/chromium.src
|
refs/heads/nw
|
tools/perf/benchmarks/media.py
|
32
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from measurements import media
import page_sets
from telemetry import benchmark
from telemetry.page import page_test
from telemetry.value import list_of_scalar_values
from telemetry.value import scalar
class _MSEMeasurement(page_test.PageTest):
def ValidateAndMeasurePage(self, page, tab, results):
media_metric = tab.EvaluateJavaScript('window.__testMetrics')
trace = media_metric['id'] if 'id' in media_metric else None
metrics = media_metric['metrics'] if 'metrics' in media_metric else []
for m in metrics:
trace_name = '%s.%s' % (m, trace)
if isinstance(metrics[m], list):
results.AddValue(list_of_scalar_values.ListOfScalarValues(
results.current_page, trace_name, units='ms',
values=[float(v) for v in metrics[m]],
important=True))
else:
results.AddValue(scalar.ScalarValue(
results.current_page, trace_name, units='ms',
value=float(metrics[m]), important=True))
@benchmark.Disabled('android')
class Media(benchmark.Benchmark):
"""Obtains media metrics for key user scenarios."""
test = media.Media
page_set = page_sets.ToughVideoCasesPageSet
@benchmark.Disabled
class MediaNetworkSimulation(benchmark.Benchmark):
"""Obtains media metrics under different network simulations."""
test = media.Media
page_set = page_sets.MediaCnsCasesPageSet
@benchmark.Enabled('android')
@benchmark.Disabled('l')
class MediaAndroid(benchmark.Benchmark):
"""Obtains media metrics for key user scenarios on Android."""
test = media.Media
tag = 'android'
page_set = page_sets.ToughVideoCasesPageSet
# Exclude is_4k and 50 fps media files (garden* & crowd*).
options = {'page_label_filter_exclude': 'is_4k,is_50fps'}
@benchmark.Enabled('chromeos')
class MediaChromeOS4kOnly(benchmark.Benchmark):
"""Benchmark for media performance on ChromeOS using only is_4k test content.
"""
test = media.Media
tag = 'chromeOS4kOnly'
page_set = page_sets.ToughVideoCasesPageSet
options = {
'page_label_filter': 'is_4k',
# Exclude is_50fps test files: crbug/331816
'page_label_filter_exclude': 'is_50fps'
}
@benchmark.Enabled('chromeos')
class MediaChromeOS(benchmark.Benchmark):
"""Benchmark for media performance on all ChromeOS platforms.
This benchmark does not run is_4k content, there's a separate benchmark for
that.
"""
test = media.Media
tag = 'chromeOS'
page_set = page_sets.ToughVideoCasesPageSet
# Exclude is_50fps test files: crbug/331816
options = {'page_label_filter_exclude': 'is_4k,is_50fps'}
class MediaSourceExtensions(benchmark.Benchmark):
"""Obtains media metrics for key media source extensions functions."""
test = _MSEMeasurement
page_set = page_sets.MseCasesPageSet
def CustomizeBrowserOptions(self, options):
# Needed to allow XHR requests to return stream objects.
options.AppendExtraBrowserArgs(
['--enable-experimental-web-platform-features',
'--disable-gesture-requirement-for-media-playback'])
|
andrespires/python-buildpack
|
refs/heads/master
|
vendor/pip-pop/pip/utils/outdated.py
|
191
|
from __future__ import absolute_import
import datetime
import json
import logging
import os.path
import sys
from pip._vendor import lockfile
from pip._vendor.packaging import version as packaging_version
from pip.compat import total_seconds, WINDOWS
from pip.index import PyPI
from pip.locations import USER_CACHE_DIR, running_under_virtualenv
from pip.utils import ensure_dir, get_installed_version
from pip.utils.filesystem import check_path_owner
SELFCHECK_DATE_FMT = "%Y-%m-%dT%H:%M:%SZ"
logger = logging.getLogger(__name__)
class VirtualenvSelfCheckState(object):
def __init__(self):
self.statefile_path = os.path.join(sys.prefix, "pip-selfcheck.json")
# Load the existing state
try:
with open(self.statefile_path) as statefile:
self.state = json.load(statefile)
except (IOError, ValueError):
self.state = {}
def save(self, pypi_version, current_time):
# Attempt to write out our version check file
with open(self.statefile_path, "w") as statefile:
json.dump(
{
"last_check": current_time.strftime(SELFCHECK_DATE_FMT),
"pypi_version": pypi_version,
},
statefile,
sort_keys=True,
separators=(",", ":")
)
class GlobalSelfCheckState(object):
def __init__(self):
self.statefile_path = os.path.join(USER_CACHE_DIR, "selfcheck.json")
# Load the existing state
try:
with open(self.statefile_path) as statefile:
self.state = json.load(statefile)[sys.prefix]
except (IOError, ValueError, KeyError):
self.state = {}
def save(self, pypi_version, current_time):
# Check to make sure that we own the directory
if not check_path_owner(os.path.dirname(self.statefile_path)):
return
# Now that we've ensured the directory is owned by this user, we'll go
# ahead and make sure that all our directories are created.
ensure_dir(os.path.dirname(self.statefile_path))
# Attempt to write out our version check file
with lockfile.LockFile(self.statefile_path):
if os.path.exists(self.statefile_path):
with open(self.statefile_path) as statefile:
state = json.load(statefile)
else:
state = {}
state[sys.prefix] = {
"last_check": current_time.strftime(SELFCHECK_DATE_FMT),
"pypi_version": pypi_version,
}
with open(self.statefile_path, "w") as statefile:
json.dump(state, statefile, sort_keys=True,
separators=(",", ":"))
def load_selfcheck_statefile():
if running_under_virtualenv():
return VirtualenvSelfCheckState()
else:
return GlobalSelfCheckState()
def pip_version_check(session):
"""Check for an update for pip.
Limit the frequency of checks to once per week. State is stored either in
the active virtualenv or in the user's USER_CACHE_DIR keyed off the prefix
of the pip script path.
"""
installed_version = get_installed_version("pip")
if installed_version is None:
return
pip_version = packaging_version.parse(installed_version)
pypi_version = None
try:
state = load_selfcheck_statefile()
current_time = datetime.datetime.utcnow()
# Determine if we need to refresh the state
if "last_check" in state.state and "pypi_version" in state.state:
last_check = datetime.datetime.strptime(
state.state["last_check"],
SELFCHECK_DATE_FMT
)
if total_seconds(current_time - last_check) < 7 * 24 * 60 * 60:
pypi_version = state.state["pypi_version"]
# Refresh the version if we need to or just see if we need to warn
if pypi_version is None:
resp = session.get(
PyPI.pip_json_url,
headers={"Accept": "application/json"},
)
resp.raise_for_status()
pypi_version = [
v for v in sorted(
list(resp.json()["releases"]),
key=packaging_version.parse,
)
if not packaging_version.parse(v).is_prerelease
][-1]
# save that we've performed a check
state.save(pypi_version, current_time)
remote_version = packaging_version.parse(pypi_version)
# Determine if our pypi_version is older
if (pip_version < remote_version and
pip_version.base_version != remote_version.base_version):
# Advise "python -m pip" on Windows to avoid issues
# with overwriting pip.exe.
if WINDOWS:
pip_cmd = "python -m pip"
else:
pip_cmd = "pip"
logger.warning(
"You are using pip version %s, however version %s is "
"available.\nYou should consider upgrading via the "
"'%s install --upgrade pip' command." % (pip_version,
pypi_version,
pip_cmd)
)
except Exception:
logger.debug(
"There was an error checking the latest version of pip",
exc_info=True,
)
|
TNT-Samuel/Coding-Projects
|
refs/heads/master
|
DNS Server/Source - Copy/Lib/xmlrpc/client.py
|
11
|
#
# XML-RPC CLIENT LIBRARY
# $Id$
#
# an XML-RPC client interface for Python.
#
# the marshalling and response parser code can also be used to
# implement XML-RPC servers.
#
# Notes:
# this version is designed to work with Python 2.1 or newer.
#
# History:
# 1999-01-14 fl Created
# 1999-01-15 fl Changed dateTime to use localtime
# 1999-01-16 fl Added Binary/base64 element, default to RPC2 service
# 1999-01-19 fl Fixed array data element (from Skip Montanaro)
# 1999-01-21 fl Fixed dateTime constructor, etc.
# 1999-02-02 fl Added fault handling, handle empty sequences, etc.
# 1999-02-10 fl Fixed problem with empty responses (from Skip Montanaro)
# 1999-06-20 fl Speed improvements, pluggable parsers/transports (0.9.8)
# 2000-11-28 fl Changed boolean to check the truth value of its argument
# 2001-02-24 fl Added encoding/Unicode/SafeTransport patches
# 2001-02-26 fl Added compare support to wrappers (0.9.9/1.0b1)
# 2001-03-28 fl Make sure response tuple is a singleton
# 2001-03-29 fl Don't require empty params element (from Nicholas Riley)
# 2001-06-10 fl Folded in _xmlrpclib accelerator support (1.0b2)
# 2001-08-20 fl Base xmlrpclib.Error on built-in Exception (from Paul Prescod)
# 2001-09-03 fl Allow Transport subclass to override getparser
# 2001-09-10 fl Lazy import of urllib, cgi, xmllib (20x import speedup)
# 2001-10-01 fl Remove containers from memo cache when done with them
# 2001-10-01 fl Use faster escape method (80% dumps speedup)
# 2001-10-02 fl More dumps microtuning
# 2001-10-04 fl Make sure import expat gets a parser (from Guido van Rossum)
# 2001-10-10 sm Allow long ints to be passed as ints if they don't overflow
# 2001-10-17 sm Test for int and long overflow (allows use on 64-bit systems)
# 2001-11-12 fl Use repr() to marshal doubles (from Paul Felix)
# 2002-03-17 fl Avoid buffered read when possible (from James Rucker)
# 2002-04-07 fl Added pythondoc comments
# 2002-04-16 fl Added __str__ methods to datetime/binary wrappers
# 2002-05-15 fl Added error constants (from Andrew Kuchling)
# 2002-06-27 fl Merged with Python CVS version
# 2002-10-22 fl Added basic authentication (based on code from Phillip Eby)
# 2003-01-22 sm Add support for the bool type
# 2003-02-27 gvr Remove apply calls
# 2003-04-24 sm Use cStringIO if available
# 2003-04-25 ak Add support for nil
# 2003-06-15 gn Add support for time.struct_time
# 2003-07-12 gp Correct marshalling of Faults
# 2003-10-31 mvl Add multicall support
# 2004-08-20 mvl Bump minimum supported Python version to 2.1
# 2014-12-02 ch/doko Add workaround for gzip bomb vulnerability
#
# Copyright (c) 1999-2002 by Secret Labs AB.
# Copyright (c) 1999-2002 by Fredrik Lundh.
#
# info@pythonware.com
# http://www.pythonware.com
#
# --------------------------------------------------------------------
# The XML-RPC client interface is
#
# Copyright (c) 1999-2002 by Secret Labs AB
# Copyright (c) 1999-2002 by Fredrik Lundh
#
# By obtaining, using, and/or copying this software and/or its
# associated documentation, you agree that you have read, understood,
# and will comply with the following terms and conditions:
#
# Permission to use, copy, modify, and distribute this software and
# its associated documentation for any purpose and without fee is
# hereby granted, provided that the above copyright notice appears in
# all copies, and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# Secret Labs AB or the author not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD
# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT-
# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR
# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
# --------------------------------------------------------------------
"""
An XML-RPC client interface for Python.
The marshalling and response parser code can also be used to
implement XML-RPC servers.
Exported exceptions:
Error Base class for client errors
ProtocolError Indicates an HTTP protocol error
ResponseError Indicates a broken response package
Fault Indicates an XML-RPC fault package
Exported classes:
ServerProxy Represents a logical connection to an XML-RPC server
MultiCall Executor of boxcared xmlrpc requests
DateTime dateTime wrapper for an ISO 8601 string or time tuple or
localtime integer value to generate a "dateTime.iso8601"
XML-RPC value
Binary binary data wrapper
Marshaller Generate an XML-RPC params chunk from a Python data structure
Unmarshaller Unmarshal an XML-RPC response from incoming XML event message
Transport Handles an HTTP transaction to an XML-RPC server
SafeTransport Handles an HTTPS transaction to an XML-RPC server
Exported constants:
(none)
Exported functions:
getparser Create instance of the fastest available parser & attach
to an unmarshalling object
dumps Convert an argument tuple or a Fault instance to an XML-RPC
request (or response, if the methodresponse option is used).
loads Convert an XML-RPC packet to unmarshalled data plus a method
name (None if not present).
"""
import base64
import sys
import time
from datetime import datetime
from decimal import Decimal
import http.client
import urllib.parse
from xml.parsers import expat
import errno
from io import BytesIO
try:
import gzip
except ImportError:
gzip = None #python can be built without zlib/gzip support
# --------------------------------------------------------------------
# Internal stuff
def escape(s):
    s = s.replace("&", "&amp;")
    s = s.replace("<", "&lt;")
    return s.replace(">", "&gt;",)
# used in User-Agent header sent
__version__ = '%d.%d' % sys.version_info[:2]
# xmlrpc integer limits
MAXINT = 2**31-1
MININT = -2**31
# --------------------------------------------------------------------
# Error constants (from Dan Libby's specification at
# http://xmlrpc-epi.sourceforge.net/specs/rfc.fault_codes.php)
# Ranges of errors
PARSE_ERROR = -32700
SERVER_ERROR = -32600
APPLICATION_ERROR = -32500
SYSTEM_ERROR = -32400
TRANSPORT_ERROR = -32300
# Specific errors
NOT_WELLFORMED_ERROR = -32700
UNSUPPORTED_ENCODING = -32701
INVALID_ENCODING_CHAR = -32702
INVALID_XMLRPC = -32600
METHOD_NOT_FOUND = -32601
INVALID_METHOD_PARAMS = -32602
INTERNAL_ERROR = -32603
# --------------------------------------------------------------------
# Exceptions
##
# Base class for all kinds of client-side errors.
class Error(Exception):
"""Base class for client errors."""
def __str__(self):
return repr(self)
##
# Indicates an HTTP-level protocol error. This is raised by the HTTP
# transport layer, if the server returns an error code other than 200
# (OK).
#
# @param url The target URL.
# @param errcode The HTTP error code.
# @param errmsg The HTTP error message.
# @param headers The HTTP header dictionary.
class ProtocolError(Error):
"""Indicates an HTTP protocol error."""
def __init__(self, url, errcode, errmsg, headers):
Error.__init__(self)
self.url = url
self.errcode = errcode
self.errmsg = errmsg
self.headers = headers
def __repr__(self):
return (
"<%s for %s: %s %s>" %
(self.__class__.__name__, self.url, self.errcode, self.errmsg)
)
##
# Indicates a broken XML-RPC response package. This exception is
# raised by the unmarshalling layer, if the XML-RPC response is
# malformed.
class ResponseError(Error):
"""Indicates a broken response package."""
pass
##
# Indicates an XML-RPC fault response package. This exception is
# raised by the unmarshalling layer, if the XML-RPC response contains
# a fault string. This exception can also be used as a class, to
# generate a fault XML-RPC message.
#
# @param faultCode The XML-RPC fault code.
# @param faultString The XML-RPC fault string.
class Fault(Error):
"""Indicates an XML-RPC fault package."""
def __init__(self, faultCode, faultString, **extra):
Error.__init__(self)
self.faultCode = faultCode
self.faultString = faultString
def __repr__(self):
return "<%s %s: %r>" % (self.__class__.__name__,
self.faultCode, self.faultString)
# --------------------------------------------------------------------
# Special values
##
# Backwards compatibility
boolean = Boolean = bool
##
# Wrapper for XML-RPC DateTime values. This converts a time value to
# the format used by XML-RPC.
# <p>
# The value can be given as a datetime object, as a string in the
# format "yyyymmddThh:mm:ss", as a 9-item time tuple (as returned by
# time.localtime()), or an integer value (as returned by time.time()).
# The wrapper uses time.localtime() to convert an integer to a time
# tuple.
#
# @param value The time, given as a datetime object, an ISO 8601 string,
# a time tuple, or an integer time value.
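# For instance (values chosen arbitrarily), DateTime(0) wraps the current local
# time, while DateTime("20240101T12:00:00") keeps the given ISO 8601 string as is.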
# Issue #13305: different format codes across platforms
_day0 = datetime(1, 1, 1)
if _day0.strftime('%Y') == '0001': # Mac OS X
def _iso8601_format(value):
return value.strftime("%Y%m%dT%H:%M:%S")
elif _day0.strftime('%4Y') == '0001': # Linux
def _iso8601_format(value):
return value.strftime("%4Y%m%dT%H:%M:%S")
else:
def _iso8601_format(value):
return value.strftime("%Y%m%dT%H:%M:%S").zfill(17)
del _day0
def _strftime(value):
if isinstance(value, datetime):
return _iso8601_format(value)
if not isinstance(value, (tuple, time.struct_time)):
if value == 0:
value = time.time()
value = time.localtime(value)
return "%04d%02d%02dT%02d:%02d:%02d" % value[:6]
class DateTime:
"""DateTime wrapper for an ISO 8601 string or time tuple or
localtime integer value to generate 'dateTime.iso8601' XML-RPC
value.
"""
def __init__(self, value=0):
if isinstance(value, str):
self.value = value
else:
self.value = _strftime(value)
def make_comparable(self, other):
if isinstance(other, DateTime):
s = self.value
o = other.value
elif isinstance(other, datetime):
s = self.value
o = _iso8601_format(other)
elif isinstance(other, str):
s = self.value
o = other
elif hasattr(other, "timetuple"):
s = self.timetuple()
o = other.timetuple()
else:
otype = (hasattr(other, "__class__")
and other.__class__.__name__
or type(other))
raise TypeError("Can't compare %s and %s" %
(self.__class__.__name__, otype))
return s, o
def __lt__(self, other):
s, o = self.make_comparable(other)
return s < o
def __le__(self, other):
s, o = self.make_comparable(other)
return s <= o
def __gt__(self, other):
s, o = self.make_comparable(other)
return s > o
def __ge__(self, other):
s, o = self.make_comparable(other)
return s >= o
def __eq__(self, other):
s, o = self.make_comparable(other)
return s == o
def timetuple(self):
return time.strptime(self.value, "%Y%m%dT%H:%M:%S")
##
# Get date/time value.
#
# @return Date/time value, as an ISO 8601 string.
def __str__(self):
return self.value
def __repr__(self):
return "<%s %r at %#x>" % (self.__class__.__name__, self.value, id(self))
def decode(self, data):
self.value = str(data).strip()
def encode(self, out):
out.write("<value><dateTime.iso8601>")
out.write(self.value)
out.write("</dateTime.iso8601></value>\n")
def _datetime(data):
# decode xml element contents into a DateTime structure.
value = DateTime()
value.decode(data)
return value
def _datetime_type(data):
return datetime.strptime(data, "%Y%m%dT%H:%M:%S")
##
# Wrapper for binary data. This can be used to transport any kind
# of binary data over XML-RPC, using BASE64 encoding.
#
# @param data An 8-bit string containing arbitrary data.
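# For example (payload chosen arbitrarily), Binary(b"\x00\xff") exposes the raw
# bytes via .data and is base64-encoded when marshalled onto the wire.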
class Binary:
"""Wrapper for binary data."""
def __init__(self, data=None):
if data is None:
data = b""
else:
if not isinstance(data, (bytes, bytearray)):
raise TypeError("expected bytes or bytearray, not %s" %
data.__class__.__name__)
data = bytes(data) # Make a copy of the bytes!
self.data = data
##
# Get buffer contents.
#
# @return Buffer contents, as an 8-bit string.
def __str__(self):
return str(self.data, "latin-1") # XXX encoding?!
def __eq__(self, other):
if isinstance(other, Binary):
other = other.data
return self.data == other
def decode(self, data):
self.data = base64.decodebytes(data)
def encode(self, out):
out.write("<value><base64>\n")
encoded = base64.encodebytes(self.data)
out.write(encoded.decode('ascii'))
out.write("</base64></value>\n")
def _binary(data):
# decode xml element contents into a Binary structure
value = Binary()
value.decode(data)
return value
WRAPPERS = (DateTime, Binary)
# --------------------------------------------------------------------
# XML parsers
class ExpatParser:
# fast expat parser for Python 2.0 and later.
def __init__(self, target):
self._parser = parser = expat.ParserCreate(None, None)
self._target = target
parser.StartElementHandler = target.start
parser.EndElementHandler = target.end
parser.CharacterDataHandler = target.data
encoding = None
target.xml(encoding, None)
def feed(self, data):
self._parser.Parse(data, 0)
def close(self):
try:
parser = self._parser
except AttributeError:
pass
else:
del self._target, self._parser # get rid of circular references
parser.Parse(b"", True) # end of data
# --------------------------------------------------------------------
# XML-RPC marshalling and unmarshalling code
##
# XML-RPC marshaller.
#
# @param encoding Default encoding for 8-bit strings. The default
# value is None (interpreted as UTF-8).
# @see dumps
class Marshaller:
"""Generate an XML-RPC params chunk from a Python data structure.
Create a Marshaller instance for each set of parameters, and use
the "dumps" method to convert your data (represented as a tuple)
to an XML-RPC params chunk. To write a fault response, pass a
Fault instance instead. You may prefer to use the "dumps" module
function for this purpose.
"""
# by the way, if you don't understand what's going on in here,
# that's perfectly ok.
def __init__(self, encoding=None, allow_none=False):
self.memo = {}
self.data = None
self.encoding = encoding
self.allow_none = allow_none
dispatch = {}
def dumps(self, values):
out = []
write = out.append
dump = self.__dump
if isinstance(values, Fault):
# fault instance
write("<fault>\n")
dump({'faultCode': values.faultCode,
'faultString': values.faultString},
write)
write("</fault>\n")
else:
# parameter block
# FIXME: the xml-rpc specification allows us to leave out
# the entire <params> block if there are no parameters.
# however, changing this may break older code (including
# old versions of xmlrpclib.py), so this is better left as
# is for now. See @XMLRPC3 for more information. /F
write("<params>\n")
for v in values:
write("<param>\n")
dump(v, write)
write("</param>\n")
write("</params>\n")
result = "".join(out)
return result
def __dump(self, value, write):
try:
f = self.dispatch[type(value)]
except KeyError:
# check if this object can be marshalled as a structure
if not hasattr(value, '__dict__'):
raise TypeError("cannot marshal %s objects" % type(value))
# check if this class is a sub-class of a basic type,
# because we don't know how to marshal these types
# (e.g. a string sub-class)
for type_ in type(value).__mro__:
if type_ in self.dispatch.keys():
raise TypeError("cannot marshal %s objects" % type(value))
# XXX(twouters): using "_arbitrary_instance" as key as a quick-fix
# for the p3yk merge, this should probably be fixed more neatly.
f = self.dispatch["_arbitrary_instance"]
f(self, value, write)
def dump_nil (self, value, write):
if not self.allow_none:
raise TypeError("cannot marshal None unless allow_none is enabled")
write("<value><nil/></value>")
dispatch[type(None)] = dump_nil
def dump_bool(self, value, write):
write("<value><boolean>")
write(value and "1" or "0")
write("</boolean></value>\n")
dispatch[bool] = dump_bool
def dump_long(self, value, write):
if value > MAXINT or value < MININT:
raise OverflowError("int exceeds XML-RPC limits")
write("<value><int>")
write(str(int(value)))
write("</int></value>\n")
dispatch[int] = dump_long
# backward compatible
dump_int = dump_long
def dump_double(self, value, write):
write("<value><double>")
write(repr(value))
write("</double></value>\n")
dispatch[float] = dump_double
def dump_unicode(self, value, write, escape=escape):
write("<value><string>")
write(escape(value))
write("</string></value>\n")
dispatch[str] = dump_unicode
def dump_bytes(self, value, write):
write("<value><base64>\n")
encoded = base64.encodebytes(value)
write(encoded.decode('ascii'))
write("</base64></value>\n")
dispatch[bytes] = dump_bytes
dispatch[bytearray] = dump_bytes
def dump_array(self, value, write):
i = id(value)
if i in self.memo:
raise TypeError("cannot marshal recursive sequences")
self.memo[i] = None
dump = self.__dump
write("<value><array><data>\n")
for v in value:
dump(v, write)
write("</data></array></value>\n")
del self.memo[i]
dispatch[tuple] = dump_array
dispatch[list] = dump_array
def dump_struct(self, value, write, escape=escape):
i = id(value)
if i in self.memo:
raise TypeError("cannot marshal recursive dictionaries")
self.memo[i] = None
dump = self.__dump
write("<value><struct>\n")
for k, v in value.items():
write("<member>\n")
if not isinstance(k, str):
raise TypeError("dictionary key must be string")
write("<name>%s</name>\n" % escape(k))
dump(v, write)
write("</member>\n")
write("</struct></value>\n")
del self.memo[i]
dispatch[dict] = dump_struct
def dump_datetime(self, value, write):
write("<value><dateTime.iso8601>")
write(_strftime(value))
write("</dateTime.iso8601></value>\n")
dispatch[datetime] = dump_datetime
def dump_instance(self, value, write):
# check for special wrappers
if value.__class__ in WRAPPERS:
self.write = write
value.encode(self)
del self.write
else:
# store instance attributes as a struct (really?)
self.dump_struct(value.__dict__, write)
dispatch[DateTime] = dump_instance
dispatch[Binary] = dump_instance
# XXX(twouters): using "_arbitrary_instance" as key as a quick-fix
# for the p3yk merge, this should probably be fixed more neatly.
dispatch["_arbitrary_instance"] = dump_instance
##
# XML-RPC unmarshaller.
#
# @see loads
class Unmarshaller:
"""Unmarshal an XML-RPC response, based on incoming XML event
messages (start, data, end). Call close() to get the resulting
data structure.
Note that this reader is fairly tolerant, and gladly accepts bogus
XML-RPC data without complaining (but not bogus XML).
"""
# and again, if you don't understand what's going on in here,
# that's perfectly ok.
def __init__(self, use_datetime=False, use_builtin_types=False):
self._type = None
self._stack = []
self._marks = []
self._data = []
self._value = False
self._methodname = None
self._encoding = "utf-8"
self.append = self._stack.append
self._use_datetime = use_builtin_types or use_datetime
self._use_bytes = use_builtin_types
def close(self):
# return response tuple and target method
if self._type is None or self._marks:
raise ResponseError()
if self._type == "fault":
raise Fault(**self._stack[0])
return tuple(self._stack)
def getmethodname(self):
return self._methodname
#
# event handlers
def xml(self, encoding, standalone):
self._encoding = encoding
# FIXME: assert standalone == 1 ???
def start(self, tag, attrs):
# prepare to handle this element
if ':' in tag:
tag = tag.split(':')[-1]
if tag == "array" or tag == "struct":
self._marks.append(len(self._stack))
self._data = []
if self._value and tag not in self.dispatch:
raise ResponseError("unknown tag %r" % tag)
self._value = (tag == "value")
def data(self, text):
self._data.append(text)
def end(self, tag):
# call the appropriate end tag handler
try:
f = self.dispatch[tag]
except KeyError:
if ':' not in tag:
return # unknown tag ?
try:
f = self.dispatch[tag.split(':')[-1]]
except KeyError:
return # unknown tag ?
return f(self, "".join(self._data))
#
# accelerator support
def end_dispatch(self, tag, data):
# dispatch data
try:
f = self.dispatch[tag]
except KeyError:
if ':' not in tag:
return # unknown tag ?
try:
f = self.dispatch[tag.split(':')[-1]]
except KeyError:
return # unknown tag ?
return f(self, data)
#
# element decoders
dispatch = {}
def end_nil (self, data):
self.append(None)
self._value = 0
dispatch["nil"] = end_nil
def end_boolean(self, data):
if data == "0":
self.append(False)
elif data == "1":
self.append(True)
else:
raise TypeError("bad boolean value")
self._value = 0
dispatch["boolean"] = end_boolean
def end_int(self, data):
self.append(int(data))
self._value = 0
dispatch["i1"] = end_int
dispatch["i2"] = end_int
dispatch["i4"] = end_int
dispatch["i8"] = end_int
dispatch["int"] = end_int
dispatch["biginteger"] = end_int
def end_double(self, data):
self.append(float(data))
self._value = 0
dispatch["double"] = end_double
dispatch["float"] = end_double
def end_bigdecimal(self, data):
self.append(Decimal(data))
self._value = 0
dispatch["bigdecimal"] = end_bigdecimal
def end_string(self, data):
if self._encoding:
data = data.decode(self._encoding)
self.append(data)
self._value = 0
dispatch["string"] = end_string
dispatch["name"] = end_string # struct keys are always strings
def end_array(self, data):
mark = self._marks.pop()
# map arrays to Python lists
self._stack[mark:] = [self._stack[mark:]]
self._value = 0
dispatch["array"] = end_array
def end_struct(self, data):
mark = self._marks.pop()
# map structs to Python dictionaries
dict = {}
items = self._stack[mark:]
for i in range(0, len(items), 2):
dict[items[i]] = items[i+1]
self._stack[mark:] = [dict]
self._value = 0
dispatch["struct"] = end_struct
def end_base64(self, data):
value = Binary()
value.decode(data.encode("ascii"))
if self._use_bytes:
value = value.data
self.append(value)
self._value = 0
dispatch["base64"] = end_base64
def end_dateTime(self, data):
value = DateTime()
value.decode(data)
if self._use_datetime:
value = _datetime_type(data)
self.append(value)
dispatch["dateTime.iso8601"] = end_dateTime
def end_value(self, data):
# if we stumble upon a value element with no internal
# elements, treat it as a string element
if self._value:
self.end_string(data)
dispatch["value"] = end_value
def end_params(self, data):
self._type = "params"
dispatch["params"] = end_params
def end_fault(self, data):
self._type = "fault"
dispatch["fault"] = end_fault
def end_methodName(self, data):
if self._encoding:
data = data.decode(self._encoding)
self._methodname = data
self._type = "methodName" # no params
dispatch["methodName"] = end_methodName
## Multicall support
#
class _MultiCallMethod:
# some lesser magic to store calls made to a MultiCall object
# for batch execution
def __init__(self, call_list, name):
self.__call_list = call_list
self.__name = name
def __getattr__(self, name):
return _MultiCallMethod(self.__call_list, "%s.%s" % (self.__name, name))
def __call__(self, *args):
self.__call_list.append((self.__name, args))
class MultiCallIterator:
"""Iterates over the results of a multicall. Exceptions are
raised in response to xmlrpc faults."""
def __init__(self, results):
self.results = results
def __getitem__(self, i):
item = self.results[i]
if type(item) == type({}):
raise Fault(item['faultCode'], item['faultString'])
elif type(item) == type([]):
return item[0]
else:
raise ValueError("unexpected type in multicall result")
class MultiCall:
"""server -> an object used to boxcar method calls
server should be a ServerProxy object.
Methods can be added to the MultiCall using normal
method call syntax e.g.:
multicall = MultiCall(server_proxy)
multicall.add(2,3)
multicall.get_address("Guido")
To execute the multicall, call the MultiCall object e.g.:
add_result, address = multicall()
"""
def __init__(self, server):
self.__server = server
self.__call_list = []
def __repr__(self):
return "<%s at %#x>" % (self.__class__.__name__, id(self))
__str__ = __repr__
def __getattr__(self, name):
return _MultiCallMethod(self.__call_list, name)
def __call__(self):
marshalled_list = []
for name, args in self.__call_list:
marshalled_list.append({'methodName' : name, 'params' : args})
return MultiCallIterator(self.__server.system.multicall(marshalled_list))
# --------------------------------------------------------------------
# convenience functions
FastMarshaller = FastParser = FastUnmarshaller = None
##
# Create a parser object, and connect it to an unmarshalling instance.
# This function picks the fastest available XML parser.
#
# return A (parser, unmarshaller) tuple.
def getparser(use_datetime=False, use_builtin_types=False):
"""getparser() -> parser, unmarshaller
Create an instance of the fastest available parser, and attach it
to an unmarshalling object. Return both objects.
"""
if FastParser and FastUnmarshaller:
if use_builtin_types:
mkdatetime = _datetime_type
mkbytes = base64.decodebytes
elif use_datetime:
mkdatetime = _datetime_type
mkbytes = _binary
else:
mkdatetime = _datetime
mkbytes = _binary
target = FastUnmarshaller(True, False, mkbytes, mkdatetime, Fault)
parser = FastParser(target)
else:
target = Unmarshaller(use_datetime=use_datetime, use_builtin_types=use_builtin_types)
if FastParser:
parser = FastParser(target)
else:
parser = ExpatParser(target)
return parser, target
##
# Convert a Python tuple or a Fault instance to an XML-RPC packet.
#
# @def dumps(params, **options)
# @param params A tuple or Fault instance.
# @keyparam methodname If given, create a methodCall request for
# this method name.
# @keyparam methodresponse If given, create a methodResponse packet.
# If used with a tuple, the tuple must be a singleton (that is,
# it must contain exactly one element).
# @keyparam encoding The packet encoding.
# @return A string containing marshalled data.
def dumps(params, methodname=None, methodresponse=None, encoding=None,
allow_none=False):
"""data [,options] -> marshalled data
Convert an argument tuple or a Fault instance to an XML-RPC
request (or response, if the methodresponse option is used).
In addition to the data object, the following options can be given
as keyword arguments:
methodname: the method name for a methodCall packet
methodresponse: true to create a methodResponse packet.
If this option is used with a tuple, the tuple must be
a singleton (i.e. it can contain only one element).
encoding: the packet encoding (default is UTF-8)
All byte strings in the data structure are assumed to use the
packet encoding. Unicode strings are automatically converted,
where necessary.
"""
assert isinstance(params, (tuple, Fault)), "argument must be tuple or Fault instance"
if isinstance(params, Fault):
methodresponse = 1
elif methodresponse and isinstance(params, tuple):
assert len(params) == 1, "response tuple must be a singleton"
if not encoding:
encoding = "utf-8"
if FastMarshaller:
m = FastMarshaller(encoding)
else:
m = Marshaller(encoding, allow_none)
data = m.dumps(params)
if encoding != "utf-8":
xmlheader = "<?xml version='1.0' encoding='%s'?>\n" % str(encoding)
else:
xmlheader = "<?xml version='1.0'?>\n" # utf-8 is default
# standard XML-RPC wrappings
if methodname:
# a method call
data = (
xmlheader,
"<methodCall>\n"
"<methodName>", methodname, "</methodName>\n",
data,
"</methodCall>\n"
)
elif methodresponse:
# a method response, or a fault structure
data = (
xmlheader,
"<methodResponse>\n",
data,
"</methodResponse>\n"
)
else:
return data # return as is
return "".join(data)
##
# Convert an XML-RPC packet to a Python object. If the XML-RPC packet
# represents a fault condition, this function raises a Fault exception.
#
# @param data An XML-RPC packet, given as an 8-bit string.
# @return A tuple containing the unpacked data, and the method name
# (None if not present).
# @see Fault
def loads(data, use_datetime=False, use_builtin_types=False):
"""data -> unmarshalled data, method name
Convert an XML-RPC packet to unmarshalled data plus a method
name (None if not present).
If the XML-RPC packet represents a fault condition, this function
raises a Fault exception.
"""
p, u = getparser(use_datetime=use_datetime, use_builtin_types=use_builtin_types)
p.feed(data)
p.close()
return u.close(), u.getmethodname()
##
# Encode a string using the gzip content encoding such as specified by the
# Content-Encoding: gzip
# in the HTTP header, as described in RFC 1952
#
# @param data the unencoded data
# @return the encoded data
def gzip_encode(data):
"""data -> gzip encoded data
Encode data using the gzip content encoding as described in RFC 1952
"""
if not gzip:
raise NotImplementedError
f = BytesIO()
with gzip.GzipFile(mode="wb", fileobj=f, compresslevel=1) as gzf:
gzf.write(data)
return f.getvalue()
##
# Decode a string using the gzip content encoding such as specified by the
# Content-Encoding: gzip
# in the HTTP header, as described in RFC 1952
#
# @param data The encoded data
# @keyparam max_decode Maximum bytes to decode (20 MiB default), use negative
# values for unlimited decoding
# @return the unencoded data
# @raises ValueError if data is not correctly coded.
# @raises ValueError if max gzipped payload length exceeded
def gzip_decode(data, max_decode=20971520):
"""gzip encoded data -> unencoded data
Decode data using the gzip content encoding as described in RFC 1952
"""
if not gzip:
raise NotImplementedError
with gzip.GzipFile(mode="rb", fileobj=BytesIO(data)) as gzf:
try:
if max_decode < 0: # no limit
decoded = gzf.read()
else:
decoded = gzf.read(max_decode + 1)
except OSError:
raise ValueError("invalid data")
if max_decode >= 0 and len(decoded) > max_decode:
raise ValueError("max gzipped payload length exceeded")
return decoded
##
# Return a decoded file-like object for the gzip encoding
# as described in RFC 1952.
#
# @param response A stream supporting a read() method
# @return a file-like object that the decoded data can be read() from
class GzipDecodedResponse(gzip.GzipFile if gzip else object):
"""a file-like object to decode a response encoded with the gzip
method, as described in RFC 1952.
"""
def __init__(self, response):
#response doesn't support tell() and read(), required by
#GzipFile
if not gzip:
raise NotImplementedError
self.io = BytesIO(response.read())
gzip.GzipFile.__init__(self, mode="rb", fileobj=self.io)
def close(self):
try:
gzip.GzipFile.close(self)
finally:
self.io.close()
# --------------------------------------------------------------------
# request dispatcher
class _Method:
# some magic to bind an XML-RPC method to an RPC server.
# supports "nested" methods (e.g. examples.getStateName)
def __init__(self, send, name):
self.__send = send
self.__name = name
def __getattr__(self, name):
return _Method(self.__send, "%s.%s" % (self.__name, name))
def __call__(self, *args):
return self.__send(self.__name, args)
##
# Standard transport class for XML-RPC over HTTP.
# <p>
# You can create custom transports by subclassing this class, and
# overriding selected methods.
class Transport:
"""Handles an HTTP transaction to an XML-RPC server."""
# client identifier (may be overridden)
user_agent = "Python-xmlrpc/%s" % __version__
#if true, we'll request gzip encoding
accept_gzip_encoding = True
# if positive, encode request using gzip if it exceeds this threshold
# note that many servers will get confused, so only use it if you know
# that they can decode such a request
encode_threshold = None #None = don't encode
def __init__(self, use_datetime=False, use_builtin_types=False):
self._use_datetime = use_datetime
self._use_builtin_types = use_builtin_types
self._connection = (None, None)
self._extra_headers = []
##
# Send a complete request, and parse the response.
# Retry request if a cached connection has disconnected.
#
# @param host Target host.
# @param handler Target RPC handler.
# @param request_body XML-RPC request body.
# @param verbose Debugging flag.
# @return Parsed response.
def request(self, host, handler, request_body, verbose=False):
#retry request once if cached connection has gone cold
for i in (0, 1):
try:
return self.single_request(host, handler, request_body, verbose)
except http.client.RemoteDisconnected:
if i:
raise
except OSError as e:
if i or e.errno not in (errno.ECONNRESET, errno.ECONNABORTED,
errno.EPIPE):
raise
def single_request(self, host, handler, request_body, verbose=False):
# issue XML-RPC request
try:
http_conn = self.send_request(host, handler, request_body, verbose)
resp = http_conn.getresponse()
if resp.status == 200:
self.verbose = verbose
return self.parse_response(resp)
except Fault:
raise
except Exception:
#All unexpected errors leave connection in
# a strange state, so we clear it.
self.close()
raise
#We got an error response.
#Discard any response data and raise exception
if resp.getheader("content-length", ""):
resp.read()
raise ProtocolError(
host + handler,
resp.status, resp.reason,
dict(resp.getheaders())
)
##
# Create parser.
#
# @return A 2-tuple containing a parser and an unmarshaller.
def getparser(self):
# get parser and unmarshaller
return getparser(use_datetime=self._use_datetime,
use_builtin_types=self._use_builtin_types)
##
# Get authorization info from host parameter
# Host may be a string, or a (host, x509-dict) tuple; if a string,
# it is checked for a "user:pw@host" format, and a "Basic
# Authentication" header is added if appropriate.
#
# @param host Host descriptor (URL or (URL, x509 info) tuple).
# @return A 3-tuple containing (actual host, extra headers,
# x509 info). The header and x509 fields may be None.
def get_host_info(self, host):
x509 = {}
if isinstance(host, tuple):
host, x509 = host
auth, host = urllib.parse.splituser(host)
if auth:
auth = urllib.parse.unquote_to_bytes(auth)
auth = base64.encodebytes(auth).decode("utf-8")
auth = "".join(auth.split()) # get rid of whitespace
extra_headers = [
("Authorization", "Basic " + auth)
]
else:
extra_headers = []
return host, extra_headers, x509
##
# Connect to server.
#
# @param host Target host.
# @return An HTTPConnection object
def make_connection(self, host):
#return an existing connection if possible. This allows
#HTTP/1.1 keep-alive.
if self._connection and host == self._connection[0]:
return self._connection[1]
# create a HTTP connection object from a host descriptor
chost, self._extra_headers, x509 = self.get_host_info(host)
self._connection = host, http.client.HTTPConnection(chost)
return self._connection[1]
##
# Clear any cached connection object.
# Used in the event of socket errors.
#
def close(self):
host, connection = self._connection
if connection:
self._connection = (None, None)
connection.close()
##
# Send HTTP request.
#
# @param host Host descriptor (URL or (URL, x509 info) tuple).
# @param handler Target RPC handler (a path relative to host)
# @param request_body The XML-RPC request body
# @param debug Enable debugging if debug is true.
# @return An HTTPConnection.
def send_request(self, host, handler, request_body, debug):
connection = self.make_connection(host)
headers = self._extra_headers[:]
if debug:
connection.set_debuglevel(1)
if self.accept_gzip_encoding and gzip:
connection.putrequest("POST", handler, skip_accept_encoding=True)
headers.append(("Accept-Encoding", "gzip"))
else:
connection.putrequest("POST", handler)
headers.append(("Content-Type", "text/xml"))
headers.append(("User-Agent", self.user_agent))
self.send_headers(connection, headers)
self.send_content(connection, request_body)
return connection
##
# Send request headers.
# This function provides a useful hook for subclassing
#
# @param connection httpConnection.
# @param headers list of key,value pairs for HTTP headers
def send_headers(self, connection, headers):
for key, val in headers:
connection.putheader(key, val)
##
# Send request body.
# This function provides a useful hook for subclassing
#
# @param connection httpConnection.
# @param request_body XML-RPC request body.
def send_content(self, connection, request_body):
#optionally encode the request
if (self.encode_threshold is not None and
self.encode_threshold < len(request_body) and
gzip):
connection.putheader("Content-Encoding", "gzip")
request_body = gzip_encode(request_body)
connection.putheader("Content-Length", str(len(request_body)))
connection.endheaders(request_body)
##
# Parse response.
#
# @param file Stream.
# @return Response tuple and target method.
def parse_response(self, response):
# read response data from httpresponse, and parse it
# Check for new http response object, otherwise it is a file object.
if hasattr(response, 'getheader'):
if response.getheader("Content-Encoding", "") == "gzip":
stream = GzipDecodedResponse(response)
else:
stream = response
else:
stream = response
p, u = self.getparser()
while 1:
data = stream.read(1024)
if not data:
break
if self.verbose:
print("body:", repr(data))
p.feed(data)
if stream is not response:
stream.close()
p.close()
return u.close()
##
# Standard transport class for XML-RPC over HTTPS.
class SafeTransport(Transport):
"""Handles an HTTPS transaction to an XML-RPC server."""
def __init__(self, use_datetime=False, use_builtin_types=False, *,
context=None):
super().__init__(use_datetime=use_datetime, use_builtin_types=use_builtin_types)
self.context = context
# FIXME: mostly untested
def make_connection(self, host):
if self._connection and host == self._connection[0]:
return self._connection[1]
if not hasattr(http.client, "HTTPSConnection"):
raise NotImplementedError(
"your version of http.client doesn't support HTTPS")
# create a HTTPS connection object from a host descriptor
# host may be a string, or a (host, x509-dict) tuple
chost, self._extra_headers, x509 = self.get_host_info(host)
self._connection = host, http.client.HTTPSConnection(chost,
None, context=self.context, **(x509 or {}))
return self._connection[1]
##
# Standard server proxy. This class establishes a virtual connection
# to an XML-RPC server.
# <p>
# This class is available as ServerProxy and Server. New code should
# use ServerProxy, to avoid confusion.
#
# @def ServerProxy(uri, **options)
# @param uri The connection point on the server.
# @keyparam transport A transport factory, compatible with the
# standard transport class.
# @keyparam encoding The default encoding used for 8-bit strings
# (default is UTF-8).
# @keyparam verbose Use a true value to enable debugging output.
# (printed to standard output).
# @see Transport
class ServerProxy:
"""uri [,options] -> a logical connection to an XML-RPC server
uri is the connection point on the server, given as
scheme://host/target.
The standard implementation always supports the "http" scheme. If
SSL socket support is available, it also supports "https".
If the target part and the slash preceding it are both omitted,
"/RPC2" is assumed.
The following options can be given as keyword arguments:
transport: a transport factory
encoding: the request encoding (default is UTF-8)
All 8-bit strings passed to the server proxy are assumed to use
the given encoding.
"""
def __init__(self, uri, transport=None, encoding=None, verbose=False,
allow_none=False, use_datetime=False, use_builtin_types=False,
*, context=None):
# establish a "logical" server connection
# get the url
type, uri = urllib.parse.splittype(uri)
if type not in ("http", "https"):
raise OSError("unsupported XML-RPC protocol")
self.__host, self.__handler = urllib.parse.splithost(uri)
if not self.__handler:
self.__handler = "/RPC2"
if transport is None:
if type == "https":
handler = SafeTransport
extra_kwargs = {"context": context}
else:
handler = Transport
extra_kwargs = {}
transport = handler(use_datetime=use_datetime,
use_builtin_types=use_builtin_types,
**extra_kwargs)
self.__transport = transport
self.__encoding = encoding or 'utf-8'
self.__verbose = verbose
self.__allow_none = allow_none
def __close(self):
self.__transport.close()
def __request(self, methodname, params):
# call a method on the remote server
request = dumps(params, methodname, encoding=self.__encoding,
allow_none=self.__allow_none).encode(self.__encoding, 'xmlcharrefreplace')
response = self.__transport.request(
self.__host,
self.__handler,
request,
verbose=self.__verbose
)
if len(response) == 1:
response = response[0]
return response
def __repr__(self):
return (
"<%s for %s%s>" %
(self.__class__.__name__, self.__host, self.__handler)
)
__str__ = __repr__
def __getattr__(self, name):
# magic method dispatcher
return _Method(self.__request, name)
# note: to call a remote object with a non-standard name, use
# result getattr(server, "strange-python-name")(args)
def __call__(self, attr):
"""A workaround to get special attributes on the ServerProxy
without interfering with the magic __getattr__
"""
if attr == "close":
return self.__close
elif attr == "transport":
return self.__transport
raise AttributeError("Attribute %r not found" % (attr,))
def __enter__(self):
return self
def __exit__(self, *args):
self.__close()
# compatibility
Server = ServerProxy
# --------------------------------------------------------------------
# test code
if __name__ == "__main__":
# simple test program (from the XML-RPC specification)
# local server, available from Lib/xmlrpc/server.py
server = ServerProxy("http://localhost:8000")
try:
print(server.currentTime.getCurrentTime())
except Error as v:
print("ERROR", v)
multi = MultiCall(server)
multi.getData()
multi.pow(2,9)
multi.add(1,2)
try:
for response in multi():
print(response)
except Error as v:
print("ERROR", v)
|
Justin-Yuan/Image2Music-Generator
|
refs/heads/master
|
library/jython2.5.3/Lib/test/test_threading_jy.py
|
9
|
"""Misc threading module tests
Made for Jython.
"""
from __future__ import with_statement
import random
import subprocess
import sys
import threading
import time
import unittest
from subprocess import PIPE, Popen
from test import test_support
from threading import Condition, Lock, Thread
from java.lang import Thread as JThread, InterruptedException
class ThreadingTestCase(unittest.TestCase):
def test_str_name(self):
t = Thread(name=1)
self.assertEqual(t.getName(), '1')
t.setName(2)
self.assertEqual(t.getName(), '2')
# make sure activeCount() gets decremented (see issue 1348)
def test_activeCount(self):
activeBefore = threading.activeCount()
activeCount = 10
for i in range(activeCount):
t = Thread(target=self._sleep, args=(i,))
t.setDaemon(0)
t.start()
polls = activeCount
while activeCount > activeBefore and polls > 0:
time.sleep(1)
activeCount = threading.activeCount()
polls -= 1
self.assertTrue(activeCount <= activeBefore, 'activeCount should be <= %s, instead of %s' % (activeBefore, activeCount))
def _sleep(self, n):
time.sleep(random.random())
class TwistedTestCase(unittest.TestCase):
def test_needs_underscored_versions(self):
self.assertEqual(threading.Lock, threading._Lock)
self.assertEqual(threading.RLock, threading._RLock)
class JavaIntegrationTestCase(unittest.TestCase):
"""Verifies that Thread.__tojava__ correctly gets the underlying Java thread"""
def test_interruptible(self):
def wait_until_interrupted(cv):
name = threading.currentThread().getName()
with cv:
while not JThread.currentThread().isInterrupted():
try:
cv.wait()
except InterruptedException, e:
break
num_threads = 5
unfair_condition = Condition()
threads = [
Thread(
name="thread #%d" % i,
target=wait_until_interrupted,
args=(unfair_condition,))
for i in xrange(num_threads)]
for thread in threads:
thread.start()
time.sleep(0.1)
for thread in threads:
JThread.interrupt(thread)
joined_threads = 0
for thread in threads:
thread.join(1.) # timeout just in case so we don't stall regrtest
joined_threads += 1
self.assertEqual(joined_threads, num_threads)
class MemoryLeakTestCase(unittest.TestCase):
def test_socket_server(self):
# run socketserver with a small amount of memory; verify it exits cleanly
rc = subprocess.call([sys.executable,
"-J-Xmx32m",
test_support.findfile("socketserver_test.py")])
# stdout=PIPE)
self.assertEquals(rc, 0)
def test_main():
test_support.run_unittest(
JavaIntegrationTestCase,
MemoryLeakTestCase,
ThreadingTestCase,
TwistedTestCase)
if __name__ == "__main__":
test_main()
|
anthonylife/EventRecommendation
|
refs/heads/master
|
script/cntNumUserAttend.py
|
1
|
#!/usr/bin/env python
#encoding=utf8
#Copyright [2014] [Wei Zhang]
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#http://www.apache.org/licenses/LICENSE-2.0
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
###################################################################
# Date: 2014/6/14 #
# Count distribution of number of users attending activities. #
###################################################################
import csv, json, sys
from collections import defaultdict
with open("../SETTINGS.json") as fp:
settings = json.loads(fp.read())
def main():
eventinfo_path = settings["ROOT_PATH"] + settings["SRC_DATA_FILE1_1"]
staresult_path = "./staresult.txt"
num_attendant = defaultdict(int)
total_num = 0
for i, line in enumerate(open(eventinfo_path)):
try:
num = int(line.strip("\r\t\n").split(",")[9])
num_attendant[num] += 1
total_num += 1
except:
print line
print i
sys.exit(1)
cum_prob = 0.0
num_attendant = sorted(num_attendant.items(), key=lambda x:x[0])
wfd = open(staresult_path, "w")
for pair in num_attendant:
cum_prob += 1.0*pair[1]/total_num
wfd.write("%d %d %.4f\n" % (pair[0], pair[1], cum_prob))
wfd.close()
if __name__ == "__main__":
main()
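# Illustrative output line (hypothetical values): each row of staresult.txt is
# "<num_attendants> <event_count> <cumulative_probability>", e.g. "10 37 0.4215".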
|
wlerin/streamlink
|
refs/heads/master
|
src/streamlink/stream/flvconcat.py
|
6
|
from __future__ import division
import logging
from collections import namedtuple
from io import IOBase
from itertools import chain, islice
from threading import Thread
from ..buffers import RingBuffer
from ..packages.flashmedia import FLVError
from ..packages.flashmedia.tag import (AudioData, AACAudioData, VideoData,
AVCVideoData, VideoCommandFrame,
Header, ScriptData, Tag)
from ..packages.flashmedia.tag import (AAC_PACKET_TYPE_SEQUENCE_HEADER,
AVC_PACKET_TYPE_SEQUENCE_HEADER,
AUDIO_CODEC_ID_AAC,
VIDEO_CODEC_ID_AVC,
TAG_TYPE_AUDIO,
TAG_TYPE_VIDEO)
__all__ = ["extract_flv_header_tags", "FLVTagConcat", "FLVTagConcatIO"]
log = logging.getLogger(__name__)
FLVHeaderTags = namedtuple("FLVHeaderTags", "metadata aac vc")
def iter_flv_tags(fd=None, buf=None, strict=False, skip_header=False):
if not (fd or buf):
return
offset = 0
if not skip_header:
if fd:
Header.deserialize(fd)
elif buf:
header, offset = Header.deserialize_from(buf, offset)
while fd or buf and offset < len(buf):
try:
if fd:
tag = Tag.deserialize(fd, strict=strict)
elif buf:
tag, offset = Tag.deserialize_from(buf, offset, strict=strict)
except (IOError, FLVError) as err:
if "Insufficient tag header" in str(err):
break
raise IOError(err)
yield tag
def extract_flv_header_tags(stream):
fd = stream.open()
metadata = aac_header = avc_header = None
for tag_index, tag in enumerate(iter_flv_tags(fd)):
if isinstance(tag.data, ScriptData) and tag.data.name == "onMetaData":
metadata = tag
elif (isinstance(tag.data, VideoData) and
isinstance(tag.data.data, AVCVideoData)):
if tag.data.data.type == AVC_PACKET_TYPE_SEQUENCE_HEADER:
avc_header = tag
elif (isinstance(tag.data, AudioData) and
isinstance(tag.data.data, AACAudioData)):
if tag.data.data.type == AAC_PACKET_TYPE_SEQUENCE_HEADER:
aac_header = tag
if aac_header and avc_header and metadata:
break
# Give up after 10 tags
if tag_index == 9:
break
return FLVHeaderTags(metadata, aac_header, avc_header)
class FLVTagConcat(object):
def __init__(self, duration=None, tags=[], has_video=True, has_audio=True,
flatten_timestamps=False, sync_headers=False):
self.duration = duration
self.flatten_timestamps = flatten_timestamps
self.has_audio = has_audio
self.has_video = has_video
self.sync_headers = sync_headers
self.tags = tags
if not (has_audio and has_video):
self.sync_headers = False
self.audio_header_written = False
self.flv_header_written = False
self.video_header_written = False
self.timestamps_add = {}
self.timestamps_orig = {}
self.timestamps_sub = {}
@property
def headers_written(self):
return self.audio_header_written and self.video_header_written
def verify_tag(self, tag):
if tag.filter:
raise IOError("Tag has filter flag set, probably encrypted")
# Only AAC and AVC have detectable headers
if isinstance(tag.data, AudioData) and tag.data.codec != AUDIO_CODEC_ID_AAC:
self.audio_header_written = True
if isinstance(tag.data, VideoData) and tag.data.codec != VIDEO_CODEC_ID_AVC:
self.video_header_written = True
# Make sure there is no timestamp gap between audio and video when syncing
if self.sync_headers and self.timestamps_sub and not self.headers_written:
self.timestamps_sub = {}
if isinstance(tag.data, AudioData):
if isinstance(tag.data.data, AACAudioData):
if tag.data.data.type == AAC_PACKET_TYPE_SEQUENCE_HEADER:
if self.audio_header_written:
return
self.audio_header_written = True
else:
if self.sync_headers and not self.headers_written:
return
if not self.audio_header_written:
return
else:
if self.sync_headers and not self.headers_written:
return
elif isinstance(tag.data, VideoData):
if isinstance(tag.data.data, AVCVideoData):
if tag.data.data.type == AVC_PACKET_TYPE_SEQUENCE_HEADER:
if self.video_header_written:
return
self.video_header_written = True
else:
if self.sync_headers and not self.headers_written:
return
if not self.video_header_written:
return
elif isinstance(tag.data.data, VideoCommandFrame):
return
else:
if self.sync_headers and not self.headers_written:
return
elif isinstance(tag.data, ScriptData):
if tag.data.name == "onMetaData":
if self.duration:
tag.data.value["duration"] = self.duration
elif "duration" in tag.data.value:
del tag.data.value["duration"]
else:
return False
return True
def adjust_tag_gap(self, tag):
timestamp_gap = tag.timestamp - self.timestamps_orig.get(tag.type, 0)
timestamp_sub = self.timestamps_sub.get(tag.type)
if timestamp_gap > 1000 and timestamp_sub is not None:
self.timestamps_sub[tag.type] += timestamp_gap
self.timestamps_orig[tag.type] = tag.timestamp
def adjust_tag_timestamp(self, tag):
timestamp_offset_sub = self.timestamps_sub.get(tag.type)
if timestamp_offset_sub is None and tag not in self.tags:
self.timestamps_sub[tag.type] = tag.timestamp
timestamp_offset_sub = self.timestamps_sub.get(tag.type)
timestamp_offset_add = self.timestamps_add.get(tag.type)
if timestamp_offset_add:
tag.timestamp = max(0, tag.timestamp + timestamp_offset_add)
elif timestamp_offset_sub:
tag.timestamp = max(0, tag.timestamp - timestamp_offset_sub)
def analyze_tags(self, tag_iterator):
tags = list(islice(tag_iterator, 10))
audio_tags = len(list(filter(lambda t: t.type == TAG_TYPE_AUDIO, tags)))
video_tags = len(list(filter(lambda t: t.type == TAG_TYPE_VIDEO, tags)))
self.has_audio = audio_tags > 0
self.has_video = video_tags > 0
if not (self.has_audio and self.has_video):
self.sync_headers = False
return tags
def iter_tags(self, fd=None, buf=None, skip_header=None):
if skip_header is None:
skip_header = bool(self.tags)
tags_iterator = filter(None, self.tags)
flv_iterator = iter_flv_tags(fd=fd, buf=buf, skip_header=skip_header)
for tag in chain(tags_iterator, flv_iterator):
yield tag
def iter_chunks(self, fd=None, buf=None, skip_header=None):
"""Reads FLV tags from fd or buf and returns them with adjusted
timestamps."""
timestamps = dict(self.timestamps_add)
tag_iterator = self.iter_tags(fd=fd, buf=buf, skip_header=skip_header)
if not self.flv_header_written:
analyzed_tags = self.analyze_tags(tag_iterator)
else:
analyzed_tags = []
for tag in chain(analyzed_tags, tag_iterator):
if not self.flv_header_written:
flv_header = Header(has_video=self.has_video,
has_audio=self.has_audio)
yield flv_header.serialize()
self.flv_header_written = True
if self.verify_tag(tag):
self.adjust_tag_gap(tag)
self.adjust_tag_timestamp(tag)
if self.duration:
norm_timestamp = tag.timestamp / 1000
if norm_timestamp > self.duration:
break
yield tag.serialize()
timestamps[tag.type] = tag.timestamp
if not self.flatten_timestamps:
self.timestamps_add = timestamps
self.tags = []
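# Illustrative sketch (hypothetical file names, not part of streamlink):
# concatenating two FLV segments into one continuous stream by feeding their
# file objects to iter_chunks(), which emits a single FLV header and rewrites
# tag timestamps across segment boundaries.
#
#   concat = FLVTagConcat()
#   with open("out.flv", "wb") as out:
#       for name in ("segment0.flv", "segment1.flv"):
#           with open(name, "rb") as fd:
#               for chunk in concat.iter_chunks(fd):
#                   out.write(chunk)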
class FLVTagConcatWorker(Thread):
def __init__(self, iterator, stream):
self.error = None
self.stream = stream
self.stream_iterator = iterator
self.concater = FLVTagConcat(stream.duration, stream.tags,
**stream.concater_params)
Thread.__init__(self)
self.daemon = True
def run(self):
for fd in self.stream_iterator:
try:
chunks = self.concater.iter_chunks(
fd, skip_header=self.stream.skip_header
)
for chunk in chunks:
self.stream.buffer.write(chunk)
if not self.running:
return
except IOError as err:
self.error = err
break
self.stop()
def stop(self):
self.running = False
self.stream.buffer.close()
def start(self):
self.running = True
return Thread.start(self)
class FLVTagConcatIO(IOBase):
__worker__ = FLVTagConcatWorker
def __init__(self, session, duration=None, tags=[], skip_header=None,
timeout=30, **concater_params):
self.session = session
self.timeout = timeout
self.concater_params = concater_params
self.duration = duration
self.skip_header = skip_header
self.tags = tags
def open(self, iterator):
self.buffer = RingBuffer(self.session.get_option("ringbuffer-size"))
self.worker = self.__worker__(iterator, self)
self.worker.start()
def close(self):
self.worker.stop()
if self.worker.is_alive():
self.worker.join()
def read(self, size=-1):
if not self.buffer:
return b""
if self.worker.error:
raise self.worker.error
return self.buffer.read(size, block=self.worker.is_alive(),
timeout=self.timeout)
|
listen-lavender/webcrawl
|
refs/heads/master
|
webcrawl/queue/lib/__init__.py
|
1
|
#!/usr/bin/env python
# coding=utf8
import redis
|
rlefevre1/hpp-rbprm-corba
|
refs/heads/modifs
|
script/dynamic/downSlope_hrp2_pathKino.py
|
1
|
from hpp.corbaserver.rbprm.rbprmbuilder import Builder
from hpp.gepetto import Viewer
from hpp.corbaserver import Client
from hpp.corbaserver.robot import Robot as Parent
from hpp.corbaserver.rbprm.problem_solver import ProblemSolver
class Robot (Parent):
rootJointType = 'freeflyer'
packageName = 'hpp-rbprm-corba'
meshPackageName = 'hpp-rbprm-corba'
# URDF file describing the trunk of the robot HyQ
urdfName = 'hrp2_trunk_flexible'
urdfSuffix = ""
srdfSuffix = ""
def __init__ (self, robotName, load = True):
Parent.__init__ (self, robotName, self.rootJointType, load)
self.tf_root = "base_footprint"
self.client.basic = Client ()
self.load = load
rootJointType = 'freeflyer'
packageName = 'hpp-rbprm-corba'
meshPackageName = 'hpp-rbprm-corba'
urdfName = 'hrp2_trunk_flexible'
urdfNameRom = ['hrp2_larm_rom','hrp2_rarm_rom','hrp2_lleg_rom','hrp2_rleg_rom']
urdfSuffix = ""
srdfSuffix = ""
vMax = 4;
aMax = 6;
extraDof = 6
# Creating an instance of the helper class, and loading the robot
rbprmBuilder = Builder ()
rbprmBuilder.loadModel(urdfName, urdfNameRom, rootJointType, meshPackageName, packageName, urdfSuffix, srdfSuffix)
#rbprmBuilder.setJointBounds ("base_joint_xyz", [-1.25,2, -0.5, 5.5, 0.6, 1.8])
rbprmBuilder.setJointBounds ("base_joint_xyz", [-2,4, 0.5, 1.5, 0.25, 1.8])
rbprmBuilder.setJointBounds('CHEST_JOINT0',[0,0])
rbprmBuilder.setJointBounds('CHEST_JOINT1',[-0.35,0.1])
rbprmBuilder.setJointBounds('HEAD_JOINT0',[0,0])
rbprmBuilder.setJointBounds('HEAD_JOINT1',[0,0])
# The following lines set constraints on the valid configurations:
# a configuration is valid only if all limbs can create a contact ...
rbprmBuilder.setFilter(['hrp2_lleg_rom','hrp2_rleg_rom'])
rbprmBuilder.setAffordanceFilter('hrp2_lleg_rom', ['Support',])
rbprmBuilder.setAffordanceFilter('hrp2_rleg_rom', ['Support'])
# We also bound the rotations of the torso. (z, y, x)
rbprmBuilder.boundSO3([-0.1,0.1,-0.65,0.65,-0.2,0.2])
rbprmBuilder.client.basic.robot.setDimensionExtraConfigSpace(extraDof)
rbprmBuilder.client.basic.robot.setExtraConfigSpaceBounds([-4,4,-1,1,-2,2,0,0,0,0,0,0])
indexECS = rbprmBuilder.getConfigSize() - rbprmBuilder.client.basic.robot.getDimensionExtraConfigSpace()
# Creating an instance of HPP problem solver and the viewer
ps = ProblemSolver( rbprmBuilder )
ps.client.problem.setParameter("aMax",aMax)
ps.client.problem.setParameter("vMax",vMax)
ps.client.problem.setParameter("tryJump",vMax)
ps.client.problem.setParameter("sizeFootX",0.24)
ps.client.problem.setParameter("sizeFootY",0.14)
r = Viewer (ps)
from hpp.corbaserver.affordance.affordance import AffordanceTool
afftool = AffordanceTool ()
afftool.setAffordanceConfig('Support', [0.5, 0.03, 0.00005])
afftool.loadObstacleModel (packageName, "downSlope", "planning", r)
#r.loadObstacleModel (packageName, "ground", "planning")
afftool.visualiseAffordances('Support', r, [0.25, 0.5, 0.5])
r.addLandmark(r.sceneName,1)
# Setting initial and goal configurations
q_init = rbprmBuilder.getCurrentConfig ();
q_init[3:7] = [1,0,0,0]
q_init[8] = -0.2
q_init [0:3] = [-1.6, 1, 1.75]; r (q_init)
#q_init[3:7] = [0.7071,0,0,0.7071]
#q_init [0:3] = [1, 1, 0.65]
rbprmBuilder.setCurrentConfig (q_init)
q_goal = q_init [::]
q_goal[3:7] = [1,0,0,0]
q_goal[8] = 0
q_goal [0:3] = [2.5, 1, 0.5]; r (q_goal)
r (q_goal)
#~ q_goal [0:3] = [-1.5, 0, 0.63]; r (q_goal)
# Choosing a path optimizer
ps.setInitialConfig (q_init)
ps.addGoalConfig (q_goal)
# Choosing RBPRM shooter and path validation methods.
ps.client.problem.selectConFigurationShooter("RbprmShooter")
ps.client.problem.selectPathValidation("RbprmPathValidation",0.05)
# Choosing kinodynamic methods :
ps.selectSteeringMethod("RBPRMKinodynamic")
ps.selectDistance("KinodynamicDistance")
ps.selectPathPlanner("DynamicPlanner")
#solve the problem :
r(q_init)
#r.solveAndDisplay("rm",1,0.01)
t = ps.solve ()
from hpp.gepetto import PathPlayer
pp = PathPlayer (rbprmBuilder.client.basic, r)
pp.dt=0.03
pp.displayVelocityPath(0)
r.client.gui.setVisibility("path_0_root","ALWAYS_ON_TOP")
"""
if isinstance(t, list):
t = t[0]* 3600000 + t[1] * 60000 + t[2] * 1000 + t[3]
f = open('log.txt', 'a')
f.write("path computation " + str(t) + "\n")
f.close()
"""
"""
for i in range(0,9):
t = ps.solve()
if isinstance(t, list):
ts = t[0]* 3600. + t[1] * 60. + t[2] + t[3]/1000.
f= open("/local/dev_hpp/logs/benchHrp2_slope_LP.txt","a")
f.write("t = "+str(ts) + "\n")
f.write("path_length = "+str(ps.client.problem.pathLength(i)) + "\n")
f.close()
print "problem "+str(i)+" solved \n"
ps.clearRoadmap()
"""
#ps.client.problem.prepareSolveStepByStep()
#ps.client.problem.finishSolveStepByStep()
q_far = q_init[::]
q_far[2] = -3
r(q_far)
"""
camera = [0.6293167471885681,
-9.560577392578125,
10.504343032836914,
0.9323806762695312,
0.36073973774909973,
0.008668755181133747,
0.02139890193939209]
r.client.gui.setCameraTransform(0,camera)
"""
"""
r.client.gui.removeFromGroup("rm",r.sceneName)
r.client.gui.removeFromGroup("rmstart",r.sceneName)
r.client.gui.removeFromGroup("rmgoal",r.sceneName)
for i in range(0,ps.numberNodes()):
r.client.gui.removeFromGroup("vecRM"+str(i),r.sceneName)
"""
"""
# for seed 1486657707
ps.client.problem.extractPath(0,0,2.15)
# Playing the computed path
from hpp.gepetto import PathPlayer
pp = PathPlayer (rbprmBuilder.client.basic, r)
pp.dt=0.03
pp.displayVelocityPath(1)
r.client.gui.setVisibility("path_1_root","ALWAYS_ON_TOP")
#display path
pp.speed=0.3
#pp (0)
"""
#display path with post-optimisation
"""
q_far = q_init[::]
q_far[2] = -3
r(q_far)
"""
# Manually add waypoints to roadmap:
"""
ps.client.problem.prepareSolveStepByStep()
pbCl = rbprmBuilder.client.basic.problem
q1= [0.6, 1, 0.5, 1, 0, 0, 0, 0.0, 0, 0.0, 0.0, 3, 0.0, -1.5, 0.0, 0.0, 0.0]
pbCl.addConfigToRoadmap (q1)
pbCl.directPath(q1,q_goal,True)
pbCl.directPath(q_init,q1,False)
r.client.gui.removeFromGroup("path_"+str(ps.numberPaths()-2)+"_root",r.sceneName)
pp.displayVelocityPath(ps.numberPaths()-1)
pbCl.addEdgeToRoadmap (q_init, q1, 1, False)
pbCl.addEdgeToRoadmap (q1, q_goal, 0, False)
"""
|
gangadharkadam/letzerp
|
refs/heads/v5.0
|
erpnext/templates/pages/product_search.py
|
35
|
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import cstr
from erpnext.setup.doctype.item_group.item_group import get_item_for_list_in_html
no_cache = 1
no_sitemap = 1
@frappe.whitelist(allow_guest=True)
def get_product_list(search=None, start=0, limit=10):
# base query
query = """select name, item_name, page_name, website_image, item_group,
web_long_description as website_description, parent_website_route
from `tabItem` where show_in_website = 1"""
# search term condition
if search:
query += """ and (web_long_description like %(search)s
or description like %(search)s
or item_name like %(search)s
or name like %(search)s)"""
search = "%" + cstr(search) + "%"
# order by
query += """ order by weightage desc, modified desc limit %s, %s""" % (start, limit)
data = frappe.db.sql(query, {
"search": search,
}, as_dict=1)
for d in data:
d.route = ((d.parent_website_route + "/") if d.parent_website_route else "") \
+ d.page_name
return [get_item_for_list_in_html(r) for r in data]
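# Illustrative usage (hypothetical values, not part of this module):
#   get_product_list(search="chair", start=10, limit=10)
# returns at most 10 rendered item blocks whose name or description matches
# "chair", ordered by weightage and modification time.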
|
jelugbo/hebs_master
|
refs/heads/master
|
lms/djangoapps/notifier_api/serializers.py
|
14
|
from django.contrib.auth.models import User
from django.http import Http404
from rest_framework import serializers
from course_groups.cohorts import is_course_cohorted
from notification_prefs import NOTIFICATION_PREF_KEY
from lang_pref import LANGUAGE_KEY
class NotifierUserSerializer(serializers.ModelSerializer):
"""
A serializer containing all information about a user needed by the notifier
(namely the user's name, email address, notification and language
preferences, and course enrollment and cohort information).
Because these pieces of information reside in different tables, this is
designed to work well with prefetch_related and select_related, which
require the use of all() instead of get() or filter(). The following fields
should be prefetched on the user objects being serialized:
* profile
* preferences
* courseenrollment_set
* course_groups
* roles__permissions
"""
name = serializers.SerializerMethodField("get_name")
preferences = serializers.SerializerMethodField("get_preferences")
course_info = serializers.SerializerMethodField("get_course_info")
def get_name(self, user):
return user.profile.name
def get_preferences(self, user):
return {
pref.key: pref.value
for pref
in user.preferences.all()
if pref.key in [LANGUAGE_KEY, NOTIFICATION_PREF_KEY]
}
def get_course_info(self, user):
cohort_id_map = {
cohort.course_id: cohort.id
for cohort in user.course_groups.all()
}
see_all_cohorts_set = {
role.course_id
for role in user.roles.all()
for perm in role.permissions.all() if perm.name == "see_all_cohorts"
}
ret = {}
for enrollment in user.courseenrollment_set.all():
if enrollment.is_active:
try:
ret[unicode(enrollment.course_id)] = {
"cohort_id": cohort_id_map.get(enrollment.course_id),
"see_all_cohorts": (
enrollment.course_id in see_all_cohorts_set or
not is_course_cohorted(enrollment.course_id)
),
}
except Http404: # is_course_cohorted raises this if course does not exist
pass
return ret
class Meta:
model = User
fields = ("id", "email", "name", "preferences", "course_info")
read_only_fields = ("id", "email")
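# Illustrative queryset sketch (assumed usage, not part of this module): the
# serializer expects related objects to be prefetched as listed in the class
# docstring, e.g.
#   users = (User.objects
#            .select_related("profile")
#            .prefetch_related("preferences", "courseenrollment_set",
#                              "course_groups", "roles__permissions"))
#   data = NotifierUserSerializer(users, many=True).data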
|
hendradarwin/VTK
|
refs/heads/master
|
Examples/GUI/Python/OrthogonalPlanesWithTkPhoto.py
|
9
|
import vtk
from vtk import *
import Tkinter
from Tkinter import *
import sys, os
import vtk.tk
import vtk.tk.vtkLoadPythonTkWidgets
import vtk.tk.vtkTkImageViewerWidget
from vtk.tk.vtkTkPhotoImage import *
from vtk.util.misc import *
class SampleViewer:
def __init__ ( self ):
self.Tk = Tk = Tkinter.Tk();
Tk.title ( 'Python Version of vtkImageDataToTkPhoto' );
# Image pipeline
reader = vtkVolume16Reader ()
reader.SetDataDimensions ( 64, 64 )
reader.SetDataByteOrderToLittleEndian ( )
reader.SetFilePrefix ( vtkGetDataRoot() + '/Data/headsq/quarter' )
reader.SetImageRange ( 1, 93 )
reader.SetDataSpacing ( 3.2, 3.2, 1.5 )
reader.Update ()
self.cast = cast = vtkImageCast()
cast.SetInputConnection( reader.GetOutputPort() )
cast.SetOutputScalarType ( reader.GetOutput().GetScalarType() )
cast.ClampOverflowOn()
# Make the image a little bigger
self.resample = resample = vtkImageResample ()
resample.SetInputConnection( cast.GetOutputPort() )
resample.SetAxisMagnificationFactor ( 0, 2 )
resample.SetAxisMagnificationFactor ( 1, 2 )
resample.SetAxisMagnificationFactor ( 2, 1 )
l,h = reader.GetOutput().GetScalarRange()
# Create the three orthogonal views
tphoto = self.tphoto = vtkTkPhotoImage ();
cphoto = self.cphoto = vtkTkPhotoImage ();
sphoto = self.sphoto = vtkTkPhotoImage ();
reader.Update()
d = reader.GetOutput().GetDimensions()
self.Position = [ int(d[0]/2.0), int(d[1]/2.0), int(d[2]/2.0) ]
# Create a popup menu
v = IntVar()
self.popup = popup = Menu ( Tk, tearoff=0 )
popup.add_radiobutton ( label='unsigned char', command=self.CastToUnsignedChar, variable=v, value=-1 )
popup.add_radiobutton ( label='unsigned short', command=self.CastToUnsignedShort, variable=v, value=0 )
popup.add_radiobutton ( label='unsigned int', command=self.CastToUnsignedInt, variable=v, value=1 )
popup.add_radiobutton ( label='float', command=self.CastToFloat, variable=v, value=2 )
v.set ( 0 )
w = self.TransverseLabelWidget = Label ( Tk, image = tphoto )
w.grid ( row = 0, column = 0 )
w.bind ( "<Button1-Motion>", lambda e, i=tphoto, o='transverse', s=self: s.Motion ( e, i, o ) )
w.bind ( "<Button-3>", self.DoPopup )
w = Label ( Tk, image = cphoto )
w.grid ( row = 1, column = 0 )
w.bind ( "<Button1-Motion>", lambda e, i=cphoto, o='coronal', s=self: s.Motion ( e, i, o ) )
w.bind ( "<Button-3>", self.DoPopup )
w = Label ( Tk, image = sphoto )
w.grid ( row = 0, column = 1 )
w.bind ( "<Button1-Motion>", lambda e, i=sphoto, o='sagittal', s=self: s.Motion ( e, i, o ) )
w.bind ( "<Button-3>", self.DoPopup )
w = self.WindowWidget = Scale ( Tk, label='Window', orient='horizontal', from_=1, to=(h-l)/2, command = self.SetWindowLevel )
w = self.LevelWidget = Scale ( Tk, label='Level', orient='horizontal', from_=l, to=h, command=self.SetWindowLevel )
self.WindowWidget.grid ( row=2, columnspan=2, sticky='ew' )
self.LevelWidget.grid ( row=3, columnspan=2, sticky='ew' );
self.WindowWidget.set ( 1370 );
self.LevelWidget.set ( 1268 );
w = self.LabelWidget = Label ( Tk, bd=2, relief='raised' )
w.grid ( row=4, columnspan=2, sticky='ew' )
w.configure ( text = "Use the right mouse button to change data type" )
def DoPopup ( self, event ):
self.popup.post ( event.x_root, event.y_root )
def CastToUnsignedChar ( self ):
self.cast.SetOutputScalarTypeToUnsignedChar()
self.SetImages()
def CastToUnsignedShort ( self ):
self.cast.SetOutputScalarTypeToUnsignedShort()
self.SetImages()
def CastToUnsignedInt ( self ):
self.cast.SetOutputScalarTypeToUnsignedInt()
self.SetImages()
def CastToFloat ( self ):
self.cast.SetOutputScalarTypeToFloat()
self.SetImages()
def Motion ( self, event, image, orientation ):
w = image.width();
h = image.height()
if orientation == 'transverse':
self.Position[0] = event.x
self.Position[1] = h - event.y - 1
if orientation == 'coronal':
self.Position[0] = event.x;
self.Position[2] = event.y
if orientation == 'sagittal':
self.Position[1] = w - event.x - 1
self.Position[2] = event.y
self.LabelWidget.configure ( text = "Position: %d, %d, %d" % tuple ( self.Position ) )
self.SetImages()
def SetWindowLevel ( self, event ):
self.SetImages()
def SetImages ( self ):
Window = self.WindowWidget.get()
Level = self.LevelWidget.get()
image = self.resample.GetOutput()
self.tphoto.PutImageSlice ( self.resample.GetOutputPort(),
self.Position[2],
'transverse',
Window,
Level )
self.sphoto.PutImageSlice ( self.resample.GetOutputPort(),
self.Position[0],
'sagittal',
Window,
Level )
self.cphoto.PutImageSlice ( self.resample.GetOutputPort(),
self.Position[1],
'coronal',
Window,
Level )
if __name__ == '__main__':
S = SampleViewer()
S.Tk.mainloop()
|
dslomov/intellij-community
|
refs/heads/master
|
python/helpers/pydev/pycompletion.py
|
56
|
#!/usr/bin/python
'''
@author Radim Kubacki
'''
import _pydev_imports_tipper
import traceback
import StringIO
import sys
import urllib
import pycompletionserver
#=======================================================================================================================
# GetImports
#=======================================================================================================================
def GetImports(module_name):
try:
processor = pycompletionserver.Processor()
data = urllib.unquote_plus(module_name)
def_file, completions = _pydev_imports_tipper.GenerateTip(data)
return processor.formatCompletionMessage(def_file, completions)
except:
s = StringIO.StringIO()
exc_info = sys.exc_info()
traceback.print_exception(exc_info[0], exc_info[1], exc_info[2], limit=None, file=s)
err = s.getvalue()
pycompletionserver.dbg('Received error: ' + str(err), pycompletionserver.ERROR)
raise
#=======================================================================================================================
# main
#=======================================================================================================================
if __name__ == '__main__':
mod_name = sys.argv[1]
print(GetImports(mod_name))
|
bsipocz/scikit-image
|
refs/heads/master
|
skimage/feature/tests/test_orb.py
|
20
|
import numpy as np
from numpy.testing import assert_equal, assert_almost_equal, run_module_suite
from skimage.feature import ORB
from skimage import data
from skimage.color import rgb2gray
from skimage._shared.testing import test_parallel
img = rgb2gray(data.lena())
@test_parallel()
def test_keypoints_orb_desired_no_of_keypoints():
detector_extractor = ORB(n_keypoints=10, fast_n=12, fast_threshold=0.20)
detector_extractor.detect(img)
exp_rows = np.array([ 435. , 435.6 , 376. , 455. , 434.88, 269. ,
375.6 , 310.8 , 413. , 311.04])
exp_cols = np.array([ 180. , 180. , 156. , 176. , 180. , 111. ,
156. , 172.8, 70. , 172.8])
exp_scales = np.array([ 1. , 1.2 , 1. , 1. , 1.44 , 1. ,
1.2 , 1.2 , 1. , 1.728])
exp_orientations = np.array([-175.64733392, -167.94842949, -148.98350192,
-142.03599837, -176.08535837, -53.08162354,
-150.89208271, 97.7693776 , -173.4479964 ,
38.66312042])
exp_response = np.array([ 0.96770745, 0.81027306, 0.72376257,
0.5626413 , 0.5097993 , 0.44351774,
0.39154173, 0.39084861, 0.39063076,
0.37602487])
assert_almost_equal(exp_rows, detector_extractor.keypoints[:, 0])
assert_almost_equal(exp_cols, detector_extractor.keypoints[:, 1])
assert_almost_equal(exp_scales, detector_extractor.scales)
assert_almost_equal(exp_response, detector_extractor.responses)
assert_almost_equal(exp_orientations,
np.rad2deg(detector_extractor.orientations), 5)
detector_extractor.detect_and_extract(img)
assert_almost_equal(exp_rows, detector_extractor.keypoints[:, 0])
assert_almost_equal(exp_cols, detector_extractor.keypoints[:, 1])
def test_keypoints_orb_less_than_desired_no_of_keypoints():
detector_extractor = ORB(n_keypoints=15, fast_n=12,
fast_threshold=0.33, downscale=2, n_scales=2)
detector_extractor.detect(img)
exp_rows = np.array([ 67., 247., 269., 413., 435., 230., 264.,
330., 372.])
exp_cols = np.array([ 157., 146., 111., 70., 180., 136., 336.,
148., 156.])
exp_scales = np.array([ 1., 1., 1., 1., 1., 2., 2., 2., 2.])
exp_orientations = np.array([-105.76503839, -96.28973044, -53.08162354,
-173.4479964 , -175.64733392, -106.07927215,
-163.40016243, 75.80865813, -154.73195911])
exp_response = np.array([ 0.13197835, 0.24931321, 0.44351774,
0.39063076, 0.96770745, 0.04935129,
0.21431068, 0.15826555, 0.42403573])
assert_almost_equal(exp_rows, detector_extractor.keypoints[:, 0])
assert_almost_equal(exp_cols, detector_extractor.keypoints[:, 1])
assert_almost_equal(exp_scales, detector_extractor.scales)
assert_almost_equal(exp_response, detector_extractor.responses)
assert_almost_equal(exp_orientations,
np.rad2deg(detector_extractor.orientations), 5)
detector_extractor.detect_and_extract(img)
assert_almost_equal(exp_rows, detector_extractor.keypoints[:, 0])
assert_almost_equal(exp_cols, detector_extractor.keypoints[:, 1])
def test_descriptor_orb():
detector_extractor = ORB(fast_n=12, fast_threshold=0.20)
exp_descriptors = np.array([[ True, False, True, True, False, False, False, False, False, False],
[False, False, True, True, False, True, True, False, True, True],
[ True, False, False, False, True, False, True, True, True, False],
[ True, False, False, True, False, True, True, False, False, False],
[False, True, True, True, False, False, False, True, True, False],
[False, False, False, False, False, True, False, True, True, True],
[False, True, True, True, True, False, False, True, False, True],
[ True, True, True, False, True, True, True, True, False, False],
[ True, True, False, True, True, True, True, False, False, False],
[ True, False, False, False, False, True, False, False, True, True],
[ True, False, False, False, True, True, True, False, False, False],
[False, False, True, False, True, False, False, True, False, False],
[False, False, True, True, False, False, False, False, False, True],
[ True, True, False, False, False, True, True, True, True, True],
[ True, True, True, False, False, True, False, True, True, False],
[False, True, True, False, False, True, True, True, True, True],
[ True, True, True, False, False, False, False, True, True, True],
[False, False, False, False, True, False, False, True, True, False],
[False, True, False, False, True, False, False, False, True, True],
[ True, False, True, False, False, False, True, True, False, False]], dtype=bool)
detector_extractor.detect(img)
detector_extractor.extract(img, detector_extractor.keypoints,
detector_extractor.scales,
detector_extractor.orientations)
assert_equal(exp_descriptors,
detector_extractor.descriptors[100:120, 10:20])
detector_extractor.detect_and_extract(img)
assert_equal(exp_descriptors,
detector_extractor.descriptors[100:120, 10:20])
if __name__ == '__main__':
run_module_suite()
|
Microvellum/Fluid-Designer
|
refs/heads/master
|
win64-vc/2.78/Python/bin/2.78/scripts/addons_contrib/io_export_md3.py
|
1
|
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
bl_info = {
"name": "Quake Model 3 (.md3)",
"author": "Xembie",
"version": (0, 7),
"blender": (2, 53, 0),
"location": "File > Export",
"description": "Save a Quake Model 3 File)",
"warning": "", # used for warning icon and text in addons panel
"wiki_url": "",
"tracker_url": "https://developer.blender.org/maniphest/task/edit/form/2/",
"category": "Import-Export"}
import bpy,struct,math,os
MAX_QPATH = 64
MD3_IDENT = "IDP3"
MD3_VERSION = 15
MD3_MAX_TAGS = 16
MD3_MAX_SURFACES = 32
MD3_MAX_FRAMES = 1024
MD3_MAX_SHADERS = 256
MD3_MAX_VERTICES = 4096
MD3_MAX_TRIANGLES = 8192
MD3_XYZ_SCALE = 64.0
class md3Vert:
xyz = []
normal = 0
binaryFormat = "<3hH"
def __init__(self):
self.xyz = [0.0, 0.0, 0.0]
self.normal = 0
def GetSize(self):
return struct.calcsize(self.binaryFormat)
# copied from PhaethonH <phaethon@linux.ucla.edu> md3.py
def Decode(self, latlng):
lat = (latlng >> 8) & 0xFF;
lng = (latlng) & 0xFF;
lat *= math.pi / 128;
lng *= math.pi / 128;
x = math.cos(lat) * math.sin(lng)
y = math.sin(lat) * math.sin(lng)
z = math.cos(lng)
retval = [ x, y, z ]
return retval
# copied from PhaethonH <phaethon@linux.ucla.edu> md3.py
def Encode(self, normal):
x = normal[0]
y = normal[1]
z = normal[2]
# normalize
l = math.sqrt((x*x) + (y*y) + (z*z))
if l == 0:
return 0
x = x/l
y = y/l
z = z/l
if (x == 0.0) and (y == 0.0):
if z > 0.0:
return 0
else:
return (128 << 8)
lng = math.acos(z) * 255 / (2 * math.pi)
lat = math.atan2(y, x) * 255 / (2 * math.pi)
retval = ((int(lat) & 0xFF) << 8) | (int(lng) & 0xFF)
return retval
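# Illustrative check (derived from the branches above, not in the original
# exporter): the straight-up normal (0, 0, 1) encodes to 0 and the
# straight-down normal (0, 0, -1) encodes to 128 << 8; every other direction
# packs the latitude byte into the high 8 bits and the longitude byte into
# the low 8 bits.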
def Save(self, file):
tmpData = [0] * 4
tmpData[0] = int(self.xyz[0] * MD3_XYZ_SCALE)
tmpData[1] = int(self.xyz[1] * MD3_XYZ_SCALE)
tmpData[2] = int(self.xyz[2] * MD3_XYZ_SCALE)
tmpData[3] = self.normal
data = struct.pack(self.binaryFormat, tmpData[0], tmpData[1], tmpData[2], tmpData[3])
file.write(data)
class md3TexCoord:
u = 0.0
v = 0.0
binaryFormat = "<2f"
def __init__(self):
self.u = 0.0
self.v = 0.0
def GetSize(self):
return struct.calcsize(self.binaryFormat)
def Save(self, file):
tmpData = [0] * 2
tmpData[0] = self.u
tmpData[1] = 1.0 - self.v
data = struct.pack(self.binaryFormat, tmpData[0], tmpData[1])
file.write(data)
class md3Triangle:
indexes = []
binaryFormat = "<3i"
def __init__(self):
self.indexes = [ 0, 0, 0 ]
def GetSize(self):
return struct.calcsize(self.binaryFormat)
def Save(self, file):
tmpData = [0] * 3
tmpData[0] = self.indexes[0]
tmpData[1] = self.indexes[2] # reverse
tmpData[2] = self.indexes[1] # reverse
data = struct.pack(self.binaryFormat,tmpData[0], tmpData[1], tmpData[2])
file.write(data)
class md3Shader:
name = ""
index = 0
binaryFormat = "<%dsi" % MAX_QPATH
def __init__(self):
self.name = ""
self.index = 0
def GetSize(self):
return struct.calcsize(self.binaryFormat)
def Save(self, file):
tmpData = [0] * 2
tmpData[0] = self.name
tmpData[1] = self.index
data = struct.pack(self.binaryFormat, tmpData[0], tmpData[1])
file.write(data)
class md3Surface:
ident = ""
name = ""
flags = 0
numFrames = 0
numShaders = 0
numVerts = 0
numTriangles = 0
ofsTriangles = 0
ofsShaders = 0
ofsUV = 0
ofsVerts = 0
ofsEnd = 0
shaders = []
triangles = []
uv = []
verts = []
binaryFormat = "<4s%ds10i" % MAX_QPATH # 1 int, name, then 10 ints
def __init__(self):
self.ident = ""
self.name = ""
self.flags = 0
self.numFrames = 0
self.numShaders = 0
self.numVerts = 0
self.numTriangles = 0
self.ofsTriangles = 0
self.ofsShaders = 0
self.ofsUV = 0
self.ofsVerts = 0
self.ofsEnd = 0
self.shaders = []
self.triangles = []
self.uv = []
self.verts = []
def GetSize(self):
sz = struct.calcsize(self.binaryFormat)
self.ofsTriangles = sz
for t in self.triangles:
sz += t.GetSize()
self.ofsShaders = sz
for s in self.shaders:
sz += s.GetSize()
self.ofsUV = sz
for u in self.uv:
sz += u.GetSize()
self.ofsVerts = sz
for v in self.verts:
sz += v.GetSize()
self.ofsEnd = sz
return self.ofsEnd
def Save(self, file):
self.GetSize()
tmpData = [0] * 12
tmpData[0] = self.ident
tmpData[1] = self.name
tmpData[2] = self.flags
tmpData[3] = self.numFrames
tmpData[4] = self.numShaders
tmpData[5] = self.numVerts
tmpData[6] = self.numTriangles
tmpData[7] = self.ofsTriangles
tmpData[8] = self.ofsShaders
tmpData[9] = self.ofsUV
tmpData[10] = self.ofsVerts
tmpData[11] = self.ofsEnd
data = struct.pack(self.binaryFormat, tmpData[0],tmpData[1],tmpData[2],tmpData[3],tmpData[4],tmpData[5],tmpData[6],tmpData[7],tmpData[8],tmpData[9],tmpData[10],tmpData[11])
file.write(data)
# write the tri data
for t in self.triangles:
t.Save(file)
# save the shader coordinates
for s in self.shaders:
s.Save(file)
# save the uv info
for u in self.uv:
u.Save(file)
# save the verts
for v in self.verts:
v.Save(file)
class md3Tag:
name = ""
origin = []
axis = []
binaryFormat="<%ds3f9f" % MAX_QPATH
def __init__(self):
self.name = ""
self.origin = [0, 0, 0]
self.axis = [0, 0, 0, 0, 0, 0, 0, 0, 0]
def GetSize(self):
return struct.calcsize(self.binaryFormat)
def Save(self, file):
tmpData = [0] * 13
tmpData[0] = self.name
tmpData[1] = float(self.origin[0])
tmpData[2] = float(self.origin[1])
tmpData[3] = float(self.origin[2])
tmpData[4] = float(self.axis[0])
tmpData[5] = float(self.axis[1])
tmpData[6] = float(self.axis[2])
tmpData[7] = float(self.axis[3])
tmpData[8] = float(self.axis[4])
tmpData[9] = float(self.axis[5])
tmpData[10] = float(self.axis[6])
tmpData[11] = float(self.axis[7])
tmpData[12] = float(self.axis[8])
data = struct.pack(self.binaryFormat, tmpData[0],tmpData[1],tmpData[2],tmpData[3],tmpData[4],tmpData[5],tmpData[6], tmpData[7], tmpData[8], tmpData[9], tmpData[10], tmpData[11], tmpData[12])
file.write(data)
class md3Frame:
mins = 0
maxs = 0
localOrigin = 0
radius = 0.0
name = ""
binaryFormat="<3f3f3ff16s"
def __init__(self):
self.mins = [0, 0, 0]
self.maxs = [0, 0, 0]
self.localOrigin = [0, 0, 0]
self.radius = 0.0
self.name = ""
def GetSize(self):
return struct.calcsize(self.binaryFormat)
def Save(self, file):
tmpData = [0] * 11
tmpData[0] = self.mins[0]
tmpData[1] = self.mins[1]
tmpData[2] = self.mins[2]
tmpData[3] = self.maxs[0]
tmpData[4] = self.maxs[1]
tmpData[5] = self.maxs[2]
tmpData[6] = self.localOrigin[0]
tmpData[7] = self.localOrigin[1]
tmpData[8] = self.localOrigin[2]
tmpData[9] = self.radius
tmpData[10] = self.name
data = struct.pack(self.binaryFormat, tmpData[0],tmpData[1],tmpData[2],tmpData[3],tmpData[4],tmpData[5],tmpData[6],tmpData[7], tmpData[8], tmpData[9], tmpData[10])
file.write(data)
class md3Object:
# header structure
ident = "" # this is used to identify the file (must be IDP3)
version = 0 # the version number of the file (Must be 15)
name = ""
flags = 0
numFrames = 0
numTags = 0
numSurfaces = 0
numSkins = 0
ofsFrames = 0
ofsTags = 0
ofsSurfaces = 0
ofsEnd = 0
frames = []
tags = []
surfaces = []
binaryFormat="<4si%ds9i" % MAX_QPATH # little-endian (<), 17 integers (17i)
def __init__(self):
self.ident = 0
self.version = 0
self.name = ""
self.flags = 0
self.numFrames = 0
self.numTags = 0
self.numSurfaces = 0
self.numSkins = 0
self.ofsFrames = 0
self.ofsTags = 0
self.ofsSurfaces = 0
self.ofsEnd = 0
self.frames = []
self.tags = []
self.surfaces = []
def GetSize(self):
self.ofsFrames = struct.calcsize(self.binaryFormat)
self.ofsTags = self.ofsFrames
for f in self.frames:
self.ofsTags += f.GetSize()
self.ofsSurfaces += self.ofsTags
for t in self.tags:
self.ofsSurfaces += t.GetSize()
self.ofsEnd = self.ofsSurfaces
for s in self.surfaces:
self.ofsEnd += s.GetSize()
return self.ofsEnd
def Save(self, file):
self.GetSize()
tmpData = [0] * 12
tmpData[0] = self.ident
tmpData[1] = self.version
tmpData[2] = self.name
tmpData[3] = self.flags
tmpData[4] = self.numFrames
tmpData[5] = self.numTags
tmpData[6] = self.numSurfaces
tmpData[7] = self.numSkins
tmpData[8] = self.ofsFrames
tmpData[9] = self.ofsTags
tmpData[10] = self.ofsSurfaces
tmpData[11] = self.ofsEnd
data = struct.pack(self.binaryFormat, tmpData[0],tmpData[1],tmpData[2],tmpData[3],tmpData[4],tmpData[5],tmpData[6],tmpData[7], tmpData[8], tmpData[9], tmpData[10], tmpData[11])
file.write(data)
for f in self.frames:
f.Save(file)
for t in self.tags:
t.Save(file)
for s in self.surfaces:
s.Save(file)
def message(log,msg):
if log:
log.write(msg + "\n")
else:
print(msg)
class md3Settings:
def __init__(self,
savepath,
name,
logpath,
overwrite=True,
dumpall=False,
ignoreuvs=False,
scale=1.0,
offsetx=0.0,
offsety=0.0,
offsetz=0.0):
self.savepath = savepath
self.name = name
self.logpath = logpath
self.overwrite = overwrite
self.dumpall = dumpall
self.ignoreuvs = ignoreuvs
self.scale = scale
self.offsetx = offsetx
self.offsety = offsety
self.offsetz = offsetz
def print_md3(log,md3,dumpall):
message(log,"Header Information")
message(log,"Ident: " + str(md3.ident))
message(log,"Version: " + str(md3.version))
message(log,"Name: " + md3.name)
message(log,"Flags: " + str(md3.flags))
message(log,"Number of Frames: " + str(md3.numFrames))
message(log,"Number of Tags: " + str(md3.numTags))
message(log,"Number of Surfaces: " + str(md3.numSurfaces))
message(log,"Number of Skins: " + str(md3.numSkins))
message(log,"Offset Frames: " + str(md3.ofsFrames))
message(log,"Offset Tags: " + str(md3.ofsTags))
message(log,"Offset Surfaces: " + str(md3.ofsSurfaces))
message(log,"Offset end: " + str(md3.ofsEnd))
if dumpall:
message(log,"Frames:")
for f in md3.frames:
message(log," Mins: " + str(f.mins[0]) + " " + str(f.mins[1]) + " " + str(f.mins[2]))
message(log," Maxs: " + str(f.maxs[0]) + " " + str(f.maxs[1]) + " " + str(f.maxs[2]))
message(log," Origin(local): " + str(f.localOrigin[0]) + " " + str(f.localOrigin[1]) + " " + str(f.localOrigin[2]))
message(log," Radius: " + str(f.radius))
message(log," Name: " + f.name)
message(log,"Tags:")
for t in md3.tags:
message(log," Name: " + t.name)
message(log," Origin: " + str(t.origin[0]) + " " + str(t.origin[1]) + " " + str(t.origin[2]))
message(log," Axis[0]: " + str(t.axis[0]) + " " + str(t.axis[1]) + " " + str(t.axis[2]))
message(log," Axis[1]: " + str(t.axis[3]) + " " + str(t.axis[4]) + " " + str(t.axis[5]))
message(log," Axis[2]: " + str(t.axis[6]) + " " + str(t.axis[7]) + " " + str(t.axis[8]))
message(log,"Surfaces:")
for s in md3.surfaces:
message(log," Ident: " + s.ident)
message(log," Name: " + s.name)
message(log," Flags: " + str(s.flags))
message(log," # of Frames: " + str(s.numFrames))
message(log," # of Shaders: " + str(s.numShaders))
message(log," # of Verts: " + str(s.numVerts))
message(log," # of Triangles: " + str(s.numTriangles))
message(log," Offset Triangles: " + str(s.ofsTriangles))
message(log," Offset UVs: " + str(s.ofsUV))
message(log," Offset Verts: " + str(s.ofsVerts))
message(log," Offset End: " + str(s.ofsEnd))
message(log," Shaders:")
for shader in s.shaders:
message(log," Name: " + shader.name)
message(log," Index: " + str(shader.index))
message(log," Triangles:")
for tri in s.triangles:
message(log," Indexes: " + str(tri.indexes[0]) + " " + str(tri.indexes[1]) + " " + str(tri.indexes[2]))
message(log," UVs:")
for uv in s.uv:
message(log," U: " + str(uv.u))
message(log," V: " + str(uv.v))
message(log," Verts:")
for vert in s.verts:
message(log," XYZ: " + str(vert.xyz[0]) + " " + str(vert.xyz[1]) + " " + str(vert.xyz[2]))
message(log," Normal: " + str(vert.normal))
shader_count = 0
vert_count = 0
tri_count = 0
for surface in md3.surfaces:
shader_count += surface.numShaders
tri_count += surface.numTriangles
vert_count += surface.numVerts
if md3.numTags >= MD3_MAX_TAGS:
message(log,"!Warning: Tag limit reached! " + str(md3.numTags))
if md3.numSurfaces >= MD3_MAX_SURFACES:
message(log,"!Warning: Surface limit reached! " + str(md3.numSurfaces))
if md3.numFrames >= MD3_MAX_FRAMES:
message(log,"!Warning: Frame limit reached! " + str(md3.numFrames))
if shader_count >= MD3_MAX_SHADERS:
message(log,"!Warning: Shader limit reached! " + str(shader_count))
if vert_count >= MD3_MAX_VERTICES:
message(log,"!Warning: Vertex limit reached! " + str(vert_count))
if tri_count >= MD3_MAX_TRIANGLES:
message(log,"!Warning: Triangle limit reached! " + str(tri_count))
def save_md3(settings):
if settings.logpath:
if settings.overwrite:
log = open(settings.logpath,"w")
else:
log = open(settings.logpath,"a")
else:
log = 0
message(log,"##########Exporting MD3##########")
bpy.ops.object.mode_set(mode='OBJECT')
md3 = md3Object()
md3.ident = MD3_IDENT
md3.version = MD3_VERSION
md3.name = settings.name
md3.numFrames = (bpy.context.scene.frame_end + 1) - bpy.context.scene.frame_start
for obj in bpy.context.selected_objects:
if obj.type == 'MESH':
nsurface = md3Surface()
nsurface.name = obj.name
nsurface.ident = MD3_IDENT
vertlist = []
for f,face in enumerate(obj.data.faces):
ntri = md3Triangle()
if len(face.verts) != 3:
message(log,"Found a nontriangle face in object " + obj.name)
continue
for v,vert_index in enumerate(face.verts):
uv_u = round(obj.data.active_uv_texture.data[f].uv[v][0],5)
uv_v = round(obj.data.active_uv_texture.data[f].uv[v][1],5)
match = 0
match_index = 0
for i,vi in enumerate(vertlist):
if vi == vert_index:
if settings.ignoreuvs:
match = 1#there is a uv match for all
match_index = i
else:
if nsurface.uv[i].u == uv_u and nsurface.uv[i].v == uv_v:
match = 1
match_index = i
if match == 0:
vertlist.append(vert_index)
ntri.indexes[v] = nsurface.numVerts
ntex = md3TexCoord()
ntex.u = uv_u
ntex.v = uv_v
nsurface.uv.append(ntex)
nsurface.numVerts += 1
else:
ntri.indexes[v] = match_index
nsurface.triangles.append(ntri)
nsurface.numTriangles += 1
if obj.data.active_uv_texture:
nshader = md3Shader()
nshader.name = obj.data.active_uv_texture.name
nshader.index = nsurface.numShaders
nsurface.shaders.append(nshader)
nsurface.numShaders += 1
if nsurface.numShaders < 1: #we should add a blank as a placeholder
nshader = md3Shader()
nshader.name = "NULL"
nsurface.shaders.append(nshader)
nsurface.numShaders += 1
for frame in range(bpy.context.scene.frame_start,bpy.context.scene.frame_end + 1):
bpy.context.scene.set_frame(frame)
fobj = obj.create_mesh(bpy.context.scene,True,'PREVIEW')
fobj.calc_normals()
nframe = md3Frame()
nframe.name = str(frame)
for vi in vertlist:
vert = fobj.verts[vi]
nvert = md3Vert()
nvert.xyz = vert.co * obj.matrix_world
nvert.xyz[0] = (round(nvert.xyz[0] + obj.matrix_world[3][0],5) * settings.scale) + settings.offsetx
nvert.xyz[1] = (round(nvert.xyz[1] + obj.matrix_world[3][1],5) * settings.scale) + settings.offsety
nvert.xyz[2] = (round(nvert.xyz[2] + obj.matrix_world[3][2],5) * settings.scale) + settings.offsetz
nvert.normal = nvert.Encode(vert.normal)
for i in range(0,3):
nframe.mins[i] = min(nframe.mins[i],nvert.xyz[i])
nframe.maxs[i] = max(nframe.maxs[i],nvert.xyz[i])
minlength = math.sqrt(math.pow(nframe.mins[0],2) + math.pow(nframe.mins[1],2) + math.pow(nframe.mins[2],2))
maxlength = math.sqrt(math.pow(nframe.maxs[0],2) + math.pow(nframe.maxs[1],2) + math.pow(nframe.maxs[2],2))
nframe.radius = round(max(minlength,maxlength),5)
nsurface.verts.append(nvert)
md3.frames.append(nframe)
nsurface.numFrames += 1
bpy.data.meshes.remove(fobj)
md3.surfaces.append(nsurface)
md3.numSurfaces += 1
elif obj.type == 'EMPTY':
md3.numTags += 1
for frame in range(bpy.context.scene.frame_start,bpy.context.scene.frame_end + 1):
bpy.context.scene.set_frame(frame)
ntag = md3Tag()
ntag.origin[0] = (round(obj.matrix_world[3][0] * settings.scale,5)) + settings.offsetx
ntag.origin[1] = (round(obj.matrix_world[3][1] * settings.scale,5)) + settings.offsety
ntag.origin[2] = (round(obj.matrix_world[3][2] * settings.scale,5)) + settings.offsetz
ntag.axis[0] = obj.matrix_world[0][0]
ntag.axis[1] = obj.matrix_world[0][1]
ntag.axis[2] = obj.matrix_world[0][2]
ntag.axis[3] = obj.matrix_world[1][0]
ntag.axis[4] = obj.matrix_world[1][1]
ntag.axis[5] = obj.matrix_world[1][2]
ntag.axis[6] = obj.matrix_world[2][0]
ntag.axis[7] = obj.matrix_world[2][1]
ntag.axis[8] = obj.matrix_world[2][2]
md3.tags.append(ntag)
if md3.numSurfaces < 1:
message(log,"Select a mesh to export!")
if log:
log.close()
return
file = open(settings.savepath, "wb")
md3.Save(file)
print_md3(log,md3,settings.dumpall)
file.close()
message(log,"MD3: " + settings.name + " saved to " + settings.savepath)
if log:
print("Logged to",settings.logpath)
log.close()
from bpy.props import *
class ExportMD3(bpy.types.Operator):
"""Export to Quake Model 3 (.md3)"""
bl_idname = "export.md3"
bl_label = 'Export MD3'
filepath = StringProperty(subtype = 'FILE_PATH',name="File Path", description="Filepath for exporting", maxlen= 1024, default= "")
md3name = StringProperty(name="MD3 Name", description="MD3 header name / skin path (64 bytes)",maxlen=64,default="")
md3log = StringProperty(name="MD3 Log", description="MD3 log file path",maxlen=1024,default="export_md3.log")
md3overwritelog = BoolProperty(name="Overwrite log", description="Overwrite log (off == append)", default=True)
md3dumpall = BoolProperty(name="Dump all", description="Dump all data for md3 to log",default=False)
md3ignoreuvs = BoolProperty(name="Ignore UVs", description="Ignore UV influence on mesh generation. Use if no UV map has been made.",default=False)
md3scale = FloatProperty(name="Scale", description="Scale all objects from world origin (0,0,0)",default=1.0,precision=5)
md3offsetx = FloatProperty(name="Offset X", description="Translate scene along the X axis",default=0.0,precision=5)
md3offsety = FloatProperty(name="Offset Y", description="Translate scene along the Y axis",default=0.0,precision=5)
md3offsetz = FloatProperty(name="Offset Z", description="Translate scene along the Z axis",default=0.0,precision=5)
def execute(self, context):
settings = md3Settings(savepath = self.properties.filepath,
name = self.properties.md3name,
logpath = self.properties.md3log,
overwrite = self.properties.md3overwritelog,
dumpall = self.properties.md3dumpall,
ignoreuvs = self.properties.md3ignoreuvs,
scale = self.properties.md3scale,
offsetx = self.properties.md3offsetx,
offsety = self.properties.md3offsety,
offsetz = self.properties.md3offsetz)
save_md3(settings)
return {'FINISHED'}
def invoke(self, context, event):
wm = context.window_manager
wm.fileselect_add(self)
return {'RUNNING_MODAL'}
@classmethod
def poll(cls, context):
return context.active_object is not None
def menu_func(self, context):
newpath = os.path.splitext(bpy.context.blend_data.filepath)[0] + ".md3"
self.layout.operator(ExportMD3.bl_idname, text="Quake Model 3 (.md3)").filepath = newpath
def register():
bpy.types.INFO_MT_file_export.append(menu_func)
def unregister():
bpy.types.INFO_MT_file_export.remove(menu_func)
if __name__ == "__main__":
register()
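# A minimal usage sketch (not part of the original script; the path below is
# illustrative): after register(), the exporter can also be invoked from
# Blender's Python console via its bl_idname:
#
#   bpy.ops.export.md3(filepath="/tmp/model.md3", md3name="model", md3scale=1.0)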
|
SkyTruth/luigi
|
refs/heads/master
|
luigi/s3.py
|
38
|
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Implementation of Simple Storage Service support.
:py:class:`S3Target` is a subclass of the Target class to support S3 file system operations
"""
from __future__ import division
import itertools
import logging
import os
import os.path
try:
from urlparse import urlsplit
except ImportError:
from urllib.parse import urlsplit
import warnings
try:
from ConfigParser import NoSectionError
except ImportError:
from configparser import NoSectionError
from luigi import six
from luigi.six.moves import range
from luigi import configuration
from luigi.format import get_default_format
from luigi.parameter import Parameter
from luigi.target import FileAlreadyExists, FileSystem, FileSystemException, FileSystemTarget, AtomicLocalFile, MissingParentDirectory
from luigi.task import ExternalTask
logger = logging.getLogger('luigi-interface')
try:
import boto
from boto.s3.key import Key
except ImportError:
logger.warning("Loading s3 module without boto installed. Will crash at "
"runtime if s3 functionality is used.")
# two different ways of marking a directory
# with a suffix in S3
S3_DIRECTORY_MARKER_SUFFIX_0 = '_$folder$'
S3_DIRECTORY_MARKER_SUFFIX_1 = '/'
class InvalidDeleteException(FileSystemException):
pass
class FileNotFoundException(FileSystemException):
pass
class S3Client(FileSystem):
"""
boto-powered S3 client.
"""
def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
**kwargs):
options = self._get_s3_config()
options.update(kwargs)
        # Removing key args would break backwards compatibility
if not aws_access_key_id:
aws_access_key_id = options.get('aws_access_key_id')
if not aws_secret_access_key:
aws_secret_access_key = options.get('aws_secret_access_key')
for key in ['aws_access_key_id', 'aws_secret_access_key']:
if key in options:
options.pop(key)
self.s3 = boto.connect_s3(aws_access_key_id,
aws_secret_access_key,
**options)
def exists(self, path):
"""
Does provided path exist on S3?
"""
(bucket, key) = self._path_to_bucket_and_key(path)
# grab and validate the bucket
s3_bucket = self.s3.get_bucket(bucket, validate=True)
# root always exists
if self._is_root(key):
return True
# file
s3_key = s3_bucket.get_key(key)
if s3_key:
return True
if self.isdir(path):
return True
logger.debug('Path %s does not exist', path)
return False
def remove(self, path, recursive=True):
"""
Remove a file or directory from S3.
"""
if not self.exists(path):
logger.debug('Could not delete %s; path does not exist', path)
return False
(bucket, key) = self._path_to_bucket_and_key(path)
# root
if self._is_root(key):
raise InvalidDeleteException(
'Cannot delete root of bucket at path %s' % path)
# grab and validate the bucket
s3_bucket = self.s3.get_bucket(bucket, validate=True)
# file
s3_key = s3_bucket.get_key(key)
if s3_key:
s3_bucket.delete_key(s3_key)
logger.debug('Deleting %s from bucket %s', key, bucket)
return True
if self.isdir(path) and not recursive:
raise InvalidDeleteException(
'Path %s is a directory. Must use recursive delete' % path)
delete_key_list = [
k for k in s3_bucket.list(self._add_path_delimiter(key))]
if len(delete_key_list) > 0:
for k in delete_key_list:
logger.debug('Deleting %s from bucket %s', k, bucket)
s3_bucket.delete_keys(delete_key_list)
return True
return False
def get_key(self, path):
(bucket, key) = self._path_to_bucket_and_key(path)
s3_bucket = self.s3.get_bucket(bucket, validate=True)
return s3_bucket.get_key(key)
def put(self, local_path, destination_s3_path):
"""
Put an object stored locally to an S3 path.
"""
(bucket, key) = self._path_to_bucket_and_key(destination_s3_path)
# grab and validate the bucket
s3_bucket = self.s3.get_bucket(bucket, validate=True)
# put the file
s3_key = Key(s3_bucket)
s3_key.key = key
s3_key.set_contents_from_filename(local_path)
def put_string(self, content, destination_s3_path):
"""
Put a string to an S3 path.
"""
(bucket, key) = self._path_to_bucket_and_key(destination_s3_path)
# grab and validate the bucket
s3_bucket = self.s3.get_bucket(bucket, validate=True)
# put the content
s3_key = Key(s3_bucket)
s3_key.key = key
s3_key.set_contents_from_string(content)
def put_multipart(self, local_path, destination_s3_path, part_size=67108864):
"""
Put an object stored locally to an S3 path
using S3 multi-part upload (for files > 5GB).
:param local_path: Path to source local file
:param destination_s3_path: URL for target S3 location
:param part_size: Part size in bytes. Default: 67108864 (64MB), must be >= 5MB and <= 5 GB.
"""
# calculate number of parts to upload
# based on the size of the file
source_size = os.stat(local_path).st_size
if source_size <= part_size:
# fallback to standard, non-multipart strategy
return self.put(local_path, destination_s3_path)
(bucket, key) = self._path_to_bucket_and_key(destination_s3_path)
# grab and validate the bucket
s3_bucket = self.s3.get_bucket(bucket, validate=True)
# calculate the number of parts (int division).
# use modulo to avoid float precision issues
# for exactly-sized fits
num_parts = \
(source_size // part_size) \
if source_size % part_size == 0 \
else (source_size // part_size) + 1
mp = None
try:
mp = s3_bucket.initiate_multipart_upload(key)
for i in range(num_parts):
# upload a part at a time to S3
offset = part_size * i
bytes = min(part_size, source_size - offset)
with open(local_path, 'rb') as fp:
part_num = i + 1
logger.info('Uploading part %s/%s to %s', part_num, num_parts, destination_s3_path)
fp.seek(offset)
mp.upload_part_from_file(fp, part_num=part_num, size=bytes)
# finish the upload, making the file available in S3
mp.complete_upload()
except BaseException:
if mp:
logger.info('Canceling multipart s3 upload for %s', destination_s3_path)
# cancel the upload so we don't get charged for
# storage consumed by uploaded parts
mp.cancel_upload()
raise
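    # A minimal usage sketch (bucket and key names below are illustrative):
    #
    #   client = S3Client()
    #   client.put_multipart('/tmp/big_file.bin',
    #                        's3://my-bucket/big_file.bin',
    #                        part_size=67108864)
    #
    # Files at or below part_size fall back to a regular put().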
def copy(self, source_path, destination_path):
"""
Copy an object from one S3 location to another.
"""
(src_bucket, src_key) = self._path_to_bucket_and_key(source_path)
(dst_bucket, dst_key) = self._path_to_bucket_and_key(destination_path)
s3_bucket = self.s3.get_bucket(dst_bucket, validate=True)
if self.isdir(source_path):
src_prefix = self._add_path_delimiter(src_key)
dst_prefix = self._add_path_delimiter(dst_key)
for key in self.list(source_path):
s3_bucket.copy_key(dst_prefix + key,
src_bucket,
src_prefix + key)
else:
s3_bucket.copy_key(dst_key, src_bucket, src_key)
def rename(self, source_path, destination_path):
"""
Rename/move an object from one S3 location to another.
"""
self.copy(source_path, destination_path)
self.remove(source_path)
def listdir(self, path):
"""
Get an iterable with S3 folder contents.
Iterable contains paths relative to queried path.
"""
(bucket, key) = self._path_to_bucket_and_key(path)
# grab and validate the bucket
s3_bucket = self.s3.get_bucket(bucket, validate=True)
key_path = self._add_path_delimiter(key)
key_path_len = len(key_path)
for item in s3_bucket.list(prefix=key_path):
yield self._add_path_delimiter(path) + item.key[key_path_len:]
def list(self, path): # backwards compat
key_path_len = len(self._add_path_delimiter(path))
for item in self.listdir(path):
yield item[key_path_len:]
def isdir(self, path):
"""
Is the parameter S3 path a directory?
"""
(bucket, key) = self._path_to_bucket_and_key(path)
# grab and validate the bucket
s3_bucket = self.s3.get_bucket(bucket, validate=True)
# root is a directory
if self._is_root(key):
return True
for suffix in (S3_DIRECTORY_MARKER_SUFFIX_0,
S3_DIRECTORY_MARKER_SUFFIX_1):
s3_dir_with_suffix_key = s3_bucket.get_key(key + suffix)
if s3_dir_with_suffix_key:
return True
# files with this prefix
key_path = self._add_path_delimiter(key)
s3_bucket_list_result = \
list(itertools.islice(
s3_bucket.list(prefix=key_path),
1))
if s3_bucket_list_result:
return True
return False
is_dir = isdir # compatibility with old version.
def mkdir(self, path, parents=True, raise_if_exists=False):
if raise_if_exists and self.isdir(path):
raise FileAlreadyExists()
_, key = self._path_to_bucket_and_key(path)
if self._is_root(key):
return # isdir raises if the bucket doesn't exist; nothing to do here.
key = self._add_path_delimiter(key)
if not parents and not self.isdir(os.path.dirname(key)):
raise MissingParentDirectory()
return self.put_string("", self._add_path_delimiter(path))
def _get_s3_config(self, key=None):
try:
config = dict(configuration.get_config().items('s3'))
except NoSectionError:
return {}
        # So that ports etc. can be read without us having to specify all dtypes
for k, v in six.iteritems(config):
try:
config[k] = int(v)
except ValueError:
pass
if key:
return config.get(key)
return config
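    # Example client configuration (a sketch; section and key names follow
    # _get_s3_config above):
    #
    #   [s3]
    #   aws_access_key_id=YOUR_KEY_ID
    #   aws_secret_access_key=YOUR_SECRET
    #
    # Any additional keys in the [s3] section are passed through to
    # boto.connect_s3() as keyword arguments.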
def _path_to_bucket_and_key(self, path):
(scheme, netloc, path, query, fragment) = urlsplit(path)
path_without_initial_slash = path[1:]
return netloc, path_without_initial_slash
def _is_root(self, key):
return (len(key) == 0) or (key == '/')
def _add_path_delimiter(self, key):
return key if key[-1:] == '/' else key + '/'
class AtomicS3File(AtomicLocalFile):
"""
    An S3 file that writes to a temp file and is uploaded to S3 on close.
"""
def __init__(self, path, s3_client):
self.s3_client = s3_client
super(AtomicS3File, self).__init__(path)
def move_to_final_destination(self):
self.s3_client.put_multipart(self.tmp_path, self.path)
class ReadableS3File(object):
def __init__(self, s3_key):
self.s3_key = s3_key
self.buffer = []
self.closed = False
self.finished = False
def read(self, size=0):
f = self.s3_key.read(size=size)
# boto will loop on the key forever and it's not what is expected by
# the python io interface
# boto/boto#2805
if f == b'':
self.finished = True
if self.finished:
return b''
return f
def close(self):
self.s3_key.close()
self.closed = True
def __del__(self):
self.close()
def __exit__(self, exc_type, exc, traceback):
self.close()
def __enter__(self):
return self
def _add_to_buffer(self, line):
self.buffer.append(line)
def _flush_buffer(self):
output = b''.join(self.buffer)
self.buffer = []
return output
def readable(self):
return True
def writable(self):
return False
def seekable(self):
return False
def __iter__(self):
key_iter = self.s3_key.__iter__()
has_next = True
while has_next:
try:
# grab the next chunk
chunk = next(key_iter)
# split on newlines, preserving the newline
for line in chunk.splitlines(True):
if not line.endswith(os.linesep):
# no newline, so store in buffer
self._add_to_buffer(line)
else:
# newline found, send it out
if self.buffer:
self._add_to_buffer(line)
yield self._flush_buffer()
else:
yield line
except StopIteration:
# send out anything we have left in the buffer
output = self._flush_buffer()
if output:
yield output
has_next = False
self.close()
class S3Target(FileSystemTarget):
"""
"""
fs = None
def __init__(self, path, format=None, client=None):
super(S3Target, self).__init__(path)
if format is None:
format = get_default_format()
self.format = format
self.fs = client or S3Client()
def open(self, mode='r'):
"""
"""
if mode not in ('r', 'w'):
raise ValueError("Unsupported open mode '%s'" % mode)
if mode == 'r':
s3_key = self.fs.get_key(self.path)
if not s3_key:
raise FileNotFoundException("Could not find file at %s" % self.path)
fileobj = ReadableS3File(s3_key)
return self.format.pipe_reader(fileobj)
else:
return self.format.pipe_writer(AtomicS3File(self.path, self.fs))
class S3FlagTarget(S3Target):
"""
Defines a target directory with a flag-file (defaults to `_SUCCESS`) used
to signify job success.
This checks for two things:
* the path exists (just like the S3Target)
* the _SUCCESS file exists within the directory.
Because Hadoop outputs into a directory and not a single file,
the path is assumed to be a directory.
This is meant to be a handy alternative to AtomicS3File.
The AtomicFile approach can be burdensome for S3 since there are no directories, per se.
If we have 1,000,000 output files, then we have to rename 1,000,000 objects.
"""
fs = None
def __init__(self, path, format=None, client=None, flag='_SUCCESS'):
"""
Initializes a S3FlagTarget.
:param path: the directory where the files are stored.
:type path: str
        :param client: client to use for S3 access; defaults to a new S3Client.
        :type client: S3Client
        :param flag: name of the flag file inside the directory. Defaults to ``_SUCCESS``.
        :type flag: str
"""
if format is None:
format = get_default_format()
if path[-1] != "/":
raise ValueError("S3FlagTarget requires the path to be to a "
"directory. It must end with a slash ( / ).")
super(S3FlagTarget, self).__init__(path)
self.format = format
self.fs = client or S3Client()
self.flag = flag
def exists(self):
hadoopSemaphore = self.path + self.flag
return self.fs.exists(hadoopSemaphore)
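# A minimal usage sketch (bucket and prefix are illustrative): a task whose
# Hadoop job writes to s3://my-bucket/output/ and drops a _SUCCESS flag can
# declare its output as
#
#   S3FlagTarget('s3://my-bucket/output/')
#
# exists() then checks for s3://my-bucket/output/_SUCCESS.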
class S3EmrTarget(S3FlagTarget):
"""
Deprecated. Use :py:class:`S3FlagTarget`
"""
def __init__(self, *args, **kwargs):
warnings.warn("S3EmrTarget is deprecated. Please use S3FlagTarget")
super(S3EmrTarget, self).__init__(*args, **kwargs)
class S3PathTask(ExternalTask):
"""
    An external task that requires the existence of a path in S3.
"""
path = Parameter()
def output(self):
return S3Target(self.path)
class S3EmrTask(ExternalTask):
"""
An external task that requires the existence of EMR output in S3.
"""
path = Parameter()
def output(self):
return S3EmrTarget(self.path)
class S3FlagTask(ExternalTask):
"""
An external task that requires the existence of EMR output in S3.
"""
path = Parameter()
flag = Parameter(default=None)
def output(self):
return S3FlagTarget(self.path, flag=self.flag)
|
lifeeth/Zeya
|
refs/heads/master
|
directory.py
|
1
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2009 Samson Yeung, Phil Sung
#
# This file is part of Zeya.
#
# Zeya is free software: you can redistribute it and/or modify it under the
# terms of the GNU Affero General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# Zeya is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Zeya. If not, see <http://www.gnu.org/licenses/>.
# Directory backend.
#
# Files in the specified directory are read for their artist/title/album tags,
# which are then saved in a database (zeya.db) stored in that directory.
import os
import tagpy
import pickle
from backends import LibraryBackend
from common import tokenize_filename
KEY = 'key'
TITLE = 'title'
ARTIST = 'artist'
ALBUM = 'album'
DB = 'db'
KEY_FILENAME = 'key_filename'
MTIMES = 'mtimes'
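# A sketch of the pickled zeya.db structure (inferred from save_db below; the
# paths and values are illustrative):
#
#   {DB: [{KEY: 0, TITLE: u'...', ARTIST: u'...', ALBUM: u'...'}, ...],
#    KEY_FILENAME: {0: '/path/to/song.ogg', ...},
#    MTIMES: {'/path/to/song.ogg': 1234567890.0, ...}}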
def album_name_from_path(tag, filename):
"""
Returns an appropriate Unicode string to use for the album name if the tag
is empty.
"""
if tag is not None and (tag.artist or tag.album):
return u''
# Use the trailing components of the path.
path_components = [x for x in os.path.dirname(filename).split(os.sep) if x]
if len(path_components) >= 2:
return os.sep.join(path_components[-2:]).decode("UTF-8")
elif len(path_components) == 1:
return path_components[0].decode("UTF-8")
return u''
class DirectoryBackend(LibraryBackend):
"""
Object that controls access to music in a given directory.
"""
def __init__(self, media_path, save_db=True):
"""
Initializes a DirectoryBackend that reads from the specified directory.
save_db can be set to False to prevent the db from being written back
to disk. This is probably only useful for debugging purposes.
"""
self._media_path = os.path.expanduser(media_path)
self._save_db = save_db
# Sequence of dicts containing song metadata (key, artist, title, album)
self.db = []
# Dict mapping keys to source filenames
self.key_filename = {}
# Dict mapping filenames to mtimes
self.mtimes = {}
self.setup_db()
def get_db_filename(self):
return os.path.join(self._media_path, 'zeya.db')
def setup_db(self):
# Load the previous database from file, and convert it to a
# representation where it can serve as a metadata cache (keyed by
# filename) when we load the file collection.
previous_db = self.load_previous_db()
self.fill_db(previous_db)
if self._save_db:
self.save_db()
def load_previous_db(self):
"""
Read the existing database on disk and return a dict mapping each
filename to the (mtime, metadata) associated with the filename.
"""
filename_to_metadata_map = {}
try:
# Load the old data structures from file.
info = pickle.load(open(self.get_db_filename(), 'r'))
key_to_metadata_map = {}
prev_mtimes = info[MTIMES]
# Construct a map from keys to metadata.
for db_entry in info[DB]:
key_to_metadata_map[db_entry[KEY]] = db_entry
# Construct a map from filename to (mtime, metadata) associated
# with that file.
for (key, filename) in info[KEY_FILENAME].iteritems():
filename_to_metadata_map[filename] = \
(prev_mtimes[filename], key_to_metadata_map[key])
except IOError:
# Couldn't read the file. Just return an empty data structure.
pass
return filename_to_metadata_map
def save_db(self):
self.info = {DB: self.db,
MTIMES: self.mtimes,
KEY_FILENAME: self.key_filename}
pickle.dump(self.info, open(self.get_db_filename(), 'wb+'))
def fill_db(self, previous_db):
"""
Populate the database, given the output of load_previous_db.
"""
print "Scanning for music in %r..." % (os.path.abspath(self._media_path),)
# Iterate over all the files.
for path, dirs, files in os.walk(self._media_path):
# Sort dirs so that subdirectories will subsequently be visited
# alphabetically (see os.walk).
dirs.sort(key=tokenize_filename)
for filename in sorted(files, key=tokenize_filename):
filename = os.path.abspath(os.path.join(path, filename))
# For each file that we encounter, see if we have cached data
# for it, and if we do, use it instead of calling out to tagpy.
# previous_db acts as a cache of mtime and metadata, keyed by
# filename.
rec_mtime, old_metadata = previous_db.get(filename, (None, None))
try:
file_mtime = os.stat(filename).st_mtime
except OSError:
continue
# Set the artist, title, and album in this block, and the key
# below.
if rec_mtime is not None and rec_mtime >= file_mtime:
# Use cached data. However, we potentially renumber the
# keys every time, so the old KEY is no good. We'll update
# the KEY field later.
metadata = old_metadata
else:
# In this branch, we actually need to read the file and
# extract its metadata.
try:
metadata = extract_metadata(filename)
except ValueError:
# If there was any exception, then ignore the file and
# continue.
continue
# Number the keys consecutively starting from 0.
next_key = len(self.key_filename)
metadata[KEY] = next_key
self.db.append(metadata)
self.key_filename[next_key] = filename
self.mtimes[filename] = file_mtime
def get_library_contents(self):
return self.db
def get_filename_from_key(self, key):
return self.key_filename[int(key)]
def extract_metadata(filename, tagpy_module = tagpy):
"""
Returns a metadata dictionary (a dictionary {ARTIST: ..., ...}) containing
metadata (artist, title, and album) for the song in question.
filename: a string supplying a filename.
tagpy_module: a reference to the tagpy module. This can be faked out for
unit testing.
"""
# tagpy can do one of three things:
#
# * Return legitimate data. We'll load that data.
# * Return None. We'll assume this is a music file but that it doesn't have
# metadata. Create an entry for it.
# * Throw ValueError. We'll assume this is not something we could play.
    #   Don't create an entry for it.
try:
tag = tagpy_module.FileRef(filename).tag()
except:
raise ValueError("Error reading metadata from %r" % (filename,))
# If no metadata is available, set the title to be the basename of the
# file. (We have to ensure that the title, in particular, is not empty
# since the user has to click on it in the web UI.)
metadata = {
TITLE: os.path.basename(filename).decode("UTF-8"),
ARTIST: '',
ALBUM: album_name_from_path(tag, filename),
}
if tag is not None:
metadata[ARTIST] = tag.artist
# Again, do not allow metadata[TITLE] to be an empty string, even if
# tag.title is an empty string.
metadata[TITLE] = tag.title or metadata[TITLE]
metadata[ALBUM] = tag.album or metadata[ALBUM]
return metadata
|
chrisdunelm/grpc
|
refs/heads/master
|
third_party/nanopb/tests/site_scons/site_tools/nanopb.py
|
79
|
'''
Scons Builder for nanopb .proto definitions.
This tool will locate the nanopb generator and use it to generate .pb.c and
.pb.h files from the .proto files.
Basic example
-------------
# Build myproto.pb.c and myproto.pb.h from myproto.proto
myproto = env.NanopbProto("myproto")
# Link nanopb core to the program
env.Append(CPPPATH = "$NANOPB")
myprog = env.Program(["myprog.c", myproto, "$NANOPB/pb_encode.c", "$NANOPB/pb_decode.c"])
Configuration options
---------------------
Normally, this script is used in the test environment of nanopb and it locates
the nanopb generator by a relative path. If this script is used in another
application, the path to nanopb root directory has to be defined:
env.SetDefault(NANOPB = "path/to/nanopb")
Additionally, the path to protoc and the options to give to protoc can be
defined manually:
env.SetDefault(PROTOC = "path/to/protoc")
env.SetDefault(PROTOCFLAGS = "--plugin=protoc-gen-nanopb=path/to/protoc-gen-nanopb")
'''
import SCons.Action
import SCons.Builder
import SCons.Errors
import SCons.Util
import SCons.Warnings
import os.path
class NanopbWarning(SCons.Warnings.Warning):
pass
SCons.Warnings.enableWarningClass(NanopbWarning)
def _detect_nanopb(env):
'''Find the path to nanopb root directory.'''
if env.has_key('NANOPB'):
# Use nanopb dir given by user
return env['NANOPB']
p = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..'))
if os.path.isdir(p) and os.path.isfile(os.path.join(p, 'pb.h')):
# Assume we are running under tests/site_scons/site_tools
return p
raise SCons.Errors.StopError(NanopbWarning,
"Could not find the nanopb root directory")
def _detect_protoc(env):
'''Find the path to the protoc compiler.'''
if env.has_key('PROTOC'):
# Use protoc defined by user
return env['PROTOC']
n = _detect_nanopb(env)
p1 = os.path.join(n, 'generator-bin', 'protoc' + env['PROGSUFFIX'])
if os.path.exists(p1):
# Use protoc bundled with binary package
return env['ESCAPE'](p1)
p = env.WhereIs('protoc')
if p:
# Use protoc from path
return env['ESCAPE'](p)
raise SCons.Errors.StopError(NanopbWarning,
"Could not find the protoc compiler")
def _detect_protocflags(env):
'''Find the options to use for protoc.'''
if env.has_key('PROTOCFLAGS'):
return env['PROTOCFLAGS']
p = _detect_protoc(env)
n = _detect_nanopb(env)
p1 = os.path.join(n, 'generator-bin', 'protoc' + env['PROGSUFFIX'])
if p == env['ESCAPE'](p1):
# Using the bundled protoc, no options needed
return ''
e = env['ESCAPE']
if env['PLATFORM'] == 'win32':
return e('--plugin=protoc-gen-nanopb=' + os.path.join(n, 'generator', 'protoc-gen-nanopb.bat'))
else:
return e('--plugin=protoc-gen-nanopb=' + os.path.join(n, 'generator', 'protoc-gen-nanopb'))
def _nanopb_proto_actions(source, target, env, for_signature):
esc = env['ESCAPE']
dirs = ' '.join(['-I' + esc(env.GetBuildPath(d)) for d in env['PROTOCPATH']])
return '$PROTOC $PROTOCFLAGS %s --nanopb_out=. %s' % (dirs, esc(str(source[0])))
def _nanopb_proto_emitter(target, source, env):
basename = os.path.splitext(str(source[0]))[0]
target.append(basename + '.pb.h')
if os.path.exists(basename + '.options'):
source.append(basename + '.options')
return target, source
_nanopb_proto_builder = SCons.Builder.Builder(
generator = _nanopb_proto_actions,
suffix = '.pb.c',
src_suffix = '.proto',
emitter = _nanopb_proto_emitter)
def generate(env):
'''Add Builder for nanopb protos.'''
env['NANOPB'] = _detect_nanopb(env)
env['PROTOC'] = _detect_protoc(env)
env['PROTOCFLAGS'] = _detect_protocflags(env)
env.SetDefault(PROTOCPATH = ['.', os.path.join(env['NANOPB'], 'generator', 'proto')])
env.SetDefault(NANOPB_PROTO_CMD = '$PROTOC $PROTOCFLAGS --nanopb_out=. $SOURCES')
env['BUILDERS']['NanopbProto'] = _nanopb_proto_builder
def exists(env):
    return _detect_protoc(env) and _detect_protocflags(env)
|
whbrewer/spc
|
refs/heads/master
|
src/gluino/contrib/pymysql/util.py
|
95
|
import struct
def byte2int(b):
if isinstance(b, int):
return b
else:
return struct.unpack("!B", b)[0]
def int2byte(i):
return struct.pack("!B", i)
def join_bytes(bs):
if len(bs) == 0:
return ""
else:
rv = bs[0]
for b in bs[1:]:
rv += b
return rv
|
denys-duchier/django
|
refs/heads/master
|
tests/order_with_respect_to/base_tests.py
|
63
|
"""
The tests are shared with contenttypes_tests and so shouldn't import or
reference any models directly. Subclasses should inherit from django.test.TestCase.
"""
from operator import attrgetter
class BaseOrderWithRespectToTests:
# Hook to allow subclasses to run these tests with alternate models.
Answer = None
Post = None
Question = None
@classmethod
def setUpTestData(cls):
cls.q1 = cls.Question.objects.create(text="Which Beatle starts with the letter 'R'?")
cls.Answer.objects.create(text="John", question=cls.q1)
cls.Answer.objects.create(text="Paul", question=cls.q1)
cls.Answer.objects.create(text="George", question=cls.q1)
cls.Answer.objects.create(text="Ringo", question=cls.q1)
def test_default_to_insertion_order(self):
# Answers will always be ordered in the order they were inserted.
self.assertQuerysetEqual(
self.q1.answer_set.all(), [
"John", "Paul", "George", "Ringo",
],
attrgetter("text"),
)
def test_previous_and_next_in_order(self):
# We can retrieve the answers related to a particular object, in the
# order they were created, once we have a particular object.
a1 = self.q1.answer_set.all()[0]
self.assertEqual(a1.text, "John")
self.assertEqual(a1.get_next_in_order().text, "Paul")
a2 = list(self.q1.answer_set.all())[-1]
self.assertEqual(a2.text, "Ringo")
self.assertEqual(a2.get_previous_in_order().text, "George")
def test_item_ordering(self):
# We can retrieve the ordering of the queryset from a particular item.
a1 = self.q1.answer_set.all()[1]
id_list = [o.pk for o in self.q1.answer_set.all()]
self.assertSequenceEqual(a1.question.get_answer_order(), id_list)
# It doesn't matter which answer we use to check the order, it will
# always be the same.
a2 = self.Answer.objects.create(text="Number five", question=self.q1)
self.assertEqual(list(a1.question.get_answer_order()), list(a2.question.get_answer_order()))
def test_change_ordering(self):
# The ordering can be altered
a = self.Answer.objects.create(text="Number five", question=self.q1)
# Swap the last two items in the order list
id_list = [o.pk for o in self.q1.answer_set.all()]
x = id_list.pop()
id_list.insert(-1, x)
# By default, the ordering is different from the swapped version
self.assertNotEqual(list(a.question.get_answer_order()), id_list)
# Change the ordering to the swapped version -
# this changes the ordering of the queryset.
a.question.set_answer_order(id_list)
self.assertQuerysetEqual(
self.q1.answer_set.all(), [
"John", "Paul", "George", "Number five", "Ringo"
],
attrgetter("text")
)
def test_recursive_ordering(self):
p1 = self.Post.objects.create(title="1")
p2 = self.Post.objects.create(title="2")
p1_1 = self.Post.objects.create(title="1.1", parent=p1)
p1_2 = self.Post.objects.create(title="1.2", parent=p1)
self.Post.objects.create(title="2.1", parent=p2)
p1_3 = self.Post.objects.create(title="1.3", parent=p1)
self.assertSequenceEqual(p1.get_post_order(), [p1_1.pk, p1_2.pk, p1_3.pk])
|
perfidia/pytenseshift
|
refs/heads/master
|
en/parser/nltk_lite/corpora/gutenberg.py
|
9
|
# Natural Language Toolkit: Gutenberg Corpus Reader
#
# Copyright (C) 2001-2006 University of Pennsylvania
# Author: Steven Bird <sb@ldc.upenn.edu>
# Edward Loper <edloper@gradient.cis.upenn.edu>
# URL: <http://nltk.sf.net>
# For license information, see LICENSE.TXT
"""
Read tokens from the NLTK Gutenberg Corpus.
Project Gutenberg -- http://gutenberg.net/
This corpus contains selected texts from Project Gutenberg:
* Jane Austen (3)
* William Blake (2)
* G. K. Chesterton (3)
* King James Bible
* John Milton
* William Shakespeare (3)
* Walt Whitman
"""
from en.parser.nltk_lite.corpora import get_basedir
from en.parser.nltk_lite import tokenize
import os, re
items = [
'austen-emma',
'austen-persuasion',
'austen-sense',
'bible-kjv',
'blake-poems',
'blake-songs',
'chesterton-ball',
'chesterton-brown',
'chesterton-thursday',
'milton-paradise',
'shakespeare-caesar',
'shakespeare-hamlet',
'shakespeare-macbeth',
'whitman-leaves'
]
item_name = {
'austen-emma': 'Jane Austen: Emma',
'austen-persuasion': 'Jane Austen: Persuasion',
'austen-sense': 'Jane Austen: Sense and Sensibility',
'bible-kjv': 'King James Bible',
'blake-poems': 'William Blake: Poems',
    'blake-songs': 'William Blake: Songs of Innocence and Experience',
'chesterton-ball': 'G.K. Chesterton: The Ball and The Cross',
'chesterton-brown': 'G.K. Chesterton: The Wisdom of Father Brown',
'chesterton-thursday': 'G.K. Chesterton: The Man Who Was Thursday',
'milton-paradise': 'John Milton: Paradise Lost',
'shakespeare-caesar': 'William Shakespeare: Julius Caesar',
'shakespeare-hamlet': 'William Shakespeare: Hamlet',
'shakespeare-macbeth': 'William Shakespeare: Macbeth',
'whitman-leaves': 'Walt Whitman: Leaves of Grass',
}
def raw(files = items):
if type(files) is str: files = (files,)
for file in files:
path = os.path.join(get_basedir(), "gutenberg", file + ".txt")
f = open(path)
preamble = True
for line in f.readlines():
if not preamble:
for t in tokenize.wordpunct(line):
yield t
if line[:5] == '*END*':
preamble = False
def demo():
from en.parser.nltk_lite.corpora import gutenberg
from itertools import islice
for word in islice(gutenberg.raw('bible-kjv'), 0, 100):
print word,
if __name__ == '__main__':
demo()
|
silly-wacky-3-town-toon/SOURCE-COD
|
refs/heads/master
|
toontown/suit/DistributedLawbotBossSuit.py
|
1
|
from panda3d.core import *
from panda3d.direct import *
from direct.interval.IntervalGlobal import *
from direct.fsm import ClassicFSM, State
from direct.fsm import State
from direct.directnotify import DirectNotifyGlobal
import DistributedSuitBase
from toontown.toonbase import ToontownGlobals
from toontown.battle import MovieUtil
class DistributedLawbotBossSuit(DistributedSuitBase.DistributedSuitBase):
notify = DirectNotifyGlobal.directNotify.newCategory('DistributedLawbotBossSuit')
timeToShow = 1.0
timeToRelease = 3.15
throwPaperEndTime = 4.33
def __init__(self, cr):
self.flyingEvidenceTrack = None
try:
self.DistributedSuit_initialized
except:
self.DistributedSuit_initialized = 1
DistributedSuitBase.DistributedSuitBase.__init__(self, cr)
self.activeIntervals = {}
self.boss = None
self.fsm = ClassicFSM.ClassicFSM('DistributedLawbotBossSuit', [
State.State('Off',
self.enterOff,
self.exitOff, [
'Walk',
'Battle',
'neutral']),
State.State('Walk',
self.enterWalk,
self.exitWalk, [
'WaitForBattle',
'Battle']),
State.State('Battle',
self.enterBattle,
self.exitBattle, []),
State.State('neutral',
self.enterNeutral,
self.exitNeutral, [
'PreThrowProsecute',
'PreThrowAttack',
'Stunned']),
State.State('PreThrowProsecute',
self.enterPreThrowProsecute,
self.exitPreThrowProsecute,
['PostThrowProsecute',
'neutral',
'Stunned']),
State.State('PostThrowProsecute',
self.enterPostThrowProsecute,
self.exitPostThrowProsecute, [
'neutral',
'Stunned']),
State.State('PreThrowAttack',
self.enterPreThrowAttack,
self.exitPreThrowAttack, [
'PostThrowAttack',
'neutral',
'Stunned']),
State.State('PostThrowAttack',
self.enterPostThrowAttack,
self.exitPostThrowAttack, [
'neutral',
'Stunned']),
State.State('Stunned',
self.enterStunned,
self.exitStunned, [
'neutral']),
State.State('WaitForBattle',
self.enterWaitForBattle,
self.exitWaitForBattle, [
'Battle'])],
'Off', 'Off')
self.fsm.enterInitialState()
return
def generate(self):
self.notify.debug('DLBS.generate:')
DistributedSuitBase.DistributedSuitBase.generate(self)
def announceGenerate(self):
DistributedSuitBase.DistributedSuitBase.announceGenerate(self)
self.notify.debug('DLBS.announceGenerate')
colNode = self.find('**/distAvatarCollNode*')
colNode.setTag('pieCode', str(ToontownGlobals.PieCodeLawyer))
self.attackEvidenceA = self.getEvidence(True)
self.attackEvidenceB = self.getEvidence(True)
self.attackEvidence = self.attackEvidenceA
self.prosecuteEvidence = self.getEvidence(False)
self.hideName()
self.setPickable(False)
def disable(self):
self.notify.debug('DistributedSuit %d: disabling' % self.getDoId())
self.setState('Off')
DistributedSuitBase.DistributedSuitBase.disable(self)
self.cleanupIntervals()
self.boss = None
return
def delete(self):
try:
self.DistributedSuit_deleted
except:
self.DistributedSuit_deleted = 1
self.notify.debug('DistributedSuit %d: deleting' % self.getDoId())
del self.fsm
DistributedSuitBase.DistributedSuitBase.delete(self)
def d_requestBattle(self, pos, hpr):
self.cr.playGame.getPlace().setState('WaitForBattle')
self.sendUpdate('requestBattle', [pos[0],
pos[1],
pos[2],
hpr[0],
hpr[1],
hpr[2]])
return None
def __handleToonCollision(self, collEntry):
toonId = base.localAvatar.getDoId()
self.notify.debug('Distributed suit: requesting a Battle with ' + 'toon: %d' % toonId)
self.d_requestBattle(self.getPos(), self.getHpr())
self.setState('WaitForBattle')
return None
def enterWalk(self):
self.notify.debug('enterWalk')
self.enableBattleDetect('walk', self.__handleToonCollision)
self.loop('walk', 0)
pathPoints = [Vec3(50, 15, 0),
Vec3(50, 25, 0),
Vec3(20, 25, 0),
Vec3(20, 15, 0),
Vec3(50, 15, 0)]
self.tutWalkTrack = self.makePathTrack(self, pathPoints, 4.5, 'tutFlunkyWalk')
self.tutWalkTrack.loop()
def exitWalk(self):
self.notify.debug('exitWalk')
self.disableBattleDetect()
self.tutWalkTrack.pause()
self.tutWalkTrack = None
return
def enterNeutral(self):
self.notify.debug('enterNeutral')
self.notify.debug('DistributedLawbotBossSuit: Neutral')
self.loop('neutral', 0)
def exitNeutral(self):
self.notify.debug('exitNeutral')
def doAttack(self, x1, y1, z1, x2, y2, z2):
        self.notify.debug('x1=%.2f y1=%.2f z1=%.2f x2=%.2f y2=%.2f z2=%.2f' % (x1,
y1,
z1,
x2,
y2,
z2))
self.curTargetPt = Point3(x2, y2, z2)
self.fsm.request('PreThrowAttack')
return
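        # NOTE: everything below this return is unreachable; the throw itself
        # is driven by the PreThrowAttack/PostThrowAttack FSM states above.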
attackEvidence = self.getEvidence(True)
nodePath = render
node = nodePath.attachNewNode('attackEvidence-%s' % self.doId)
node.setPos(x1, y1, z1)
duration = 3.0
throwName = self.uniqueName('lawyerAttack')
throwingSeq = self.makeAttackThrowingTrack(attackEvidence, duration, Point3(x2, y2, z2))
fullSequence = Sequence(throwingSeq, name=throwName)
self.activeIntervals[throwName] = fullSequence
fullSequence.start()
def doProsecute(self):
self.notify.debug('doProsecute')
bounds = self.boss.prosecutionColNodePath.getBounds()
panCenter = bounds.getCenter()
localPos = panCenter
prosecutionPanPos = render.getRelativePoint(self.boss.prosecutionColNodePath, localPos)
self.curTargetPt = prosecutionPanPos
self.fsm.request('PreThrowProsecute')
return
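        # NOTE: everything below this return is unreachable; the throw itself
        # is driven by the PreThrowProsecute/PostThrowProsecute FSM states above.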
attackEvidence = self.getEvidence(False)
nodePath = render
node = nodePath.attachNewNode('prosecuteEvidence-%s' % self.doId)
node.setPos(self.getPos())
duration = ToontownGlobals.LawbotBossLawyerToPanTime
throwName = self.uniqueName('lawyerProsecute')
throwingSeq = self.makeProsecuteThrowingTrack(attackEvidence, duration, prosecutionPanPos)
fullSequence = Sequence(throwingSeq, Func(self.boss.flashGreen), Func(self.clearInterval, throwName), name=throwName)
self.activeIntervals[throwName] = fullSequence
fullSequence.start()
def makeDummySequence(self):
retval = Sequence(Wait(10))
return retval
def makeProsecuteThrowingTrack(self, evidence, inFlightDuration, hitPos):
suitTrack = Sequence()
suitTrack.append(ActorInterval(self, 'throw-paper'))
throwPaperDuration = suitTrack.getDuration()
inFlight = Parallel(evidence.posInterval(inFlightDuration, hitPos, fluid=1))
origHpr = self.getHpr()
self.headsUp(hitPos)
newHpr = self.getHpr()
self.setHpr(origHpr)
rotateTrack = Sequence(self.hprInterval(self.timeToShow, newHpr, fluid=1))
propTrack = Sequence(Func(evidence.hide), Func(evidence.setPos, 0, 0.5, -0.3), Func(evidence.reparentTo, self.getRightHand()), Wait(self.timeToShow), Func(evidence.show), Wait(self.timeToRelease - self.timeToShow), Func(evidence.wrtReparentTo, render), Func(self.makeDummySequence), inFlight, Func(evidence.detachNode))
throwingTrack = Parallel(suitTrack, propTrack, rotateTrack)
return throwingTrack
def makeAttackThrowingTrack(self, evidence, inFlightDuration, hitPos):
suitTrack = Sequence()
suitTrack.append(ActorInterval(self, 'throw-paper'))
throwPaperDuration = suitTrack.getDuration()
origHpr = self.getHpr()
self.headsUp(hitPos)
newHpr = self.getHpr()
self.setHpr(origHpr)
rotateTrack = Sequence(self.hprInterval(self.timeToShow, newHpr, fluid=1))
propTrack = Sequence(Func(evidence.hide), Func(evidence.setPos, 0, 0.5, -0.3), Func(evidence.reparentTo, self.getRightHand()), Wait(self.timeToShow), Func(evidence.show), Wait(self.timeToRelease - self.timeToShow), Func(evidence.wrtReparentTo, render), Func(evidence.setZ, 1.3), evidence.posInterval(inFlightDuration, hitPos, fluid=1), Func(evidence.detachNode))
throwingTrack = Parallel(suitTrack, propTrack, rotateTrack)
return throwingTrack
def makePreThrowAttackTrack(self, evidence, inFlightDuration, hitPos):
suitTrack = Sequence()
suitTrack.append(ActorInterval(self, 'throw-paper', endTime=self.timeToRelease))
throwPaperDuration = suitTrack.getDuration()
origHpr = self.getHpr()
self.headsUp(hitPos)
newHpr = self.getHpr()
self.setHpr(origHpr)
rotateTrack = Sequence(self.hprInterval(self.timeToShow, newHpr, fluid=1))
propTrack = Sequence(Func(evidence.hide), Func(evidence.setPos, 0, 0.5, -0.3), Func(evidence.setScale, 1), Func(evidence.setHpr, 0, 0, 0), Func(evidence.reparentTo, self.getRightHand()), Wait(self.timeToShow), Func(evidence.show), Wait(self.timeToRelease - self.timeToShow))
throwingTrack = Parallel(suitTrack, propTrack, rotateTrack)
return throwingTrack
def makePostThrowAttackTrack(self, evidence, inFlightDuration, hitPos):
suitTrack = Sequence()
suitTrack.append(ActorInterval(self, 'throw-paper', startTime=self.timeToRelease))
propTrack = Sequence(Func(evidence.wrtReparentTo, render), Func(evidence.setScale, 1), Func(evidence.show), Func(evidence.setZ, 1.3), evidence.posInterval(inFlightDuration, hitPos, fluid=1), Func(evidence.hide))
return (suitTrack, propTrack)
def makePreThrowProsecuteTrack(self, evidence, inFlightDuration, hitPos):
return self.makePreThrowAttackTrack(evidence, inFlightDuration, hitPos)
def makePostThrowProsecuteTrack(self, evidence, inFlightDuration, hitPos):
suitTrack = Sequence()
suitTrack.append(ActorInterval(self, 'throw-paper', startTime=self.timeToRelease))
propTrack = Sequence(Func(evidence.wrtReparentTo, render), Func(evidence.setScale, 1), Func(evidence.show), evidence.posInterval(inFlightDuration, hitPos, fluid=1), Func(evidence.hide))
return (suitTrack, propTrack)
def getEvidence(self, usedForAttack = False):
model = loader.loadModel('phase_5/models/props/lawbook')
if usedForAttack:
bounds = model.getBounds()
center = bounds.getCenter()
radius = bounds.getRadius()
sphere = CollisionSphere(center.getX(), center.getY(), center.getZ(), radius)
colNode = CollisionNode('BossZap')
colNode.setTag('attackCode', str(ToontownGlobals.BossCogLawyerAttack))
colNode.addSolid(sphere)
model.attachNewNode(colNode)
model.setTransparency(1)
model.setAlphaScale(0.5)
return model
def cleanupIntervals(self):
for interval in self.activeIntervals.values():
interval.finish()
self.activeIntervals = {}
def clearInterval(self, name, finish = 1):
if self.activeIntervals.has_key(name):
ival = self.activeIntervals[name]
if finish:
ival.finish()
else:
ival.pause()
if self.activeIntervals.has_key(name):
del self.activeIntervals[name]
else:
self.notify.debug('interval: %s already cleared' % name)
def setBossCogId(self, bossCogId):
self.bossCogId = bossCogId
self.boss = base.cr.doId2do[bossCogId]
def doStun(self):
self.notify.debug('doStun')
self.fsm.request('Stunned')
def enterPreThrowProsecute(self):
duration = ToontownGlobals.LawbotBossLawyerToPanTime
throwName = self.uniqueName('preThrowProsecute')
preThrowTrack = self.makePreThrowProsecuteTrack(self.prosecuteEvidence, duration, self.curTargetPt)
fullSequence = Sequence(preThrowTrack, Func(self.requestStateIfNotInFlux, 'PostThrowProsecute'), name=throwName)
self.activeIntervals[throwName] = fullSequence
fullSequence.start()
def exitPreThrowProsecute(self):
throwName = self.uniqueName('preThrowProsecute')
if self.activeIntervals.has_key(throwName):
self.activeIntervals[throwName].pause()
del self.activeIntervals[throwName]
def enterPostThrowProsecute(self):
duration = ToontownGlobals.LawbotBossLawyerToPanTime
throwName = self.uniqueName('postThrowProsecute')
postThrowTrack, self.flyingEvidenceTrack = self.makePostThrowProsecuteTrack(self.prosecuteEvidence, duration, self.curTargetPt)
fullSequence = Sequence(postThrowTrack, Func(self.requestStateIfNotInFlux, 'neutral'), name=throwName)
self.activeIntervals[throwName] = fullSequence
fullSequence.start()
flyName = self.uniqueName('flyingEvidence')
self.activeIntervals[flyName] = self.flyingEvidenceTrack
self.flyingEvidenceTrack.append(Func(self.finishedWithFlying, 'prosecute'))
self.flyingEvidenceTrack.start()
def exitPostThrowProsecute(self):
throwName = self.uniqueName('postThrowProsecute')
if self.activeIntervals.has_key(throwName):
self.activeIntervals[throwName].finish()
del self.activeIntervals[throwName]
def requestStateIfNotInFlux(self, state):
if not self.fsm._ClassicFSM__internalStateInFlux:
self.fsm.request(state)
def enterPreThrowAttack(self):
if self.attackEvidence == self.attackEvidenceA:
self.attackEvidence = self.attackEvidenceB
else:
self.attackEvidence = self.attackEvidenceA
duration = 3.0
throwName = self.uniqueName('preThrowAttack')
preThrowTrack = self.makePreThrowAttackTrack(self.attackEvidence, duration, self.curTargetPt)
fullSequence = Sequence(preThrowTrack, Func(self.requestStateIfNotInFlux, 'PostThrowAttack'), name=throwName)
self.activeIntervals[throwName] = fullSequence
fullSequence.start()
def exitPreThrowAttack(self):
throwName = self.uniqueName('preThrowAttack')
if self.activeIntervals.has_key(throwName):
self.activeIntervals[throwName].pause()
del self.activeIntervals[throwName]
def enterPostThrowAttack(self):
duration = 3.0
throwName = self.uniqueName('postThrowAttack')
postThrowTrack, self.flyingEvidenceTrack = self.makePostThrowAttackTrack(self.attackEvidence, duration, self.curTargetPt)
fullSequence = Sequence(postThrowTrack, Func(self.requestStateIfNotInFlux, 'neutral'), name=throwName)
self.notify.debug('duration of postThrowAttack = %f' % fullSequence.getDuration())
self.activeIntervals[throwName] = fullSequence
fullSequence.start()
flyName = self.uniqueName('flyingEvidence')
self.activeIntervals[flyName] = self.flyingEvidenceTrack
self.flyingEvidenceTrack.append(Func(self.finishedWithFlying, 'attack'))
self.flyingEvidenceTrack.start()
def finishedWithFlying(self, str):
self.notify.debug('finished flyingEvidenceTrack %s' % str)
def exitPostThrowAttack(self):
throwName = self.uniqueName('postThrowAttack')
if self.activeIntervals.has_key(throwName):
self.activeIntervals[throwName].finish()
del self.activeIntervals[throwName]
def enterStunned(self):
self.loop('lured', 0)
stunSequence = MovieUtil.createSuitStunInterval(self, 0, ToontownGlobals.LawbotBossLawyerStunTime)
seqName = stunSequence.getName()
stunSequence.append(Func(self.fsm.request, 'neutral'))
self.activeIntervals[seqName] = stunSequence
stunSequence.start()
def exitStunned(self):
self.prosecuteEvidence.hide()
self.attackEvidence.hide()
|
retr0h/ansible
|
refs/heads/devel
|
lib/ansible/inventory/ini.py
|
1
|
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#############################################
import ansible.constants as C
from ansible.inventory.host import Host
from ansible.inventory.group import Group
from ansible.inventory.expand_hosts import detect_range
from ansible.inventory.expand_hosts import expand_hostname_range
from ansible import errors
import shlex
import re
import ast
class InventoryParser(object):
"""
Host inventory for ansible.
"""
def __init__(self, filename=C.DEFAULT_HOST_LIST):
with open(filename) as fh:
self.lines = fh.readlines()
self.groups = {}
self.hosts = {}
self._parse()
def _parse(self):
self._parse_base_groups()
self._parse_group_children()
self._parse_group_variables()
return self.groups
# [webservers]
# alpha
# beta:2345
# gamma sudo=True user=root
# delta asdf=jkl favcolor=red
def _parse_base_groups(self):
# FIXME: refactor
ungrouped = Group(name='ungrouped')
all = Group(name='all')
all.add_child_group(ungrouped)
self.groups = dict(all=all, ungrouped=ungrouped)
active_group_name = 'ungrouped'
for line in self.lines:
if line.startswith("["):
active_group_name = line.split(" #")[0].replace("[","").replace("]","").strip()
if line.find(":vars") != -1 or line.find(":children") != -1:
active_group_name = active_group_name.rsplit(":", 1)[0]
if active_group_name not in self.groups:
new_group = self.groups[active_group_name] = Group(name=active_group_name)
all.add_child_group(new_group)
active_group_name = None
elif active_group_name not in self.groups:
new_group = self.groups[active_group_name] = Group(name=active_group_name)
all.add_child_group(new_group)
elif line.startswith("#") or line.startswith(";") or line == '':
pass
elif active_group_name:
tokens = shlex.split(line.split(" #")[0])
if len(tokens) == 0:
continue
hostname = tokens[0]
port = C.DEFAULT_REMOTE_PORT
                # Three cases to check:
                # 0. A hostname that contains a range pseudo-code and a port
                # 1. A hostname that contains just a port
                # 2. A plain hostname, which falls through with the default port
if hostname.count(":") > 1:
                    # probably an IPv6 address, so check for the format
# XXX:XXX::XXX.port, otherwise we'll just assume no
# port is set
if hostname.find(".") != -1:
(hostname, port) = hostname.rsplit(".", 1)
elif (hostname.find("[") != -1 and
hostname.find("]") != -1 and
hostname.find(":") != -1 and
(hostname.rindex("]") < hostname.rindex(":")) or
(hostname.find("]") == -1 and hostname.find(":") != -1)):
(hostname, port) = hostname.rsplit(":", 1)
hostnames = []
if detect_range(hostname):
hostnames = expand_hostname_range(hostname)
else:
hostnames = [hostname]
for hn in hostnames:
host = None
if hn in self.hosts:
host = self.hosts[hn]
else:
host = Host(name=hn, port=port)
self.hosts[hn] = host
if len(tokens) > 1:
for t in tokens[1:]:
if t.startswith('#'):
break
try:
(k,v) = t.split("=")
except ValueError, e:
raise errors.AnsibleError("Invalid ini entry: %s - %s" % (t, str(e)))
try:
host.set_variable(k,ast.literal_eval(v))
except:
# most likely a string that literal_eval
# doesn't like, so just set it
host.set_variable(k,v)
self.groups[active_group_name].add_host(host)
# [southeast:children]
# atlanta
# raleigh
def _parse_group_children(self):
group = None
for line in self.lines:
line = line.strip()
if line is None or line == '':
continue
if line.startswith("[") and line.find(":children]") != -1:
line = line.replace("[","").replace(":children]","")
group = self.groups.get(line, None)
if group is None:
group = self.groups[line] = Group(name=line)
elif line.startswith("#") or line.startswith(";"):
pass
elif line.startswith("["):
group = None
elif group:
kid_group = self.groups.get(line, None)
if kid_group is None:
raise errors.AnsibleError("child group is not defined: (%s)" % line)
else:
group.add_child_group(kid_group)
# [webservers:vars]
# http_port=1234
# maxRequestsPerChild=200
def _parse_group_variables(self):
group = None
for line in self.lines:
line = line.strip()
if line.startswith("[") and line.find(":vars]") != -1:
line = line.replace("[","").replace(":vars]","")
group = self.groups.get(line, None)
if group is None:
raise errors.AnsibleError("can't add vars to undefined group: %s" % line)
elif line.startswith("#") or line.startswith(";"):
pass
elif line.startswith("["):
group = None
elif line == '':
pass
elif group:
if line.find("=") == -1:
raise errors.AnsibleError("variables assigned to group must be in key=value form")
else:
(k, v) = [e.strip() for e in line.split("=", 1)]
# When the value is a single-quoted or double-quoted string
if re.match(r"^(['\"]).*\1$", v):
# Unquote the string
group.set_variable(k, re.sub(r"^['\"]|['\"]$", '', v))
else:
group.set_variable(k, v)
def get_host_variables(self, host):
return {}
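# A minimal usage sketch (the hosts file path is illustrative):
#
#   parser = InventoryParser(filename='/etc/ansible/hosts')
#   print parser.groups.keys()   # group names, including 'all' and 'ungrouped'
#   print parser.hosts.keys()    # host names found in the inventory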
|
hasadna/OpenCommunity
|
refs/heads/master
|
src/communities/notifications.py
|
1
|
"""Services for sending notifications to community members."""
import logging
import datetime
from itertools import chain
from django.conf import settings
from django.core.mail import EmailMultiAlternatives
from django.template.loader import render_to_string
from django.utils import translation
import django_rq
from communities.models import SendToOption
from users.default_roles import DefaultGroups
from issues.models import IssueStatus
logger = logging.getLogger(__name__)
def get_guests_emails(guests_text):
guest_emails = []
if guests_text:
for line in guests_text.splitlines():
if '[' in line:
from_idx = line.find('[')
to_idx = line.find(']', from_idx + 1)
try:
guest_emails.append(line[from_idx+1:to_idx])
except:
pass
return guest_emails
def construct_mock_users(email_list, type):
"""Takes a list of email addresses and a user type, and returns a
mock user object with just enough information to check for object
access control.
"""
class MockUser(object):
def __init__(self, user_dict):
for k, v in user_dict.items():
setattr(self, k, v)
users = []
for email in email_list:
user = {
'email': email,
'type': type,
'_is_mock': True,
'is_superuser': False
}
users.append(MockUser(user))
return users
def _base_send_mail(community, notification_type, sender, send_to, data=None,
base_url=None, with_guests=False, language=None):
"""Sends mail to community members, and applies object access control.
The type of email being sent is detected from notification_type.
"""
if language:
translation.activate(language)
# before anything, we want to build our recipient list as email
# will be personalized.
if send_to == SendToOption.ONLY_ME:
r = [sender]
elif send_to == SendToOption.ALL_MEMBERS:
r = [m.user for m in community.memberships.all()]
elif send_to == SendToOption.BOARD_ONLY:
r = [m.user for m in community.memberships.board()]
elif send_to == SendToOption.ONLY_ATTENDEES:
r = [user for user in community.upcoming_meeting_participants.all()]
else:
r = []
logger.error('Received an email job with no valid send_to. '
'send_to: {0}.'.format(send_to))
user_recipients = set(r)
w = []
if send_to != SendToOption.ONLY_ME:
# Add guests to the watcher_recipients list if applicable
if with_guests:
guests_text = community.upcoming_meeting_guests
guest_emails = get_guests_emails(guests_text)
guests = construct_mock_users(guest_emails, 'guest')
w.extend(guests)
# Add system managers to the watcher_recipients list if applicable
if community.inform_system_manager and \
notification_type in ('agenda', 'protocol', 'protocol_draft'):
manager_emails = [manager[1] for manager in settings.MANAGERS]
managers = construct_mock_users(manager_emails, 'managers')
w.extend(managers)
# Add pending invitees to the watcher_recipients list if applicable
if community.email_invitees:
# pending invites to board only
if send_to == SendToOption.BOARD_ONLY:
invitees = [i for i in community.invitations.exclude(
default_group_name=DefaultGroups.MEMBER)]
# All pending invites
elif send_to == SendToOption.ALL_MEMBERS:
invitees = [i for i in community.invitations.all()]
w.extend(invitees)
watcher_recipients = set(w)
# Make a union of the two sets to create the actual recipient list
recipients = user_recipients | watcher_recipients
if not base_url:
base_url = settings.HOST_URL
d = data.copy() if data else {}
d.update({
'base_url': base_url,
'community': community,
'LANGUAGE_CODE': settings.LANGUAGE_CODE,
'MEDIA_URL': settings.MEDIA_URL,
'STATIC_URL': settings.STATIC_URL,
})
from_email = "%s <%s>" % (community.name, settings.FROM_EMAIL)
for recipient in recipients:
        # TODO: All this logic for populating the context is basically copied
        # from the same code in the views. This is not ideal, but without
        # refactoring large parts of the system it seems reasonable.
if notification_type == 'protocol_draft':
meeting_time = community.upcoming_meeting_scheduled_at
if not meeting_time:
meeting_time = datetime.datetime.now()
draft_agenda_payload = []
issue_status = IssueStatus.IS_UPCOMING
issues = community.issues.object_access_control(
user=recipient, community=community).filter(
active=True, status__in=(issue_status)).order_by(
'order_in_upcoming_meeting')
for issue in issues:
proposals = issue.proposals.object_access_control(
user=recipient, community=community)
draft_agenda_payload.append({'issue': issue, 'proposals': proposals})
agenda_items = community.draft_agenda(draft_agenda_payload)
item_attachments = [item['issue'].current_attachments() for
item in agenda_items]
d.update({
'recipient': recipient,
'meeting_time': meeting_time.replace(second=0),
'agenda_items': agenda_items,
'attachments': list(chain.from_iterable(item_attachments))
})
elif notification_type == 'protocol':
agenda_items = d['meeting'].agenda.object_access_control(
user=recipient, community=community).all()
# restrict the proposals of each agenda item
for ai in agenda_items:
ai.accepted_proposals = ai.accepted_proposals(
user=recipient, community=community)
ai.rejected_proposals = ai.rejected_proposals(
user=recipient, community=community)
ai.proposals = ai.proposals(
user=recipient, community=community)
d.update({
'recipient': recipient,
'agenda_items': agenda_items,
})
elif notification_type == 'agenda':
can_straw_vote = community.upcoming_proposals_any(
{'is_open': True}, user=recipient, community=community)\
and community.upcoming_meeting_is_published
upcoming_issues = community.upcoming_issues(user=recipient,
community=community)
issues = []
for i in upcoming_issues:
proposals = i.proposals.object_access_control(
user=recipient, community=community)
issues.append({'issue': i, 'proposals': proposals})
d.update({
'recipient': recipient,
'can_straw_vote': can_straw_vote,
'issue_container': issues
})
msg = {}
msg['subject'] = render_to_string("emails/{0}_title.txt".format(
notification_type), d).strip()
msg['body'] = render_to_string("emails/{0}.txt".format(notification_type), d)
as_html = render_to_string("emails/{0}.html".format(
notification_type), d)
msg['from_email'] = from_email
msg['to'] = [recipient.email]
msg = dict((k, v) for k, v in msg.iteritems() if v)
message = EmailMultiAlternatives(**msg)
message.attach_alternative(as_html, 'text/html')
message.send()
return len(recipients)
def _async_send_mail(*args, **kwargs):
django_rq.get_queue(settings.QUEUE_NAME).enqueue(
_base_send_mail, *args, description=u"Send mail",
language=settings.LANGUAGE_CODE, **kwargs)
return True
if not settings.OPENCOMMUNITY_ASYNC_NOTIFICATIONS:
send_mail = _base_send_mail
else:
send_mail = _async_send_mail
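# A minimal usage sketch (the community and sender objects are illustrative):
#
#   send_mail(community, 'agenda', request.user,
#             SendToOption.ALL_MEMBERS, data={}, with_guests=True)
#
# Returns the number of recipients when running synchronously, or True when
# queued through django_rq.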
|
videetssinghai/Blog-Rest-Api
|
refs/heads/master
|
lib/python2.7/site-packages/pip/utils/build.py
|
899
|
from __future__ import absolute_import
import os.path
import tempfile
from pip.utils import rmtree
class BuildDirectory(object):
def __init__(self, name=None, delete=None):
# If we were not given an explicit directory, and we were not given an
# explicit delete option, then we'll default to deleting.
if name is None and delete is None:
delete = True
if name is None:
# We realpath here because some systems have their default tmpdir
# symlinked to another directory. This tends to confuse build
# scripts, so we canonicalize the path by traversing potential
# symlinks here.
name = os.path.realpath(tempfile.mkdtemp(prefix="pip-build-"))
# If we were not given an explicit directory, and we were not given
# an explicit delete option, then we'll default to deleting.
if delete is None:
delete = True
self.name = name
self.delete = delete
def __repr__(self):
return "<{} {!r}>".format(self.__class__.__name__, self.name)
def __enter__(self):
return self.name
def __exit__(self, exc, value, tb):
self.cleanup()
def cleanup(self):
if self.delete:
rmtree(self.name)
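

# Minimal usage sketch (illustrative only, assuming the default delete
# behaviour): BuildDirectory is intended to be used as a context manager,
# so the temporary build tree is removed automatically when the block exits.
if __name__ == "__main__":
    with BuildDirectory() as build_dir:
        print("building in %s" % build_dir)
    # at this point the directory has been deleted, since delete defaulted to True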
|
rmk135/objects
|
refs/heads/master
|
examples/miniapps/decoupled-packages/example/photo/repositories.py
|
2
|
"""Photo repositories module."""
class PhotoRepository:
def __init__(self, entity_factory, fs, db):
self.entity_factory = entity_factory
self.fs = fs
self.db = db
def get_photos(self, user_id):
return [self.entity_factory() for _ in range(user_id*5)]
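

# Minimal usage sketch (illustrative only): the factory, filesystem and database
# collaborators are stand-ins here; in the full mini-application they are wired
# up by the dependency-injection container.
if __name__ == '__main__':
    repository = PhotoRepository(entity_factory=dict, fs=None, db=None)
    print(repository.get_photos(user_id=2))  # user_id * 5 placeholder entities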
|
Aeva/waterworks
|
refs/heads/master
|
waterworks/waterworks.py
|
1
|
# This file is part of Waterworks
#
# Waterworks is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Waterworks is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Waterworks. If not, see <http://www.gnu.org/licenses/>.
import glob
import imp
import os
import sys
import traceback
import plugins
from protoprotocol import ProtoProtocol
class WaterWorks(object):
def __init__(self):
_plugins = self.get_plugins()
print "Plugins found:", _plugins
pass
def get_plugins(self):
plugin_classes = []
base_dir = os.path.dirname(plugins.__file__)
plugin_paths = glob.glob(os.path.join(base_dir, "*.py"))
for path in plugin_paths:
plugin_name = os.path.split(path)[-1][:-3]
try:
plugin = imp.load_source(plugin_name, path)
except:
print "!!! Failed to load plugin:", path
traceback.print_exc()
print ""
continue
            plugin_attrs = [getattr(plugin, attr) for attr in dir(plugin)]
            # note: do not reset plugin_classes here, otherwise classes
            # collected from earlier plugin files would be discarded
            for attr in plugin_attrs:
try:
if issubclass(attr, ProtoProtocol) and attr.is_available():
plugin_classes.append(attr)
except TypeError:
continue
return plugin_classes
def start_daemon(*args):
client = WaterWorks()
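

# Sketch of what a plugin module under plugins/ is expected to provide, judging
# from get_plugins() above: a ProtoProtocol subclass whose is_available() can be
# called on the class itself. The names below are placeholders, not part of
# Waterworks:
#
#     from protoprotocol import ProtoProtocol
#
#     class ExamplePlugin(ProtoProtocol):
#         @classmethod
#         def is_available(cls):
#             return True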
|
kuangrewawa/OnosFw
|
refs/heads/master
|
tools/test/topos/chordal.py
|
41
|
"""
"""
from mininet.topo import Topo
from mininet.net import Mininet
from mininet.node import RemoteController
from mininet.node import Node
from mininet.node import CPULimitedHost
from mininet.link import TCLink
from mininet.cli import CLI
from mininet.log import setLogLevel
from mininet.util import dumpNodeConnections
class chordalTopo( Topo ):
def __init__( self, **opts ):
"Create a topology."
# Initialize Topology
Topo.__init__( self, **opts )
        # add nodes, switches first...
        switches = [self.addSwitch('s%d' % i) for i in range(1, 26)]

        # ... and now hosts
        hosts = [self.addHost('h%d' % i) for i in range(1, 26)]

        # add edges between each switch and its corresponding host
        for switch, host in zip(switches, hosts):
            self.addLink(switch, host)

        # connect every pair of switches to form a full mesh
        # (s1-s2, s1-s3, ..., s24-s25)
        for i in range(len(switches)):
            for j in range(i + 1, len(switches)):
                self.addLink(switches[i], switches[j])
topos = { 'chordal': ( lambda: chordalTopo() ) }
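
# With Mininet installed, this topology can be loaded from the command line,
# for example (the controller address below is an example only):
#   sudo mn --custom chordal.py --topo chordal --controller=remote,ip=127.0.0.1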
|
CINPLA/expipe-dev
|
refs/heads/master
|
exana/exana/tracking/fields.py
|
1
|
import numpy as np
import quantities as pq
def spatial_rate_map(x, y, t, sptr, binsize=0.01*pq.m, box_xlen=1*pq.m,
box_ylen=1*pq.m, mask_unvisited=True, convolve=True,
return_bins=False, smoothing=0.02):
"""Divide a 2D space in bins of size binsize**2, count the number of spikes
in each bin and divide by the time spent in respective bins. The map can
then be convolved with a gaussian kernel of size csize determined by the
smoothing factor, binsize and box_xlen.
Parameters
----------
sptr : neo.SpikeTrain
x : quantities.Quantity array in m
1d vector of x positions
y : quantities.Quantity array in m
1d vector of y positions
t : quantities.Quantity array in s
1d vector of times at x, y positions
binsize : float
spatial binsize
box_xlen : quantities scalar in m
side length of quadratic box
mask_unvisited: bool
        set bins that have not been visited to NaN
convolve : bool
convolve the rate map with a 2D Gaussian kernel
Returns
-------
out : rate map
if return_bins = True
out : rate map, xbins, ybins
"""
from exana.misc.tools import is_quantities
if not all([len(var) == len(var2) for var in [x,y,t] for var2 in [x,y,t]]):
raise ValueError('x, y, t must have same number of elements')
if box_xlen < x.max() or box_ylen < y.max():
raise ValueError('box length must be larger or equal to max path length')
from decimal import Decimal as dec
decimals = 1e10
remainderx = dec(float(box_xlen)*decimals) % dec(float(binsize)*decimals)
remaindery = dec(float(box_ylen)*decimals) % dec(float(binsize)*decimals)
if remainderx != 0 or remaindery != 0:
raise ValueError('the remainder should be zero i.e. the ' +
'box length should be an exact multiple ' +
'of the binsize')
is_quantities([x, y, t], 'vector')
is_quantities(binsize, 'scalar')
t = t.rescale('s')
box_xlen = box_xlen.rescale('m').magnitude
box_ylen = box_ylen.rescale('m').magnitude
binsize = binsize.rescale('m').magnitude
x = x.rescale('m').magnitude
y = y.rescale('m').magnitude
# interpolate one extra timepoint
t_ = np.array(t.tolist() + [t.max() + np.median(np.diff(t))]) * pq.s
spikes_in_bin, _ = np.histogram(sptr.times, t_)
time_in_bin = np.diff(t_.magnitude)
xbins = np.arange(0, box_xlen + binsize, binsize)
ybins = np.arange(0, box_ylen + binsize, binsize)
ix = np.digitize(x, xbins, right=True)
iy = np.digitize(y, ybins, right=True)
spike_pos = np.zeros((xbins.size, ybins.size))
time_pos = np.zeros((xbins.size, ybins.size))
for n in range(len(x)):
spike_pos[ix[n], iy[n]] += spikes_in_bin[n]
time_pos[ix[n], iy[n]] += time_in_bin[n]
# correct for shifting of map
spike_pos = spike_pos[1:, 1:]
time_pos = time_pos[1:, 1:]
with np.errstate(divide='ignore', invalid='ignore'):
rate = np.divide(spike_pos, time_pos)
if convolve:
rate[np.isnan(rate)] = 0. # for convolution
from astropy.convolution import Gaussian2DKernel, convolve_fft
csize = (box_xlen / binsize) * smoothing
kernel = Gaussian2DKernel(csize)
rate = convolve_fft(rate, kernel) # TODO edge correction
if mask_unvisited:
was_in_bin = np.asarray(time_pos, dtype=bool)
rate[np.invert(was_in_bin)] = np.nan
if return_bins:
return rate.T, xbins, ybins
else:
return rate.T
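

# Example of calling spatial_rate_map on toy data (illustrative sketch only;
# the numbers are made up, and neo, quantities and astropy must be installed):
#
#     import numpy as np
#     import quantities as pq
#     import neo
#     t = np.linspace(0, 10, 1001) * pq.s
#     x = np.abs(np.sin(t.magnitude / 3.)) * pq.m   # positions inside a 1 m box
#     y = np.abs(np.cos(t.magnitude / 3.)) * pq.m
#     sptr = neo.SpikeTrain([1.0, 2.5, 7.3] * pq.s, t_stop=10 * pq.s)
#     rate, xbins, ybins = spatial_rate_map(x, y, t, sptr, binsize=0.05 * pq.m,
#                                           return_bins=True)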
def gridness(rate_map, box_xlen, box_ylen, return_acorr=False,
step_size=0.1*pq.m):
    '''Calculates gridness of a rate map. Calculates the normalized
    autocorrelation (A) of a rate map B, where A is given as
    A = 1/n \sum_{x,y} (B - \bar{B})^2 / \sigma_{B}^2. Further, Pearson's
    product-moment correlation coefficient is calculated between A and A
    rotated by multiples of 30 degrees. Finally the gridness is calculated as
    the difference between the minimum of the coefficients at multiples of 60
    degrees and the maximum of the coefficients at the remaining rotations,
    i.e. gridness = min(r60) - max(r30).
    In order to focus the analysis on the symmetry of A, the central and the
    outer parts of A are masked out at radii increasing in steps of
    ``step_size``, and the gridness is maximized over these masks. This
    function is inspired by Lukas Solanka's gridcells package from
    Matt Nolan's lab.
Parameters
----------
rate_map : numpy.ndarray
box_xlen : quantities scalar in m
side length of quadratic box
step_size : quantities scalar in m
step size in masking
return_acorr : bool
return autocorrelation map or not
Returns
-------
out : gridness, (autocorrelation map)
'''
from scipy.ndimage.interpolation import rotate
import numpy.ma as ma
from exana.misc.tools import (is_quantities, fftcorrelate2d,
masked_corrcoef2d)
is_quantities([box_xlen, box_ylen, step_size], 'scalar')
box_xlen = box_xlen.rescale('m').magnitude
box_ylen = box_ylen.rescale('m').magnitude
step_size = step_size.rescale('m').magnitude
tmp_map = rate_map.copy()
tmp_map[~np.isfinite(tmp_map)] = 0
acorr = fftcorrelate2d(tmp_map, tmp_map, mode='full', normalize=True)
rows, cols = acorr.shape
b_x = np.linspace(-box_xlen/2., box_xlen/2., rows)
b_y = np.linspace(-box_ylen/2., box_ylen/2., cols)
B_x, B_y = np.meshgrid(b_x, b_y)
grids = []
acorrs = []
# TODO find size of middle gaussian and exclude
for outer in np.arange(box_xlen/4, box_xlen/2, step_size):
m_acorr = ma.masked_array(acorr, mask=np.sqrt(B_x**2 + B_y**2) > outer)
for inner in np.arange(0, box_xlen/4, step_size):
m_acorr = \
ma.masked_array(m_acorr, mask=np.sqrt(B_x**2 + B_y**2) < inner)
angles = range(30, 180+30, 30)
corr = []
# Rotate and compute correlation coefficient
for angle in angles:
rot_acorr = rotate(m_acorr, angle, reshape=False)
corr.append(masked_corrcoef2d(rot_acorr, m_acorr)[0, 1])
r60 = corr[1::2]
r30 = corr[::2]
grids.append(np.min(r60) - np.max(r30))
acorrs.append(m_acorr)
if return_acorr:
return max(grids), acorr, # acorrs[grids.index(max(grids))]
else:
return max(grids)
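

# Typical chaining (sketch only): a rate map such as the one produced by the
# spatial_rate_map example above can be fed straight into gridness; the box
# sizes must match those used when building the rate map.
#
#     g = gridness(rate, box_xlen=1 * pq.m, box_ylen=1 * pq.m)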
def occupancy_map(x, y, t,
binsize=0.01*pq.m,
box_xlen=1*pq.m,
box_ylen=1*pq.m,
convolve=True,
return_bins=False,
smoothing=0.02):
'''Divide a 2D space in bins of size binsize**2, count the time spent
in each bin. The map can be convolved with a gaussian kernel of size
csize determined by the smoothing factor, binsize and box_xlen.
Parameters
----------
x : quantities.Quantity array in m
1d vector of x positions
y : quantities.Quantity array in m
1d vector of y positions
t : quantities.Quantity array in s
1d vector of times at x, y positions
binsize : float
spatial binsize
box_xlen : quantities scalar in m
side length of quadratic box
convolve : bool
convolve the rate map with a 2D Gaussian kernel
Returns
-------
occupancy_map : numpy.ndarray
if return_bins = True
out : occupancy_map, xbins, ybins
'''
from exana.misc.tools import is_quantities
if not all([len(var) == len(var2) for var in [
x, y, t] for var2 in [x, y, t]]):
raise ValueError('x, y, t must have same number of elements')
if box_xlen < x.max() or box_ylen < y.max():
raise ValueError(
'box length must be larger or equal to max path length')
from decimal import Decimal as dec
decimals = 1e10
remainderx = dec(float(box_xlen)*decimals) % dec(float(binsize)*decimals)
remaindery = dec(float(box_ylen)*decimals) % dec(float(binsize)*decimals)
if remainderx != 0 or remaindery != 0:
raise ValueError('the remainder should be zero i.e. the ' +
'box length should be an exact multiple ' +
'of the binsize')
is_quantities([x, y, t], 'vector')
is_quantities(binsize, 'scalar')
t = t.rescale('s')
box_xlen = box_xlen.rescale('m').magnitude
box_ylen = box_ylen.rescale('m').magnitude
binsize = binsize.rescale('m').magnitude
x = x.rescale('m').magnitude
y = y.rescale('m').magnitude
# interpolate one extra timepoint
t_ = np.array(t.tolist() + [t.max() + np.median(np.diff(t))]) * pq.s
time_in_bin = np.diff(t_.magnitude)
xbins = np.arange(0, box_xlen + binsize, binsize)
ybins = np.arange(0, box_ylen + binsize, binsize)
ix = np.digitize(x, xbins, right=True)
iy = np.digitize(y, ybins, right=True)
time_pos = np.zeros((xbins.size, ybins.size))
for n in range(len(x)):
time_pos[ix[n], iy[n]] += time_in_bin[n]
# correct for shifting of map since digitize returns values at right edges
time_pos = time_pos[1:, 1:]
if convolve:
from astropy.convolution import Gaussian2DKernel, convolve_fft
csize = (box_xlen / binsize) * smoothing
kernel = Gaussian2DKernel(csize)
time_pos = convolve_fft(time_pos, kernel) # TODO edge correction
if return_bins:
return time_pos.T, xbins, ybins
else:
return time_pos.T
def nvisits_map(x, y, t,
binsize=0.01*pq.m,
box_xlen=1*pq.m,
box_ylen=1*pq.m,
return_bins=False):
    '''Divide a 2D space in bins of size binsize**2 and count the
    number of visits in each bin.
Parameters
----------
x : quantities.Quantity array in m
1d vector of x positions
y : quantities.Quantity array in m
1d vector of y positions
t : quantities.Quantity array in s
1d vector of times at x, y positions
binsize : float
spatial binsize
box_xlen : quantities scalar in m
side length of quadratic box
Returns
-------
nvisits_map : numpy.ndarray
if return_bins = True
out : nvisits_map, xbins, ybins
'''
from exana.misc.tools import is_quantities
if not all([len(var) == len(var2) for var in [
x, y, t] for var2 in [x, y, t]]):
raise ValueError('x, y, t must have same number of elements')
if box_xlen < x.max() or box_ylen < y.max():
raise ValueError(
'box length must be larger or equal to max path length')
from decimal import Decimal as dec
decimals = 1e10
remainderx = dec(float(box_xlen)*decimals) % dec(float(binsize)*decimals)
remaindery = dec(float(box_ylen)*decimals) % dec(float(binsize)*decimals)
if remainderx != 0 or remaindery != 0:
raise ValueError('the remainder should be zero i.e. the ' +
'box length should be an exact multiple ' +
'of the binsize')
is_quantities([x, y, t], 'vector')
is_quantities(binsize, 'scalar')
t = t.rescale('s')
box_xlen = box_xlen.rescale('m').magnitude
box_ylen = box_ylen.rescale('m').magnitude
binsize = binsize.rescale('m').magnitude
x = x.rescale('m').magnitude
y = y.rescale('m').magnitude
xbins = np.arange(0, box_xlen + binsize, binsize)
ybins = np.arange(0, box_ylen + binsize, binsize)
ix = np.digitize(x, xbins, right=True)
iy = np.digitize(y, ybins, right=True)
nvisits_map = np.zeros((xbins.size, ybins.size))
for n in range(len(x)):
if n == 0:
nvisits_map[ix[n], iy[n]] = 1
else:
if ix[n-1] != ix[n] or iy[n-1] != iy[n]:
nvisits_map[ix[n], iy[n]] += 1
# correct for shifting of map since digitize returns values at right edges
nvisits_map = nvisits_map[1:, 1:]
if return_bins:
return nvisits_map.T, xbins, ybins
else:
return nvisits_map.T
def spatial_rate_map_1d(x, t, sptr,
binsize=0.01*pq.m,
track_len=1*pq.m,
mask_unvisited=True,
convolve=True,
return_bins=False,
smoothing=0.02):
"""Take x coordinates of linear track data, divide in bins of binsize,
count the number of spikes in each bin and divide by the time spent in
respective bins. The map can then be convolved with a gaussian kernel of
size csize determined by the smoothing factor, binsize and box_xlen.
Parameters
----------
sptr : neo.SpikeTrain
x : quantities.Quantity array in m
1d vector of x positions
t : quantities.Quantity array in s
1d vector of times at x, y positions
binsize : float
spatial binsize
    track_len : quantities scalar in m
        length of the linear track
    mask_unvisited : bool
        set bins that have not been visited to NaN
    convolve : bool
        convolve the rate map with a Gaussian kernel
Returns
-------
out : rate map
if return_bins = True
out : rate map, xbins
"""
from exana.misc.tools import is_quantities
if not all([len(var) == len(var2) for var in [x, t] for var2 in [x, t]]):
raise ValueError('x, t must have same number of elements')
if track_len < x.max():
raise ValueError('track length must be\
larger or equal to max path length')
from decimal import Decimal as dec
decimals = 1e10
remainderx = dec(float(track_len)*decimals) % dec(float(binsize)*decimals)
if remainderx != 0:
raise ValueError('the remainder should be zero i.e. the ' +
'box length should be an exact multiple ' +
'of the binsize')
is_quantities([x, t], 'vector')
is_quantities(binsize, 'scalar')
t = t.rescale('s')
track_len = track_len.rescale('m').magnitude
binsize = binsize.rescale('m').magnitude
x = x.rescale('m').magnitude
# interpolate one extra timepoint
t_ = np.array(t.tolist() + [t.max() + np.median(np.diff(t))]) * pq.s
spikes_in_bin, _ = np.histogram(sptr.times, t_)
time_in_bin = np.diff(t_.magnitude)
xbins = np.arange(0, track_len + binsize, binsize)
ix = np.digitize(x, xbins, right=True)
spike_pos = np.zeros(xbins.size)
time_pos = np.zeros(xbins.size)
for n in range(len(x)):
spike_pos[ix[n]] += spikes_in_bin[n]
time_pos[ix[n]] += time_in_bin[n]
# correct for shifting of map since digitize returns values at right edges
spike_pos = spike_pos[1:]
time_pos = time_pos[1:]
with np.errstate(divide='ignore', invalid='ignore'):
rate = np.divide(spike_pos, time_pos)
if convolve:
rate[np.isnan(rate)] = 0. # for convolution
        # the rate vector is one-dimensional here, so use a 1D kernel
        # (a 2D kernel would make convolve_fft fail on the dimension mismatch)
        from astropy.convolution import Gaussian1DKernel, convolve_fft
        csize = (track_len / binsize) * smoothing
        kernel = Gaussian1DKernel(csize)
rate = convolve_fft(rate, kernel) # TODO edge correction
if mask_unvisited:
was_in_bin = np.asarray(time_pos, dtype=bool)
rate[np.invert(was_in_bin)] = np.nan
if return_bins:
return rate.T, xbins
else:
return rate.T
def separate_fields(rate_map, laplace_thrsh = 0, center_method = 'maxima',
cutoff_method='none', box_xlen=1*pq.m,
box_ylen=1*pq.m,index=False):
"""Separates fields using the laplacian to identify fields separated by
a negative second derivative.
Parameters
----------
rate_map : np 2d array
firing rate in each bin
laplace_thrsh : float
value of laplacian to separate fields by relative to the minima. Should be
on the interval 0 to 1, where 0 cuts off at 0 and 1 cuts off at
min(laplace(rate_map)). Default 0.
center_method : string
method to find field centers. Valid options = ['center_of_mass',
'maxima','gaussian_fit']
cutoff_method (optional) : string or function
function to exclude small fields. If local field value of function
is lower than global function value, the field is excluded. Valid
string_options = ['median', 'mean','none'].
index : bool, default False
return bump center values as index or xy-pos
Returns
-------
fields : numpy array, shape like rate_map.
contains areas all filled with same value, corresponding to fields
in rate_map. The values are in range(1,nFields + 1), sorted by size of the
field (sum of all field values). 0 elsewhere.
n_field : int
field count
bump_centers : (n_field x 2) np ndarray
Coordinates of field centers
"""
cutoff_functions = {'mean':np.mean, 'median':np.median, 'none':None}
if not callable(cutoff_method):
try:
cutoff_func = cutoff_functions[cutoff_method]
except KeyError:
msg = "invalid cutoff_method flag '%s'" % cutoff_method
raise ValueError(msg)
else:
cutoff_func = cutoff_method
from scipy import ndimage
l = ndimage.laplace(rate_map)
l[l>laplace_thrsh*np.min(l)] = 0
# Labels areas of the laplacian not connected by values > 0.
fields, n_fields = ndimage.label(l)
# index 0 is the background
indx = np.arange(1,n_fields+1)
# Use cutoff method to remove unwanted fields
if cutoff_method != 'none':
try:
total_value = cutoff_func(fields)
except:
            print("Unexpected error, cutoff_func doesn't like the input:")
raise
field_values = ndimage.labeled_comprehension(rate_map, fields, indx,
cutoff_func, float, 0)
try:
is_field = field_values >= total_value
except:
            print("cutoff_func return_values doesn't want to compare:")
raise
if np.sum(is_field) == 0:
return np.zeros(rate_map.shape), 0, np.array([[],[]])
for i in indx:
if not is_field[i-1]:
fields[fields == i] = 0
n_fields = ndimage.label(fields, output=fields)
indx = np.arange(1,n_fields + 1)
# Sort by largest mean
sizes = ndimage.labeled_comprehension(rate_map, fields, indx,
np.mean, float, 0)
size_sort = np.argsort(sizes)[::-1]
new = np.zeros_like(fields)
for i in np.arange(n_fields):
new[fields == size_sort[i]+1] = i+1
fields = new
bc = get_bump_centers(rate_map,labels=fields,ret_index=index,indices=indx,method=center_method,
units=box_xlen.units)
# TODO exclude fields where maxima is on the edge of the field?
return fields, n_fields, bc
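

# Sketch of separate_fields on a toy map (illustrative only; the exact labels
# found depend on the Laplacian threshold and cutoff method chosen):
#
#     import numpy as np
#     toy = np.zeros((20, 20))
#     toy[4:7, 4:7] = 1.0      # one synthetic firing field
#     toy[12:15, 13:16] = 2.0  # a second, stronger field
#     fields, n_fields, centers = separate_fields(toy)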
def get_bump_centers(rate_map, labels, ret_index=False, indices=None, method='maxima',
units=1*pq.m):
"""Finds center of fields at labels."""
from scipy import ndimage
if method not in ['maxima','center_of_mass','gaussian_fit']:
msg = "invalid center_method flag '%s'" % method
raise ValueError(msg)
if indices is None:
indices = np.arange(1,np.max(labels)+1)
if method == 'maxima':
bc = ndimage.maximum_position(rate_map, labels=labels,
index=indices)
elif method == 'center_of_mass':
bc = ndimage.center_of_mass(rate_map, labels=labels, index=indices)
elif method == 'gaussian_fit':
from exana.tracking.tools import fit_gauss_asym
bc = np.zeros((len(indices),2))
import matplotlib.pyplot as plt
for i in indices:
r = rate_map.copy()
r[labels != i] = 0
popt = fit_gauss_asym(r, return_data=False)
# TODO Find out which axis is x and which is y
bc[i-1] = (popt[2],popt[1])
if ret_index:
msg = 'ret_index not implemented for gaussian fit'
raise NotImplementedError(msg)
if not ret_index and not method=='gaussian_fit':
bc = (bc + np.array((0.5,0.5)))/rate_map.shape
return np.array(bc)*units
def find_avg_dist(rate_map, thrsh = 0, plot=False):
"""Uses autocorrelation and separate_fields to find average distance
between bumps. Is dependent on high gridness to get separate bumps in
the autocorrelation
Parameters
----------
rate_map : np 2d array
firing rate in each bin
thrsh (optional) : float, default 0
cutoff value for the laplacian of the autocorrelation function.
Should be a negative number. Gives better separation if bumps are
connected by "bridges" or saddles where the laplacian is negative.
plot (optional) : bool, default False
plot acorr and the separated acorr, with bump centers
Returns
-------
avg_dist : float
relative units from 0 to 1 of the box size
"""
from scipy.ndimage import maximum_position
from exana.misc.tools import fftcorrelate2d
# autocorrelate. Returns array (2x - 1) the size of rate_map
acorr = fftcorrelate2d(rate_map,rate_map, mode = 'full', normalize = True)
#acorr[acorr<0] = 0 # TODO Fix this
f, nf, bump_centers = separate_fields(acorr,laplace_thrsh=thrsh,
center_method='maxima',cutoff_method='median')
# TODO Find a way to find valid value for
# thrsh, or remove.
bump_centers = np.array(bump_centers)
# find dists from center in (autocorrelation)relative units (from 0 to 1)
distances = np.linalg.norm(bump_centers - (0.5,0.5), axis = 1)
dist_sort = np.argsort(distances)
distances = distances[dist_sort]
# use maximum 6 closest values except center value
avg_dist = np.median(distances[1:7])
# correct for difference in shapes
avg_dist *= acorr.shape[0]/rate_map.shape[0] # = 1.98
# TODO : raise warning if too big difference between points
if plot:
import matplotlib.pyplot as plt
fig,[ax1,ax2] = plt.subplots(1,2)
ax1.imshow(acorr,extent = (0,1,0,1),origin='lower')
ax1.scatter(*(bump_centers[:,::-1].T))
ax2.imshow(f,extent = (0,1,0,1),origin='lower')
ax2.scatter(*(bump_centers[:,::-1].T))
return avg_dist
def fit_hex(bump_centers, avg_dist=None, plot_bumps = False, method='best'):
"""Fits a hex grid to a given set of bumps. Uses the three bumps most
Parameters
----------
bump_centers : Nx2 np.array
x,y positions of bump centers, x,y /in (0,1)
avg_dist (optional): float
average spacing between bumps
plot_bumps (optional): bool
        if True, plots the three bumps most likely to be in a
        correct hex position onto the current matplotlib axes.
method (optional): string, valid options: ['closest', 'best']
method to find angle from neighboring bumps.
'closest' uses six bumps nearest to center bump
'best' uses the two bumps nearest to avg_dist
Returns
-------
displacement : float
distance of bump closest to the center in meters
orientation : float
orientation of hexagon (in degrees)
"""
valid_methods = ['closest', 'best']
if method not in valid_methods:
msg = "invalid method flag '%s'" % method
raise ValueError(msg)
bump_centers = np.array(bump_centers)
# sort by distance to center
d = np.linalg.norm(bump_centers - (0.5,0.5), axis=1)
d_sort = np.argsort(d)
dist_sorted = bump_centers[d_sort]
center_bump = dist_sorted[0]; others = dist_sorted[1:]
displacement = d[d_sort][0]
# others distances to center bumps
relpos = others - center_bump
reldist = np.linalg.norm(relpos, axis=1)
if method == 'closest':
# get 6 closest bumps
rel_sort = np.argsort(reldist)
closest = others[rel_sort][:6]
relpos = relpos[rel_sort][:6]
elif method == 'best':
        # get 2 bumps such that \sum_{i \neq j}(|r_i - r_j| - avg_dist)^2 is minimized
squares = 1e32*np.ones((others.shape[0], others.shape[0]))
for i in range(len(relpos)):
for j in range(i,len(relpos)):
rel1 = (reldist[i] - avg_dist)**2
rel2 = (reldist[j] - avg_dist)**2
rel3 = (np.linalg.norm(relpos[i]-relpos[j]) - avg_dist)**2
squares[i,j] = rel1 + rel2 + rel3
rel_slice = np.unravel_index(np.argmin(squares), squares.shape)
rel_slice = np.array(rel_slice)
#rel_sort = np.argsort(np.abs(reldist-avg_dist))
closest = others[rel_slice]
relpos = relpos[rel_slice]
# sort by angle
a = np.arctan2(relpos[:,1], relpos[:,0])%(2*np.pi)
a_sort = np.argsort(a)
# extract lowest angle and convert to degrees
orientation = a[a_sort][0] *180/np.pi
# hex grid is symmetric under rotations of 60deg
orientation %= 60
if plot_bumps:
import matplotlib.pyplot as plt
ax=plt.gca()
i = 1
xmin, xmax = ax.get_xlim()
ymin, ymax = ax.get_ylim()
dx = xmax-xmin; dy = ymax - ymin
closest = closest[a_sort]
edges = [center_bump] if method == 'best' else []
edges += [c for c in closest]
edges = np.array(edges)*(dx,dy) + (xmin, ymin)
poly = plt.Polygon(edges, alpha=0.5,color='r')
ax.add_artist(poly)
return displacement, orientation
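

# Sketch of fitting a hexagon to hand-made bump centres (the values below are
# arbitrary and only meant to show the expected input format, x,y in (0, 1)):
#
#     import numpy as np
#     centres = np.array([[0.5, 0.5],
#                         [0.7, 0.5], [0.6, 0.67], [0.4, 0.67],
#                         [0.3, 0.5], [0.4, 0.33], [0.6, 0.33]])
#     displacement, orientation = fit_hex(centres, avg_dist=0.2, method='best')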
def calculate_grid_geometry(rate_map, plot_fields=False, **kwargs):
"""Calculates quantitative information about grid field.
Find bump centers, bump spacing, center diplacement and hexagon
orientation
Parameters
----------
rate_map : np 2d array
firing rate in each bin
plot_fields : if True, plots the field labels with field centers to the
current matplotlib ax. Default False
Returns
-------
bump_centers : 2d np.array
x,y positions of bump centers
avg_dist : float
average spacing between bumps, \in [0,1]
displacement : float
distance of bump closest to the center
orientation : float
orientation of hexagon (in degrees)
Other parameters
----------------
thrsh : float, default 0
see find_avg_dist()
center_method : string, valid options: ['maxima', 'center_of_mass']
        default: 'maxima'
see separate_fields()
method : string, valid options: ['closest', 'best']
see fit_hex()
Examples
--------
>>> import numpy as np
>>> rate_map = np.zeros((5,5))
>>> pos = np.array([ [0,2],
... [1,0],[1,4],
... [2,2],
... [3,0],[3,4],
... [4,2]])
>>> for(i,j) in pos:
... rate_map[i,j] = 1
...
>>> calculate_grid_geometry(rate_map)
(array([[ 0.5, 0.9],
[ 0.9, 0.7],
[ 0.1, 0.7],
[ 0.5, 0.5],
[ 0.9, 0.3],
[ 0.1, 0.3],
[ 0.5, 0.1]]) * m, 0.44721359549995793, 0.0, 26.565051177077983)
>>>
"""
from scipy.ndimage import mean, center_of_mass
# TODO: smooth data?
# smooth_rate_map = lambda x:x
# rate_map = smooth_rate_map(rate_map)
center_method = kwargs.pop('center_method',None)
if center_method:
fields, nfields, bump_centers = separate_fields(rate_map,
center_method=center_method)
else:
fields, nfields, bump_centers = separate_fields(rate_map)
if bump_centers.size == 0:
import warnings
        msg = "couldn't find bump centers, returning None"
warnings.warn(msg, RuntimeWarning, stacklevel=2)
return None,None,None,None,
sh = np.array(rate_map.shape)
if plot_fields:
print(fields)
import matplotlib.pyplot as plt
x=np.linspace(0,1,sh[0]+1)
y=np.linspace(0,1,sh[1]+1)
x,y = np.meshgrid(x,y)
ax = plt.gca()
print('nfields: ',nfields)
plt.pcolormesh(x,y, fields)
# switch from row-column to x-y
bump_centers = bump_centers[:,::-1]
thrsh = kwargs.pop('thrsh', None)
if thrsh:
avg_dist = find_avg_dist(rate_map, thrsh)
else:
avg_dist = find_avg_dist(rate_map)
displacement, orientation = fit_hex(bump_centers, avg_dist,
plot_bumps=plot_fields, **kwargs)
return bump_centers, avg_dist, displacement, orientation
class RandomDisplacementBounds(object):
"""random displacement with bounds"""
def __init__(self, xmin, xmax, stepsize=0.5):
self.xmin = np.array(xmin)
self.xmax = np.array(xmax)
self.stepsize = stepsize
def __call__(self, x):
"""take a random step but ensure the new position is within the bounds"""
while True:
# this could be done in a much more clever way, but it will work for example purposes
xnew = x + (self.xmax-self.xmin)*np.random.uniform(-self.stepsize,
self.stepsize, np.shape(x))
if np.all(xnew < self.xmax) and np.all(xnew > self.xmin):
break
return xnew
def optimize_sep_fields(rate_map,step = 0.04, niter=40, T = 1.0, method = 'SLSQP',
glob=True, x0 = [0.065,0.1],callback=None):
"""Optimizes the separation of the fields by minimizing an error
function
Parameters:
-----------
rate_map :
method :
valid methods=['L-BFGS-B', 'TNC', 'SLSQP']
x0 : list
        initial values for smoothing and laplace_thrsh
Returns:
--------
res :
Result of the optimization. Contains smoothing and laplace_thrsh in
attribute res.x"""
from scipy import optimize
from exana.tracking.tools import separation_error_func as err_func
valid_methods = ['L-BFGS-B', 'TNC', 'SLSQP']
if method not in valid_methods:
raise ValueError('invalid method flag %s' %method)
rate_map[np.isnan(rate_map)] = 0.
xmin = [0.025, 0]
xmax = [0.2, 1]
bounds = [(low,high) for low,high in zip(xmin,xmax)]
obj_func = lambda args: err_func(args[0], args[1], rate_map)
if glob:
take_step = RandomDisplacementBounds(xmin, xmax,stepsize=step)
minimizer_kwargs = dict(method=method, bounds=bounds)
res = optimize.basinhopping(obj_func, x0, niter=niter, T = T,
minimizer_kwargs=minimizer_kwargs,
take_step=take_step,callback=callback)
else:
res = optimize.minimize(obj_func, x0, method=method, bounds = bounds, options={'disp': True})
return res
if __name__ == "__main__":
import doctest
doctest.testmod()
|
edlabh/SickRage
|
refs/heads/master
|
lib/unidecode/x079.py
|
252
|
data = (
'Tani ', # 0x00
'Jiao ', # 0x01
'[?] ', # 0x02
'Zhang ', # 0x03
'Qiao ', # 0x04
'Dun ', # 0x05
'Xian ', # 0x06
'Yu ', # 0x07
'Zhui ', # 0x08
'He ', # 0x09
'Huo ', # 0x0a
'Zhai ', # 0x0b
'Lei ', # 0x0c
'Ke ', # 0x0d
'Chu ', # 0x0e
'Ji ', # 0x0f
'Que ', # 0x10
'Dang ', # 0x11
'Yi ', # 0x12
'Jiang ', # 0x13
'Pi ', # 0x14
'Pi ', # 0x15
'Yu ', # 0x16
'Pin ', # 0x17
'Qi ', # 0x18
'Ai ', # 0x19
'Kai ', # 0x1a
'Jian ', # 0x1b
'Yu ', # 0x1c
'Ruan ', # 0x1d
'Meng ', # 0x1e
'Pao ', # 0x1f
'Ci ', # 0x20
'[?] ', # 0x21
'[?] ', # 0x22
'Mie ', # 0x23
'Ca ', # 0x24
'Xian ', # 0x25
'Kuang ', # 0x26
'Lei ', # 0x27
'Lei ', # 0x28
'Zhi ', # 0x29
'Li ', # 0x2a
'Li ', # 0x2b
'Fan ', # 0x2c
'Que ', # 0x2d
'Pao ', # 0x2e
'Ying ', # 0x2f
'Li ', # 0x30
'Long ', # 0x31
'Long ', # 0x32
'Mo ', # 0x33
'Bo ', # 0x34
'Shuang ', # 0x35
'Guan ', # 0x36
'Lan ', # 0x37
'Zan ', # 0x38
'Yan ', # 0x39
'Shi ', # 0x3a
'Shi ', # 0x3b
'Li ', # 0x3c
'Reng ', # 0x3d
'She ', # 0x3e
'Yue ', # 0x3f
'Si ', # 0x40
'Qi ', # 0x41
'Ta ', # 0x42
'Ma ', # 0x43
'Xie ', # 0x44
'Xian ', # 0x45
'Xian ', # 0x46
'Zhi ', # 0x47
'Qi ', # 0x48
'Zhi ', # 0x49
'Beng ', # 0x4a
'Dui ', # 0x4b
'Zhong ', # 0x4c
'[?] ', # 0x4d
'Yi ', # 0x4e
'Shi ', # 0x4f
'You ', # 0x50
'Zhi ', # 0x51
'Tiao ', # 0x52
'Fu ', # 0x53
'Fu ', # 0x54
'Mi ', # 0x55
'Zu ', # 0x56
'Zhi ', # 0x57
'Suan ', # 0x58
'Mei ', # 0x59
'Zuo ', # 0x5a
'Qu ', # 0x5b
'Hu ', # 0x5c
'Zhu ', # 0x5d
'Shen ', # 0x5e
'Sui ', # 0x5f
'Ci ', # 0x60
'Chai ', # 0x61
'Mi ', # 0x62
'Lu ', # 0x63
'Yu ', # 0x64
'Xiang ', # 0x65
'Wu ', # 0x66
'Tiao ', # 0x67
'Piao ', # 0x68
'Zhu ', # 0x69
'Gui ', # 0x6a
'Xia ', # 0x6b
'Zhi ', # 0x6c
'Ji ', # 0x6d
'Gao ', # 0x6e
'Zhen ', # 0x6f
'Gao ', # 0x70
'Shui ', # 0x71
'Jin ', # 0x72
'Chen ', # 0x73
'Gai ', # 0x74
'Kun ', # 0x75
'Di ', # 0x76
'Dao ', # 0x77
'Huo ', # 0x78
'Tao ', # 0x79
'Qi ', # 0x7a
'Gu ', # 0x7b
'Guan ', # 0x7c
'Zui ', # 0x7d
'Ling ', # 0x7e
'Lu ', # 0x7f
'Bing ', # 0x80
'Jin ', # 0x81
'Dao ', # 0x82
'Zhi ', # 0x83
'Lu ', # 0x84
'Shan ', # 0x85
'Bei ', # 0x86
'Zhe ', # 0x87
'Hui ', # 0x88
'You ', # 0x89
'Xi ', # 0x8a
'Yin ', # 0x8b
'Zi ', # 0x8c
'Huo ', # 0x8d
'Zhen ', # 0x8e
'Fu ', # 0x8f
'Yuan ', # 0x90
'Wu ', # 0x91
'Xian ', # 0x92
'Yang ', # 0x93
'Ti ', # 0x94
'Yi ', # 0x95
'Mei ', # 0x96
'Si ', # 0x97
'Di ', # 0x98
'[?] ', # 0x99
'Zhuo ', # 0x9a
'Zhen ', # 0x9b
'Yong ', # 0x9c
'Ji ', # 0x9d
'Gao ', # 0x9e
'Tang ', # 0x9f
'Si ', # 0xa0
'Ma ', # 0xa1
'Ta ', # 0xa2
'[?] ', # 0xa3
'Xuan ', # 0xa4
'Qi ', # 0xa5
'Yu ', # 0xa6
'Xi ', # 0xa7
'Ji ', # 0xa8
'Si ', # 0xa9
'Chan ', # 0xaa
'Tan ', # 0xab
'Kuai ', # 0xac
'Sui ', # 0xad
'Li ', # 0xae
'Nong ', # 0xaf
'Ni ', # 0xb0
'Dao ', # 0xb1
'Li ', # 0xb2
'Rang ', # 0xb3
'Yue ', # 0xb4
'Ti ', # 0xb5
'Zan ', # 0xb6
'Lei ', # 0xb7
'Rou ', # 0xb8
'Yu ', # 0xb9
'Yu ', # 0xba
'Chi ', # 0xbb
'Xie ', # 0xbc
'Qin ', # 0xbd
'He ', # 0xbe
'Tu ', # 0xbf
'Xiu ', # 0xc0
'Si ', # 0xc1
'Ren ', # 0xc2
'Tu ', # 0xc3
'Zi ', # 0xc4
'Cha ', # 0xc5
'Gan ', # 0xc6
'Yi ', # 0xc7
'Xian ', # 0xc8
'Bing ', # 0xc9
'Nian ', # 0xca
'Qiu ', # 0xcb
'Qiu ', # 0xcc
'Chong ', # 0xcd
'Fen ', # 0xce
'Hao ', # 0xcf
'Yun ', # 0xd0
'Ke ', # 0xd1
'Miao ', # 0xd2
'Zhi ', # 0xd3
'Geng ', # 0xd4
'Bi ', # 0xd5
'Zhi ', # 0xd6
'Yu ', # 0xd7
'Mi ', # 0xd8
'Ku ', # 0xd9
'Ban ', # 0xda
'Pi ', # 0xdb
'Ni ', # 0xdc
'Li ', # 0xdd
'You ', # 0xde
'Zu ', # 0xdf
'Pi ', # 0xe0
'Ba ', # 0xe1
'Ling ', # 0xe2
'Mo ', # 0xe3
'Cheng ', # 0xe4
'Nian ', # 0xe5
'Qin ', # 0xe6
'Yang ', # 0xe7
'Zuo ', # 0xe8
'Zhi ', # 0xe9
'Zhi ', # 0xea
'Shu ', # 0xeb
'Ju ', # 0xec
'Zi ', # 0xed
'Huo ', # 0xee
'Ji ', # 0xef
'Cheng ', # 0xf0
'Tong ', # 0xf1
'Zhi ', # 0xf2
'Huo ', # 0xf3
'He ', # 0xf4
'Yin ', # 0xf5
'Zi ', # 0xf6
'Zhi ', # 0xf7
'Jie ', # 0xf8
'Ren ', # 0xf9
'Du ', # 0xfa
'Yi ', # 0xfb
'Zhu ', # 0xfc
'Hui ', # 0xfd
'Nong ', # 0xfe
'Fu ', # 0xff
)
|
mancoast/CPythonPyc_test
|
refs/heads/master
|
fail/334_test_fcntl.py
|
24
|
"""Test program for the fcntl C module.
OS/2+EMX doesn't support the file locking operations.
"""
import platform
import os
import struct
import sys
import _testcapi
import unittest
from test.support import verbose, TESTFN, unlink, run_unittest, import_module
# Skip test if no fcntl module.
fcntl = import_module('fcntl')
# TODO - Write tests for flock() and lockf().
def get_lockdata():
try:
os.O_LARGEFILE
except AttributeError:
start_len = "ll"
else:
start_len = "qq"
if (sys.platform.startswith(('netbsd', 'freebsd', 'openbsd', 'bsdos'))
or sys.platform == 'darwin'):
if struct.calcsize('l') == 8:
off_t = 'l'
pid_t = 'i'
else:
off_t = 'lxxxx'
pid_t = 'l'
lockdata = struct.pack(off_t + off_t + pid_t + 'hh', 0, 0, 0,
fcntl.F_WRLCK, 0)
elif sys.platform.startswith('gnukfreebsd'):
lockdata = struct.pack('qqihhi', 0, 0, 0, fcntl.F_WRLCK, 0, 0)
elif sys.platform in ['aix3', 'aix4', 'hp-uxB', 'unixware7']:
lockdata = struct.pack('hhlllii', fcntl.F_WRLCK, 0, 0, 0, 0, 0, 0)
elif sys.platform in ['os2emx']:
lockdata = None
else:
lockdata = struct.pack('hh'+start_len+'hh', fcntl.F_WRLCK, 0, 0, 0, 0, 0)
if lockdata:
if verbose:
print('struct.pack: ', repr(lockdata))
return lockdata
lockdata = get_lockdata()
class TestFcntl(unittest.TestCase):
def setUp(self):
self.f = None
def tearDown(self):
if self.f and not self.f.closed:
self.f.close()
unlink(TESTFN)
def test_fcntl_fileno(self):
# the example from the library docs
self.f = open(TESTFN, 'wb')
rv = fcntl.fcntl(self.f.fileno(), fcntl.F_SETFL, os.O_NONBLOCK)
if verbose:
print('Status from fcntl with O_NONBLOCK: ', rv)
if sys.platform not in ['os2emx']:
rv = fcntl.fcntl(self.f.fileno(), fcntl.F_SETLKW, lockdata)
if verbose:
print('String from fcntl with F_SETLKW: ', repr(rv))
self.f.close()
def test_fcntl_file_descriptor(self):
# again, but pass the file rather than numeric descriptor
self.f = open(TESTFN, 'wb')
rv = fcntl.fcntl(self.f, fcntl.F_SETFL, os.O_NONBLOCK)
if sys.platform not in ['os2emx']:
rv = fcntl.fcntl(self.f, fcntl.F_SETLKW, lockdata)
self.f.close()
def test_fcntl_bad_file(self):
class F:
def __init__(self, fn):
self.fn = fn
def fileno(self):
return self.fn
self.assertRaises(ValueError, fcntl.fcntl, -1, fcntl.F_SETFL, os.O_NONBLOCK)
self.assertRaises(ValueError, fcntl.fcntl, F(-1), fcntl.F_SETFL, os.O_NONBLOCK)
self.assertRaises(TypeError, fcntl.fcntl, 'spam', fcntl.F_SETFL, os.O_NONBLOCK)
self.assertRaises(TypeError, fcntl.fcntl, F('spam'), fcntl.F_SETFL, os.O_NONBLOCK)
# Issue 15989
self.assertRaises(OverflowError, fcntl.fcntl, _testcapi.INT_MAX + 1,
fcntl.F_SETFL, os.O_NONBLOCK)
self.assertRaises(OverflowError, fcntl.fcntl, F(_testcapi.INT_MAX + 1),
fcntl.F_SETFL, os.O_NONBLOCK)
self.assertRaises(OverflowError, fcntl.fcntl, _testcapi.INT_MIN - 1,
fcntl.F_SETFL, os.O_NONBLOCK)
self.assertRaises(OverflowError, fcntl.fcntl, F(_testcapi.INT_MIN - 1),
fcntl.F_SETFL, os.O_NONBLOCK)
@unittest.skipIf(
platform.machine().startswith('arm') and platform.system() == 'Linux',
"ARM Linux returns EINVAL for F_NOTIFY DN_MULTISHOT")
def test_fcntl_64_bit(self):
# Issue #1309352: fcntl shouldn't fail when the third arg fits in a
# C 'long' but not in a C 'int'.
try:
cmd = fcntl.F_NOTIFY
# This flag is larger than 2**31 in 64-bit builds
flags = fcntl.DN_MULTISHOT
except AttributeError:
self.skipTest("F_NOTIFY or DN_MULTISHOT unavailable")
fd = os.open(os.path.dirname(os.path.abspath(TESTFN)), os.O_RDONLY)
try:
fcntl.fcntl(fd, cmd, flags)
finally:
os.close(fd)
def test_main():
run_unittest(TestFcntl)
if __name__ == '__main__':
test_main()
|
widdowquinn/find_differential_primers
|
refs/heads/diagnostic_primers
|
diagnostic_primers/scripts/subcommands/subcmd_primersearch.py
|
1
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""subcmd_primersearch.py
Provides the primersearch subcommand for pdp.py
(c) The James Hutton Institute 2017-19
Author: Leighton Pritchard
Contact: leighton.pritchard@hutton.ac.uk
Leighton Pritchard,
Information and Computing Sciences,
James Hutton Institute,
Errol Road,
Invergowrie,
Dundee,
DD2 5DA,
Scotland,
UK
The MIT License
Copyright (c) 2017-19 The James Hutton Institute
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import os
from diagnostic_primers import primersearch
from diagnostic_primers.scripts.tools import (
collect_existing_output,
create_output_directory,
load_config_json,
log_clines,
run_parallel_jobs,
)
def subcmd_primersearch(args, logger):
"""Perform in silico hybridisation with EMBOSS PrimerSearch."""
# Does output already exist, and should we overwrite?
create_output_directory(args.ps_dir, args.ps_force, logger)
# Get config file data
coll = load_config_json(args, logger)
# If we are in recovery mode, we are salvaging output from a previous
# run, and do not necessarily need to rerun all the jobs. In this case,
# we prepare a list of output files we want to recover from the results
# in the output directory.
existingfiles = []
if args.recovery:
logger.warning("Entering recovery mode")
logger.info(
"\tIn this mode, existing comparison output from %s is reused", args.ps_dir
)
existingfiles = collect_existing_output(args.ps_dir, "primersearch", args)
logger.info(
"Existing files found:\n\t%s", "\n\t".join([_ for _ in existingfiles])
)
# Construct command lines for primersearch
logger.info("Building primersearch command-lines...")
mismatchpercent = int(100 * args.mismatchpercent) # for EMBOSS
clines = primersearch.build_commands(
coll, args.ps_dir, mismatchpercent, existingfiles
)
if clines:
pretty_clines = [str(c).replace(" -", " \\\n -") for c in clines]
log_clines(pretty_clines, logger)
run_parallel_jobs(clines, args, logger)
else:
logger.warning(
"No primersearch jobs were scheduled "
"(you may see this if the --recovery option is active)"
)
# Load PrimerSearch output and generate .json/.bed files of amplimers
# (regions on each target genome amplified by a primer)
logger.info("Identifying target amplicoms")
amplimers = primersearch.load_collection_amplicons(coll)
amplimerpath = os.path.join(args.ps_dir, "target_amplicons.json")
logger.info("Writing all target amplicons to %s", amplimerpath)
amplimers.write_json(amplimerpath)
# Subdivide the amplimers into a new PDPGenomeAmplicons object - one per
# input genome, and write bed/JSON files accordingly
logger.info("Writing individual amplicon files for each target")
for obj in amplimers.split_on_targets():
jsonpath = os.path.join(args.ps_dir, "{}_amplicons.json".format(obj.targets[0]))
logger.info("\tWorking with target %s", obj.targets[0])
obj.write_json(jsonpath)
obj.write_bed(args.ps_dir)
# Add the JSON file to the appropriate entry in the collection
coll[obj.name].target_amplicons = jsonpath
# Write new config file, and exit
logger.info("Writing new config file to %s", args.outfilename)
coll.write_json(args.outfilename)
return 0
|
dturner-tw/pants
|
refs/heads/master
|
src/python/pants/option/global_options.py
|
1
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import logging
import os
from pants.base.build_environment import (get_buildroot, get_pants_cachedir, get_pants_configdir,
pants_version)
from pants.option.arg_splitter import GLOBAL_SCOPE
from pants.option.custom_types import list_option
from pants.option.optionable import Optionable
from pants.option.scope import ScopeInfo
class GlobalOptionsRegistrar(Optionable):
options_scope = GLOBAL_SCOPE
options_scope_category = ScopeInfo.GLOBAL
@classmethod
def register_bootstrap_options(cls, register):
"""Register bootstrap options.
"Bootstrap options" are a small set of options whose values are useful when registering other
options. Therefore we must bootstrap them early, before other options are registered, let
alone parsed.
Bootstrap option values can be interpolated into the config file, and can be referenced
    programmatically in registration code, e.g., as register.bootstrap.pants_workdir.
Note that regular code can also access these options as normal global-scope options. Their
status as "bootstrap options" is only pertinent during option registration.
"""
buildroot = get_buildroot()
    # Although logging supports the WARN level, it's not documented and could conceivably be yanked.
    # Since pants has supported 'warn' since inception, leave the 'warn' choice as-is but explicitly
    # set up a 'WARN' logging level name that maps to 'WARNING'.
logging.addLevelName(logging.WARNING, 'WARN')
register('-l', '--level', choices=['debug', 'info', 'warn'], default='info', recursive=True,
help='Set the logging level.')
register('-q', '--quiet', action='store_true', recursive=True,
help='Squelches most console output.')
# Not really needed in bootstrap options, but putting it here means it displays right
# after -l and -q in help output, which is conveniently contextual.
register('--colors', action='store_true', default=True, recursive=True,
help='Set whether log messages are displayed in color.')
# Pants code uses this only to verify that we are of the requested version. However
# setup scripts, runner scripts, IDE plugins, etc., may grep this out of pants.ini
# and use it to select the right version.
# Note that to print the version of the pants instance you're running, use -v, -V or --version.
register('--pants-version', advanced=True, default=pants_version(),
help='Use this pants version.')
register('--plugins', advanced=True, type=list_option, help='Load these plugins.')
register('--plugin-cache-dir', advanced=True,
default=os.path.join(get_pants_cachedir(), 'plugins'),
help='Cache resolved plugin requirements here.')
register('--backend-packages', advanced=True, type=list_option,
help='Load backends from these packages that are already on the path.')
register('--pants-bootstrapdir', advanced=True, metavar='<dir>', default=get_pants_cachedir(),
help='Use this dir for global cache.')
register('--pants-configdir', advanced=True, metavar='<dir>', default=get_pants_configdir(),
help='Use this dir for global config files.')
register('--pants-workdir', advanced=True, metavar='<dir>',
default=os.path.join(buildroot, '.pants.d'),
help='Write intermediate output files to this dir.')
register('--pants-supportdir', advanced=True, metavar='<dir>',
default=os.path.join(buildroot, 'build-support'),
help='Use support files from this dir.')
register('--pants-distdir', advanced=True, metavar='<dir>',
default=os.path.join(buildroot, 'dist'),
help='Write end-product artifacts to this dir.')
register('--config-override', advanced=True, action='append', metavar='<path>',
help='A second config file, to override pants.ini.')
register('--pantsrc', advanced=True, action='store_true', default=True,
help='Use pantsrc files.')
register('--pantsrc-files', advanced=True, action='append', metavar='<path>',
default=['/etc/pantsrc', '~/.pants.rc'],
help='Override config with values from these files. '
'Later files override earlier ones.')
register('--pythonpath', advanced=True, action='append',
help='Add these directories to PYTHONPATH to search for plugins.')
register('--target-spec-file', action='append', dest='target_spec_files',
help='Read additional specs from this file, one per line')
# These logging options are registered in the bootstrap phase so that plugins can log during
# registration and not so that their values can be interpolated in configs.
register('-d', '--logdir', advanced=True, metavar='<dir>',
help='Write logs to files under this directory.')
# This facilitates bootstrap-time configuration of pantsd usage such that we can
# determine whether or not to use the Pailgun client to invoke a given pants run
# without resorting to heavier options parsing.
register('--enable-pantsd', advanced=True, action='store_true', default=False,
help='Enables use of the pants daemon. (Beta)')
@classmethod
def register_options(cls, register):
"""Register options not tied to any particular task or subsystem."""
# The bootstrap options need to be registered on the post-bootstrap Options instance, so it
# won't choke on them on the command line, and also so we can access their values as regular
# global-scope options, for convenience.
cls.register_bootstrap_options(register)
register('-x', '--time', action='store_true',
help='Output a timing report at the end of the run.')
register('-e', '--explain', action='store_true',
help='Explain the execution of goals.')
register('--tag', action='append', metavar='[+-]tag1,tag2,...',
help="Include only targets with these tags (optional '+' prefix) or without these "
"tags ('-' prefix). Useful with ::, to find subsets of targets "
"(e.g., integration tests.)")
register('-t', '--timeout', advanced=True, type=int, metavar='<seconds>',
help='Number of seconds to wait for http connections.')
# TODO: After moving to the new options system these abstraction leaks can go away.
register('-k', '--kill-nailguns', advanced=True, action='store_true',
help='Kill nailguns before exiting')
register('-i', '--interpreter', advanced=True, default=[], action='append',
metavar='<requirement>',
help="Constrain what Python interpreters to use. Uses Requirement format from "
"pkg_resources, e.g. 'CPython>=2.6,<3' or 'PyPy'. By default, no constraints "
"are used. Multiple constraints may be added. They will be ORed together.")
register('--exclude-target-regexp', advanced=True, action='append', default=[],
metavar='<regexp>',
help='Exclude targets that match these regexes. Useful with ::, to ignore broken '
'BUILD files.',
recursive=True) # TODO: Does this need to be recursive? What does that even mean?
register('--spec-excludes', advanced=True, action='append',
default=[register.bootstrap.pants_workdir],
help='Ignore these paths when evaluating the command-line target specs. Useful with '
'::, to avoid descending into unneeded directories.')
register('--fail-fast', advanced=True, action='store_true', recursive=True,
help='Exit as quickly as possible on error, rather than attempting to continue '
'to process the non-erroneous subset of the input.')
register('--cache-key-gen-version', advanced=True, default='200', recursive=True,
help='The cache key generation. Bump this to invalidate every artifact for a scope.')
register('--max-subprocess-args', advanced=True, type=int, default=100, recursive=True,
help='Used to limit the number of arguments passed to some subprocesses by breaking '
'the command up into multiple invocations')
register('--print-exception-stacktrace', advanced=True, action='store_true',
help='Print to console the full exception stack trace if encountered.')
register('--build-file-rev', advanced=True,
help='Read BUILD files from this scm rev instead of from the working tree. This is '
'useful for implementing pants-aware sparse checkouts.')
register('--lock', advanced=True, action='store_true', default=True,
help='Use a global lock to exclude other versions of pants from running during '
'critical operations.')
|
RevelSystems/django
|
refs/heads/master
|
tests/gis_tests/geoapp/test_serializers.py
|
50
|
from __future__ import unicode_literals
import json
from django.contrib.gis.geos import HAS_GEOS
from django.core import serializers
from django.test import TestCase, skipUnlessDBFeature
if HAS_GEOS:
from django.contrib.gis.geos import LinearRing, Point, Polygon
from .models import City, MultiFields, PennsylvaniaCity
@skipUnlessDBFeature("gis_enabled")
class GeoJSONSerializerTests(TestCase):
fixtures = ['initial']
def test_builtin_serializers(self):
"""
'geojson' should be listed in available serializers.
"""
all_formats = set(serializers.get_serializer_formats())
public_formats = set(serializers.get_public_serializer_formats())
        self.assertIn('geojson', all_formats)
self.assertIn('geojson', public_formats)
def test_serialization_base(self):
geojson = serializers.serialize('geojson', City.objects.all().order_by('name'))
try:
geodata = json.loads(geojson)
except Exception:
self.fail("Serialized output is not valid JSON")
self.assertEqual(len(geodata['features']), len(City.objects.all()))
self.assertEqual(geodata['features'][0]['geometry']['type'], 'Point')
self.assertEqual(geodata['features'][0]['properties']['name'], 'Chicago')
def test_geometry_field_option(self):
"""
When a model has several geometry fields, the 'geometry_field' option
can be used to specify the field to use as the 'geometry' key.
"""
MultiFields.objects.create(
city=City.objects.first(), name='Name', point=Point(5, 23),
poly=Polygon(LinearRing((0, 0), (0, 5), (5, 5), (5, 0), (0, 0))))
geojson = serializers.serialize('geojson', MultiFields.objects.all())
geodata = json.loads(geojson)
self.assertEqual(geodata['features'][0]['geometry']['type'], 'Point')
geojson = serializers.serialize('geojson', MultiFields.objects.all(),
geometry_field='poly')
geodata = json.loads(geojson)
self.assertEqual(geodata['features'][0]['geometry']['type'], 'Polygon')
def test_fields_option(self):
"""
The fields option allows to define a subset of fields to be present in
the 'properties' of the generated output.
"""
PennsylvaniaCity.objects.create(name='Mansfield', county='Tioga', point='POINT(-77.071445 41.823881)')
geojson = serializers.serialize('geojson', PennsylvaniaCity.objects.all(),
fields=('county', 'point'))
geodata = json.loads(geojson)
self.assertIn('county', geodata['features'][0]['properties'])
self.assertNotIn('founded', geodata['features'][0]['properties'])
def test_srid_option(self):
geojson = serializers.serialize('geojson', City.objects.all().order_by('name'), srid=2847)
geodata = json.loads(geojson)
self.assertEqual(
[int(c) for c in geodata['features'][0]['geometry']['coordinates']],
[1564802, 5613214])
def test_deserialization_exception(self):
"""
GeoJSON cannot be deserialized.
"""
with self.assertRaises(serializers.base.SerializerDoesNotExist):
serializers.deserialize('geojson', '{}')
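# Illustrative note (not part of the test suite): the serializer options exercised
# above can be combined in a single call; the field names below mirror the City model
# used in these tests and are otherwise assumptions.
#
#     geojson = serializers.serialize(
#         'geojson', City.objects.all(),
#         geometry_field='point', fields=('name',), srid=4326)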
|
BaileySN/RAID_Reporter
|
refs/heads/master
|
bin/hostinfo.py
|
1
|
import os
import socket
if os.name != "nt":
import fcntl
import struct
def get_interface(ifname):
    # Ask the kernel for the IPv4 address bound to ifname via the Linux
    # SIOCGIFADDR ioctl (0x8915); bytes 20:24 of the packed result hold the address.
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    return socket.inet_ntoa(fcntl.ioctl(s.fileno(), 0x8915, struct.pack('256s', ifname[:15]))[20:24])
def hostaddr():
    ip = socket.gethostbyname(socket.gethostname())
    # If the hostname only resolves to loopback, probe common interface names directly.
    if ip.startswith("127.") and os.name != "nt":
interfaces = [
"vmbr0",
"eth0",
"eth1",
"eth2",
"wlan0",
"wlan1",
"wifi0",
"ath0",
"ath1",
"ppp0",
]
for ifname in interfaces:
try:
ip = get_interface(ifname)
break
except IOError:
pass
return str(ip)
def hostn():
return str(os.uname()[1])
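# Minimal usage sketch (not part of the original module, POSIX only): print the
# detected hostname and address when run directly; the __main__ guard is an addition.
if __name__ == "__main__":
    print("%s %s" % (hostn(), hostaddr()))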
|
shawncaojob/LC
|
refs/heads/master
|
LINTCODE/29_interleaving_string.py
|
1
|
class Solution:
"""
@param: s1: A string
@param: s2: A string
@param: s3: A string
@return: Determine whether s3 is formed by interleaving of s1 and s2
"""
def isInterleave(self, s1, s2, s3):
# write your code here
n1, n2, n3 = len(s1), len(s2), len(s3)
if n1 + n2 != n3: return False
dp = [ [ False for j in xrange(n2 + 1)] for i in xrange(n1 + 1) ]
dp[0][0] = True
for j in xrange(1, n2 + 1):
dp[0][j] = True if dp[0][j-1] and s2[j-1] == s3[j-1] else False
for i in xrange(1, n1 + 1):
dp[i][0] = True if dp[i-1][0] and s1[i-1] == s3[i-1] else False
for i in xrange(1, n1 + 1):
for j in xrange(1, n2 + 1):
if s3[i+j-1] == s1[i-1] and dp[i-1][j]:
dp[i][j] = True
continue
if s3[i+j-1] == s2[j-1] and dp[i][j-1]:
dp[i][j] = True
continue
return dp[-1][-1]
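# Quick sanity check (illustrative, not part of the original submission). The classic
# example: "aadbbcbcac" interleaves "aabcc" and "dbbca", while "aadbbbaccc" does not.
if __name__ == "__main__":
    sol = Solution()
    print sol.isInterleave("aabcc", "dbbca", "aadbbcbcac")   # expected: True
    print sol.isInterleave("aabcc", "dbbca", "aadbbbaccc")   # expected: False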
|
analyseuc3m/ANALYSE-v1
|
refs/heads/master
|
cms/djangoapps/contentstore/management/commands/tests/test_create_course.py
|
137
|
"""
Unittests for creating a course in a chosen modulestore
"""
import unittest
import ddt
from django.core.management import CommandError, call_command
from contentstore.management.commands.create_course import Command
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.django import modulestore
class TestArgParsing(unittest.TestCase):
"""
Tests for parsing arguments for the `create_course` management command
"""
def setUp(self):
super(TestArgParsing, self).setUp()
self.command = Command()
def test_no_args(self):
errstring = "create_course requires 5 arguments"
with self.assertRaisesRegexp(CommandError, errstring):
self.command.handle('create_course')
def test_invalid_store(self):
with self.assertRaises(CommandError):
self.command.handle("foo", "user@foo.org", "org", "course", "run")
def test_xml_store(self):
with self.assertRaises(CommandError):
self.command.handle(ModuleStoreEnum.Type.xml, "user@foo.org", "org", "course", "run")
def test_nonexistent_user_id(self):
errstring = "No user 99 found"
with self.assertRaisesRegexp(CommandError, errstring):
self.command.handle("split", "99", "org", "course", "run")
def test_nonexistent_user_email(self):
errstring = "No user fake@example.com found"
with self.assertRaisesRegexp(CommandError, errstring):
self.command.handle("mongo", "fake@example.com", "org", "course", "run")
@ddt.ddt
class TestCreateCourse(ModuleStoreTestCase):
"""
Unit tests for creating a course in either old mongo or split mongo via command line
"""
def setUp(self):
super(TestCreateCourse, self).setUp(create_user=True)
@ddt.data(ModuleStoreEnum.Type.mongo, ModuleStoreEnum.Type.split)
def test_all_stores_user_email(self, store):
call_command(
"create_course",
store,
str(self.user.email),
"org", "course", "run"
)
new_key = modulestore().make_course_key("org", "course", "run")
self.assertTrue(
modulestore().has_course(new_key),
"Could not find course in {}".format(store)
)
# pylint: disable=protected-access
self.assertEqual(store, modulestore()._get_modulestore_for_courselike(new_key).get_modulestore_type())
|
meteorcloudy/tensorflow
|
refs/heads/master
|
tensorflow/contrib/rpc/python/ops/rpc_op.py
|
41
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
# pylint: disable=wildcard-import,unused-import
"""RPC communication."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.rpc.python.ops.gen_rpc_op import rpc
from tensorflow.contrib.rpc.python.ops.gen_rpc_op import try_rpc
from tensorflow.python.framework import ops
ops.NotDifferentiable("Rpc")
ops.NotDifferentiable("TryRpc")
|
omprakasha/odoo
|
refs/heads/8.0
|
addons/sale/res_partner.py
|
236
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields,osv
class res_partner(osv.osv):
_inherit = 'res.partner'
def _sale_order_count(self, cr, uid, ids, field_name, arg, context=None):
res = dict(map(lambda x: (x,0), ids))
# The current user may not have access rights for sale orders
try:
for partner in self.browse(cr, uid, ids, context):
res[partner.id] = len(partner.sale_order_ids) + len(partner.mapped('child_ids.sale_order_ids'))
except:
pass
return res
_columns = {
'sale_order_count': fields.function(_sale_order_count, string='# of Sales Order', type='integer'),
'sale_order_ids': fields.one2many('sale.order','partner_id','Sales Order')
}
|
Immortalin/python-for-android
|
refs/heads/master
|
python-modules/twisted/twisted/python/dxprofile.py
|
61
|
# Copyright (c) 2001-2007 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
DEPRECATED since Twisted 8.0.
Utility functions for reporting bytecode frequencies to Skip Montanaro's
stat collector.
This module requires a version of Python built with DYNAMIC_EXECUTION_PROFILE,
and optionally DXPAIRS, defined to be useful.
"""
import sys, types, xmlrpclib, warnings
warnings.warn("twisted.python.dxprofile is deprecated since Twisted 8.0.",
category=DeprecationWarning)
def rle(iterable):
"""
Run length encode a list.
"""
iterable = iter(iterable)
runlen = 1
result = []
try:
previous = iterable.next()
except StopIteration:
return []
for element in iterable:
if element == previous:
runlen = runlen + 1
continue
else:
if isinstance(previous, (types.ListType, types.TupleType)):
previous = rle(previous)
result.append([previous, runlen])
previous = element
runlen = 1
if isinstance(previous, (types.ListType, types.TupleType)):
previous = rle(previous)
result.append([previous, runlen])
return result
def report(email, appname):
"""
Send an RLE encoded version of sys.getdxp() off to our Top Men (tm)
for analysis.
"""
if hasattr(sys, 'getdxp') and appname:
dxp = xmlrpclib.ServerProxy("http://manatee.mojam.com:7304")
dxp.add_dx_info(appname, email, sys.version_info[:3], rle(sys.getdxp()))
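# Illustrative example (not part of the original Twisted module): run-length
# encoding a short list of counts.
if __name__ == '__main__':
    print rle([1, 1, 2, 2, 2, 3])   # -> [[1, 2], [2, 3], [3, 1]]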
|
argaen/aiocache
|
refs/heads/master
|
aiocache/plugins.py
|
1
|
"""
This module implements different plugins you can attach to your cache instance. They
are coded in a collaborative way so you can use multiple inheritance.
"""
from aiocache.base import API
class BasePlugin:
@classmethod
def add_hook(cls, func, hooks):
for hook in hooks:
setattr(cls, hook, func)
async def do_nothing(self, *args, **kwargs):
pass
BasePlugin.add_hook(
BasePlugin.do_nothing, ["pre_{}".format(method.__name__) for method in API.CMDS]
)
BasePlugin.add_hook(
BasePlugin.do_nothing, ["post_{}".format(method.__name__) for method in API.CMDS]
)
class TimingPlugin(BasePlugin):
"""
Calculates average, min and max times each command takes. The data is saved
in the cache class as a dict attribute called ``profiling``. For example, to
access the average time of the operation get, you can do ``cache.profiling['get_avg']``
"""
@classmethod
def save_time(cls, method):
async def do_save_time(self, client, *args, took=0, **kwargs):
if not hasattr(client, "profiling"):
client.profiling = {}
previous_total = client.profiling.get("{}_total".format(method), 0)
previous_avg = client.profiling.get("{}_avg".format(method), 0)
previous_max = client.profiling.get("{}_max".format(method), 0)
previous_min = client.profiling.get("{}_min".format(method))
client.profiling["{}_total".format(method)] = previous_total + 1
client.profiling["{}_avg".format(method)] = previous_avg + (took - previous_avg) / (
previous_total + 1
)
client.profiling["{}_max".format(method)] = max(took, previous_max)
client.profiling["{}_min".format(method)] = (
min(took, previous_min) if previous_min else took
)
return do_save_time
for method in API.CMDS:
TimingPlugin.add_hook(
TimingPlugin.save_time(method.__name__), ["post_{}".format(method.__name__)]
)
class HitMissRatioPlugin(BasePlugin):
"""
Calculates the ratio of hits the cache has. The data is saved in the cache class as a dict
attribute called ``hit_miss_ratio``. For example, to access the hit ratio of the cache,
you can do ``cache.hit_miss_ratio['hit_ratio']``. It also provides the "total" and "hits"
keys.
"""
async def post_get(self, client, key, took=0, ret=None):
if not hasattr(client, "hit_miss_ratio"):
client.hit_miss_ratio = {}
client.hit_miss_ratio["total"] = 0
client.hit_miss_ratio["hits"] = 0
client.hit_miss_ratio["total"] += 1
if ret is not None:
client.hit_miss_ratio["hits"] += 1
client.hit_miss_ratio["hit_ratio"] = (
client.hit_miss_ratio["hits"] / client.hit_miss_ratio["total"]
)
async def post_multi_get(self, client, keys, took=0, ret=None):
if not hasattr(client, "hit_miss_ratio"):
client.hit_miss_ratio = {}
client.hit_miss_ratio["total"] = 0
client.hit_miss_ratio["hits"] = 0
client.hit_miss_ratio["total"] += len(keys)
for result in ret:
if result is not None:
client.hit_miss_ratio["hits"] += 1
client.hit_miss_ratio["hit_ratio"] = (
client.hit_miss_ratio["hits"] / client.hit_miss_ratio["total"]
)
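# Minimal usage sketch (illustrative, not part of this module). It assumes the
# aiocache package exposes SimpleMemoryCache and that caches accept a ``plugins``
# argument; key and value names are arbitrary. Kept as a comment to avoid importing
# aiocache from within its own plugins module.
#
#     import asyncio
#     from aiocache import SimpleMemoryCache
#
#     async def _demo():
#         cache = SimpleMemoryCache(plugins=[TimingPlugin(), HitMissRatioPlugin()])
#         await cache.set("key", "value")
#         await cache.get("key")
#         print(cache.profiling["get_avg"], cache.hit_miss_ratio["hit_ratio"])
#
#     asyncio.get_event_loop().run_until_complete(_demo())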
|
boucman/systemd
|
refs/heads/master
|
tools/gdb-sd_dump_hashmaps.py
|
14
|
#!/usr/bin/env python3
# -*- Mode: python; coding: utf-8; indent-tabs-mode: nil -*- */
#
# This file is part of systemd.
#
# Copyright 2014 Michal Schmidt
#
# systemd is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 2.1 of the License, or
# (at your option) any later version.
#
# systemd is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with systemd; If not, see <http://www.gnu.org/licenses/>.
import gdb
class sd_dump_hashmaps(gdb.Command):
"dump systemd's hashmaps"
def __init__(self):
super(sd_dump_hashmaps, self).__init__("sd_dump_hashmaps", gdb.COMMAND_DATA, gdb.COMPLETE_NONE)
def invoke(self, arg, from_tty):
d = gdb.parse_and_eval("hashmap_debug_list")
all_entry_sizes = gdb.parse_and_eval("all_entry_sizes")
all_direct_buckets = gdb.parse_and_eval("all_direct_buckets")
hashmap_base_t = gdb.lookup_type("HashmapBase")
uchar_t = gdb.lookup_type("unsigned char")
ulong_t = gdb.lookup_type("unsigned long")
debug_offset = gdb.parse_and_eval("(unsigned long)&((HashmapBase*)0)->debug")
                print("type, hash, indirect, entries, max_entries, buckets, creator")
while d:
h = gdb.parse_and_eval("(HashmapBase*)((char*)%d - %d)" % (int(d.cast(ulong_t)), debug_offset))
if h["has_indirect"]:
storage_ptr = h["indirect"]["storage"].cast(uchar_t.pointer())
n_entries = h["indirect"]["n_entries"]
n_buckets = h["indirect"]["n_buckets"]
else:
storage_ptr = h["direct"]["storage"].cast(uchar_t.pointer())
n_entries = h["n_direct_entries"]
                                n_buckets = all_direct_buckets[int(h["type"])]
                        t = ["plain", "ordered", "set"][int(h["type"])]
                        print("%s, %s, %s, %d, %d, %d, %s (%s:%d)" % (t, h["hash_ops"], bool(h["has_indirect"]), n_entries, d["max_entries"], n_buckets, d["func"], d["file"], d["line"]))
if arg != "" and n_entries > 0:
dib_raw_addr = storage_ptr + (all_entry_sizes[h["type"]] * n_buckets)
histogram = {}
                                for i in range(0, n_buckets):
dib = int(dib_raw_addr[i])
histogram[dib] = histogram.get(dib, 0) + 1
for dib in sorted(iter(histogram)):
if dib != 255:
                                                print("%3d %8d %f%% of entries" % (dib, histogram[dib], 100.0*histogram[dib]/n_entries))
else:
                                                print("%3d %8d %f%% of slots" % (dib, histogram[dib], 100.0*histogram[dib]/n_buckets))
                                print("mean DIB of entries: %f" % (sum([dib*histogram[dib] for dib in iter(histogram) if dib != 255])*1.0/n_entries))
blocks = []
current_len = 1
prev = int(dib_raw_addr[0])
                                for i in range(1, n_buckets):
dib = int(dib_raw_addr[i])
if (dib == 255) != (prev == 255):
if prev != 255:
blocks += [[i, current_len]]
current_len = 1
else:
current_len += 1
prev = dib
if prev != 255:
blocks += [[i, current_len]]
# a block may be wrapped around
if len(blocks) > 1 and blocks[0][0] == blocks[0][1] and blocks[-1][0] == n_buckets - 1:
blocks[0][1] += blocks[-1][1]
blocks = blocks[0:-1]
                                print("max block: %s" % max(blocks, key=lambda a: a[1]))
                                print("sum block lens: %d" % sum(b[1] for b in blocks))
                                print("mean block len: %f" % (1.0 * sum(b[1] for b in blocks) / len(blocks)))
d = d["debug_list_next"]
sd_dump_hashmaps()
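# Example session (illustrative): load this script from a gdb attached to a systemd
# process that was built with hashmap debugging, then run the registered command:
#
#   (gdb) source tools/gdb-sd_dump_hashmaps.py
#   (gdb) sd_dump_hashmaps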
|
congma/pypar
|
refs/heads/cma-devel
|
src/ext/pypar_balancer.py
|
2
|
# coding: UTF-8
"""
Simple load balancing with pypar
(based on demo3.py from pypar demo package)
Felix Richter <felix.richter2@uni-rostock.de>
"""
import sys
import time
import numpy
import pypar
PYPAR_WORKTAG = 1
PYPAR_DIETAG = 2
def mprint(txt):
"""
Print message txt
with indentation following the node's rank
"""
import pypar
pre = " " * 8 * pypar.rank()
if type(txt) != type('dummy'):
txt = txt.__str__()
pat = "-%d-"
print pre + (pat % pypar.rank()) + txt
class PyparWork(object):
    """Abstract base class for any work to be balanced"""
def __init__(self):
pass
def uplink(self, balancer, myid, numprocs, node):
self.balancer = balancer
self.pypar_id = myid
self.pypar_numprocs = numprocs
self.pypar_node = node
def getNumWorkItems(self):
pass
def handleWorkResult(self, result, status):
pass
def calcWorkResult(self, worknum):
pass
def masterBeforeWork(self):
"""Master node calls this before sending out the work"""
pass
def slaveBeforeWork(self):
"""Slave nodes call this before receiving work"""
pass
def masterAfterWork(self):
"""Master node calls this after receiving the last work result"""
pass
def slaveAfterWork(self):
"""Slave nodes call this after sending the last work result"""
pass
def msgprint(self, txt):
pre = " " * 8 * self.pypar_id
if type(txt) != type('dummy'):
txt = txt.__str__()
pat = "-%d-"
print pre + (pat % self.pypar_id) + txt
class PyparBalancer(object):
"""The Load Balancer Class
Initialize it with a PyparWork-derived class instance
which describes the actual work to do.
debug == True - more status messages
"""
def __init__(self, work, debug = False):
self.numprocs = pypar.size() # Number of processes as specified by mpirun
self.myid = pypar.rank() # Id of of this process (myid in [0, numproc-1])
self.node = pypar.get_processor_name() # Host name on which current process is running
self.debug= debug
self.work = work
# Added by Ole Nielsen, 15 May 2008
if self.numprocs < 2:
msg = 'PyparBalancer must run on at least 2 processes'
msg += ' for the Master Slave paradigm to make sense.'
raise Exception, msg
self.work.uplink(self, self.myid, self.numprocs, self.node)
self.numworks = self.work.getNumWorkItems()
print "PyparBalancer initialised on proc %d of %d on node %s" %(self.myid, self.numprocs, self.node)
def master(self):
numcompleted = 0
#--- start slaves distributing the first work slot
for i in range(0, min(self.numprocs-1, self.numworks)):
work = i
slave= i+1
pypar.send(work, destination=slave, tag=PYPAR_WORKTAG)
print '[MASTER ]: sent first work "%s" to node %d' %(work, slave)
# dispatch the remaining work slots on dynamic load-balancing policy
# the quicker to do the job, the more jobs it takes
for work in range(self.numprocs-1, self.numworks):
result, status = pypar.receive(source=pypar.any_source, tag=PYPAR_WORKTAG, return_status=True)
print '[MASTER ]: received result from node %d' %(status.source, )
#print result
numcompleted += 1
pypar.send(work, destination=status.source, tag=PYPAR_WORKTAG)
if self.debug: print '[MASTER ]: sent work "%s" to node %d' %(work, status.source)
self.work.handleWorkResult(result, status)
        # all work items have been dispatched
        print '[MASTER ]: ToDo : %d' %self.numworks
        print '[MASTER ]: Done : %d' %numcompleted
        # we still have to collect the remaining completions
while (numcompleted < self.numworks):
result, status = pypar.receive(source=pypar.any_source, tag=PYPAR_WORKTAG, return_status=True)
print '[MASTER ]: received (final) result from node %d' % (status.source, )
print result
numcompleted += 1
print '[MASTER ]: %d completed' %numcompleted
self.work.handleWorkResult(result, status)
print '[MASTER ]: about to terminate slaves'
# Tell slaves to stop working
for i in range(1, self.numprocs):
pypar.send('#', destination=i, tag=PYPAR_DIETAG)
if self.debug: print '[MASTER ]: sent DIETAG to node %d' %(i,)
def slave(self):
if self.debug: print '[SLAVE %d]: I am processor %d of %d on node %s' % (self.myid, self.myid, self.numprocs, self.node)
if self.debug: print '[SLAVE %d]: Entering work loop' % (self.myid,)
while True:
result, status = pypar.receive(source=0, tag=pypar.any_tag, return_status=True)
print '[SLAVE %d]: received work with tag %d from node %d'\
%(self.myid, status.tag, status.source)
if (status.tag == PYPAR_DIETAG):
print '[SLAVE %d]: received termination from node %d' % (self.myid, 0)
return
else:
worknum = result
if self.debug: print '[SLAVE %d]: work number is %s' % (self.myid, worknum)
myresult = self.work.calcWorkResult(worknum)
pypar.send(myresult, destination=0)
if self.debug: print '[SLAVE %d]: sent result to node %d' % (self.myid, 0)
def run(self):
if self.myid == 0:
self.work.masterBeforeWork()
self.master()
self.work.masterAfterWork()
else:
self.work.slaveBeforeWork()
self.slave()
self.work.slaveAfterWork()
pypar.finalize()
if self.myid != 0:
sys.exit()
    # and that's it.
class PyparDemoWork(PyparWork):
"""Example PyparWork implementation"""
def __init__(self):
import numpy
self.worklist = numpy.arange(0.0,20.0)
self.resultlist = numpy.zeros_like(self.worklist)
def getNumWorkItems(self):
return len(self.worklist)
def calcWorkResult(self, worknum):
return [worknum, self.worklist[worknum] + 1]
def handleWorkResult(self, result, status):
self.resultlist[result[0]] = result[1]
def masterBeforeWork(self):
print self.worklist
def slaveBeforeWork(self):
pass
def masterAfterWork(self):
print self.resultlist
def slaveAfterWork(self):
pass
if __name__ == "__main__":
print "-----------------------"
print "::: PyParBalancer TEST "
print "-----------------------"
# create instance of work class
pyparwork = PyparDemoWork()
# create instance of balancer class,
# initialize with work class
balancer = PyparBalancer(pyparwork, True)
# run it
balancer.run()
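# Illustrative invocation (assumes a working MPI installation and pypar; the process
# count is arbitrary, but PyparBalancer requires at least 2 ranks):
#
#   mpirun -np 4 python pypar_balancer.py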
|
biggihs/python-pptx
|
refs/heads/master
|
features/steps/chart.py
|
1
|
# encoding: utf-8
"""
Gherkin step implementations for chart features.
"""
from __future__ import absolute_import, print_function
import hashlib
from itertools import islice
from behave import given, then, when
from pptx import Presentation
from pptx.chart.chart import Legend
from pptx.chart.data import (
BubbleChartData, CategoryChartData, ChartData, XyChartData
)
from pptx.enum.chart import XL_CHART_TYPE
from pptx.parts.embeddedpackage import EmbeddedXlsxPart
from pptx.util import Inches
from helpers import count, test_pptx
# given ===================================================
@given('a chart')
def given_a_chart(context):
prs = Presentation(test_pptx('shp-common-props'))
sld = prs.slides[0]
context.chart = sld.shapes[6].chart
@given('a chart having {a_or_no} title')
def given_a_chart_having_a_or_no_title(context, a_or_no):
shape_idx = {'no': 0, 'a': 1}[a_or_no]
prs = Presentation(test_pptx('cht-chart-props'))
context.chart = prs.slides[0].shapes[shape_idx].chart
@given('a chart {having_or_not} a legend')
def given_a_chart_having_or_not_a_legend(context, having_or_not):
slide_idx = {
'having': 0,
'not having': 1,
}[having_or_not]
prs = Presentation(test_pptx('cht-legend'))
context.chart = prs.slides[slide_idx].shapes[0].chart
@given('a chart of size and type {spec}')
def given_a_chart_of_size_and_type_spec(context, spec):
slide_idx = {
'2x2 Clustered Bar': 0,
'2x2 100% Stacked Bar': 1,
'2x2 Clustered Column': 2,
'4x3 Line': 3,
'3x1 Pie': 4,
'3x2 XY': 5,
'3x2 Bubble': 6,
}[spec]
prs = Presentation(test_pptx('cht-replace-data'))
chart = prs.slides[slide_idx].shapes[0].chart
context.chart = chart
context.xlsx_sha1 = hashlib.sha1(
chart._workbook.xlsx_part.blob
).hexdigest()
@given('a chart of type {chart_type}')
def given_a_chart_of_type_chart_type(context, chart_type):
slide_idx, shape_idx = {
'Area': (0, 0),
'Stacked Area': (0, 1),
'100% Stacked Area': (0, 2),
'3-D Area': (0, 3),
'3-D Stacked Area': (0, 4),
'3-D 100% Stacked Area': (0, 5),
'Clustered Bar': (1, 0),
'Stacked Bar': (1, 1),
'100% Stacked Bar': (1, 2),
'Clustered Column': (1, 3),
'Stacked Column': (1, 4),
'100% Stacked Column': (1, 5),
'Line': (2, 0),
'Stacked Line': (2, 1),
'100% Stacked Line': (2, 2),
'Marked Line': (2, 3),
'Stacked Marked Line': (2, 4),
'100% Stacked Marked Line': (2, 5),
'Pie': (3, 0),
'Exploded Pie': (3, 1),
'XY (Scatter)': (4, 0),
'XY Lines': (4, 1),
'XY Lines No Markers': (4, 2),
'XY Smooth Lines': (4, 3),
'XY Smooth No Markers': (4, 4),
'Bubble': (5, 0),
'3D-Bubble': (5, 1),
'Radar': (6, 0),
'Marked Radar': (6, 1),
'Filled Radar': (6, 2),
'Line (with date categories)': (7, 0),
}[chart_type]
prs = Presentation(test_pptx('cht-chart-type'))
context.chart = prs.slides[slide_idx].shapes[shape_idx].chart
@given('a chart title')
def given_a_chart_title(context):
prs = Presentation(test_pptx('cht-chart-props'))
context.chart_title = prs.slides[0].shapes[1].chart.chart_title
@given('a chart title having {a_or_no} text frame')
def given_a_chart_title_having_a_or_no_text_frame(context, a_or_no):
prs = Presentation(test_pptx('cht-chart-props'))
shape_idx = {'no': 0, 'a': 1}[a_or_no]
context.chart_title = prs.slides[1].shapes[shape_idx].chart.chart_title
# when ====================================================
@when('I add a Clustered bar chart with multi-level categories')
def when_I_add_a_clustered_bar_chart_with_multi_level_categories(context):
chart_type = XL_CHART_TYPE.BAR_CLUSTERED
chart_data = CategoryChartData()
WEST = chart_data.add_category('WEST')
WEST.add_sub_category('SF')
WEST.add_sub_category('LA')
EAST = chart_data.add_category('EAST')
EAST.add_sub_category('NY')
EAST.add_sub_category('NJ')
chart_data.add_series('Series 1', (1, 2, None, 4))
chart_data.add_series('Series 2', (5, None, 7, 8))
context.chart = context.slide.shapes.add_chart(
chart_type, Inches(1), Inches(1), Inches(8), Inches(5), chart_data
).chart
@when('I add a {kind} chart with {cats} categories and {sers} series')
def when_I_add_a_chart_with_categories_and_series(context, kind, cats, sers):
chart_type = {
'Area': XL_CHART_TYPE.AREA,
'Stacked Area': XL_CHART_TYPE.AREA_STACKED,
'100% Stacked Area': XL_CHART_TYPE.AREA_STACKED_100,
'Clustered Bar': XL_CHART_TYPE.BAR_CLUSTERED,
'Stacked Bar': XL_CHART_TYPE.BAR_STACKED,
'100% Stacked Bar': XL_CHART_TYPE.BAR_STACKED_100,
'Clustered Column': XL_CHART_TYPE.COLUMN_CLUSTERED,
'Stacked Column': XL_CHART_TYPE.COLUMN_STACKED,
'100% Stacked Column': XL_CHART_TYPE.COLUMN_STACKED_100,
'Doughnut': XL_CHART_TYPE.DOUGHNUT,
'Exploded Doughnut': XL_CHART_TYPE.DOUGHNUT_EXPLODED,
'Line': XL_CHART_TYPE.LINE,
'Line with Markers': XL_CHART_TYPE.LINE_MARKERS,
'Line Markers Stacked': XL_CHART_TYPE.LINE_MARKERS_STACKED,
'100% Line Markers Stacked': XL_CHART_TYPE.LINE_MARKERS_STACKED_100,
'Line Stacked': XL_CHART_TYPE.LINE_STACKED,
'100% Line Stacked': XL_CHART_TYPE.LINE_STACKED_100,
'Pie': XL_CHART_TYPE.PIE,
'Exploded Pie': XL_CHART_TYPE.PIE_EXPLODED,
'Radar': XL_CHART_TYPE.RADAR,
'Filled Radar': XL_CHART_TYPE.RADAR_FILLED,
'Radar with markers': XL_CHART_TYPE.RADAR_MARKERS,
}[kind]
category_count, series_count = int(cats), int(sers)
category_source = ('Foo', 'Bar', 'Baz', 'Boo', 'Far', 'Faz')
series_value_source = count(1.1, 1.1)
chart_data = CategoryChartData()
chart_data.categories = category_source[:category_count]
for idx in range(series_count):
series_title = 'Series %d' % (idx+1)
series_values = tuple(islice(series_value_source, category_count))
chart_data.add_series(series_title, series_values)
context.chart = context.slide.shapes.add_chart(
chart_type, Inches(1), Inches(1), Inches(8), Inches(5), chart_data
).chart
@when('I add a {bubble_type} chart having 2 series of 3 points each')
def when_I_add_a_bubble_chart_having_2_series_of_3_pts(context, bubble_type):
chart_type = getattr(XL_CHART_TYPE, bubble_type)
data = (
('Series 1', ((-0.1, 0.5, 1.0), (16.2, 0.0, 2.0), (8.0, -0.2, 3.0))),
('Series 2', ((12.4, 0.8, 4.0), (-7.5, 0.5, 5.0), (5.1, -0.5, 6.0))),
)
chart_data = BubbleChartData()
for series_data in data:
series_label, points = series_data
series = chart_data.add_series(series_label)
for point in points:
x, y, size = point
series.add_data_point(x, y, size)
context.chart = context.slide.shapes.add_chart(
chart_type, Inches(1), Inches(1), Inches(8), Inches(5), chart_data
).chart
@when('I assign {value} to chart.has_legend')
def when_I_assign_value_to_chart_has_legend(context, value):
new_value = {
'True': True,
'False': False,
}[value]
context.chart.has_legend = new_value
@when('I assign {value} to chart.has_title')
def when_I_assign_value_to_chart_has_title(context, value):
context.chart.has_title = {'True': True, 'False': False}[value]
@when('I assign {value} to chart_title.has_text_frame')
def when_I_assign_value_to_chart_title_has_text_frame(context, value):
context.chart_title.has_text_frame = {
'True': True,
'False': False
}[value]
@when('I replace its data with {cats} categories and {sers} series')
def when_I_replace_its_data_with_categories_and_series(context, cats, sers):
category_count, series_count = int(cats), int(sers)
category_source = ('Foo', 'Bar', 'Baz', 'Boo', 'Far', 'Faz')
series_value_source = count(1.1, 1.1)
chart_data = ChartData()
chart_data.categories = category_source[:category_count]
for idx in range(series_count):
series_title = 'New Series %d' % (idx+1)
series_values = tuple(islice(series_value_source, category_count))
chart_data.add_series(series_title, series_values)
context.chart.replace_data(chart_data)
@when('I replace its data with 3 series of 3 bubble points each')
def when_I_replace_its_data_with_3_series_of_three_bubble_pts_each(context):
chart_data = BubbleChartData()
for idx in range(3):
series_title = 'New Series %d' % (idx+1)
series = chart_data.add_series(series_title)
for jdx in range(3):
x, y, size = idx * 3 + jdx, idx * 2 + jdx, idx + jdx
series.add_data_point(x, y, size)
context.chart.replace_data(chart_data)
@when('I replace its data with 3 series of 3 points each')
def when_I_replace_its_data_with_3_series_of_three_points_each(context):
chart_data = XyChartData()
x = y = 0
for idx in range(3):
series_title = 'New Series %d' % (idx+1)
series = chart_data.add_series(series_title)
for jdx in range(3):
x, y = idx * 3 + jdx, idx * 2 + jdx
series.add_data_point(x, y)
context.chart.replace_data(chart_data)
# then ====================================================
@then('chart.category_axis is a {cls_name} object')
def then_chart_category_axis_is_a_cls_name_object(context, cls_name):
category_axis = context.chart.category_axis
type_name = type(category_axis).__name__
assert type_name == cls_name, 'got %s' % type_name
@then('chart.chart_title is a ChartTitle object')
def then_chart_chart_title_is_a_ChartTitle_object(context):
class_name = type(context.chart.chart_title).__name__
assert class_name == 'ChartTitle', 'got %s' % class_name
@then('chart.chart_type is {enum_member}')
def then_chart_chart_type_is_value(context, enum_member):
expected_value = getattr(XL_CHART_TYPE, enum_member)
chart = context.chart
assert chart.chart_type is expected_value, 'got %s' % chart.chart_type
@then('chart.has_legend is {value}')
def then_chart_has_legend_is_value(context, value):
expected_value = {
'True': True,
'False': False,
}[value]
chart = context.chart
assert chart.has_legend is expected_value
@then('chart.has_title is {value}')
def then_chart_has_title_is_value(context, value):
chart = context.chart
actual_value = chart.has_title
expected_value = {'True': True, 'False': False}[value]
assert actual_value is expected_value, 'got %s' % actual_value
@then('chart.legend is a legend object')
def then_chart_legend_is_a_legend_object(context):
chart = context.chart
assert isinstance(chart.legend, Legend)
@then('chart.series is a SeriesCollection object')
def then_chart_series_is_a_SeriesCollection_object(context):
type_name = type(context.chart.series).__name__
assert type_name == 'SeriesCollection', 'got %s' % type_name
@then('chart.value_axis is a ValueAxis object')
def then_chart_value_axis_is_a_ValueAxis_object(context):
value_axis = context.chart.value_axis
assert type(value_axis).__name__ == 'ValueAxis'
@then('chart_title.format is a ChartFormat object')
def then_chart_title_format_is_a_ChartFormat_object(context):
class_name = type(context.chart_title.format).__name__
assert class_name == 'ChartFormat', 'got %s' % class_name
@then('chart_title.format.fill is a FillFormat object')
def then_chart_title_format_fill_is_a_FillFormat_object(context):
class_name = type(context.chart_title.format.fill).__name__
assert class_name == 'FillFormat', 'got %s' % class_name
@then('chart_title.format.line is a LineFormat object')
def then_chart_title_format_line_is_a_LineFormat_object(context):
class_name = type(context.chart_title.format.line).__name__
assert class_name == 'LineFormat', 'got %s' % class_name
@then('chart_title.has_text_frame is {value}')
def then_chart_title_has_text_frame_is_value(context, value):
actual_value = context.chart_title.has_text_frame
expected_value = {'True': True, 'False': False}[value]
assert actual_value is expected_value, 'got %s' % actual_value
@then('chart_title.text_frame is a TextFrame object')
def then_chart_title_text_frame_is_a_TextFrame_object(context):
class_name = type(context.chart_title.text_frame).__name__
assert class_name == 'TextFrame', 'got %s' % class_name
@then('each series has a new name')
def then_each_series_has_a_new_name(context):
for series in context.chart.plots[0].series:
assert series.name.startswith('New ')
@then('each series has {count} values')
def then_each_series_has_count_values(context, count):
expected_count = int(count)
for series in context.chart.plots[0].series:
actual_value_count = len(series.values)
assert actual_value_count == expected_count
@then('len(chart.series) is {count}')
def then_len_chart_series_is_count(context, count):
expected_count = int(count)
assert len(context.chart.series) == expected_count
@then('the chart has an Excel data worksheet')
def then_the_chart_has_an_Excel_data_worksheet(context):
xlsx_part = context.chart._workbook.xlsx_part
assert isinstance(xlsx_part, EmbeddedXlsxPart)
@then('the chart has new chart data')
def then_the_chart_has_new_chart_data(context):
orig_xlsx_sha1 = context.xlsx_sha1
new_xlsx_sha1 = hashlib.sha1(
context.chart._workbook.xlsx_part.blob
).hexdigest()
assert new_xlsx_sha1 != orig_xlsx_sha1
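# Stand-alone sketch of the same python-pptx calls the steps above exercise
# (illustrative; the slide layout index is an assumption):
#
#     prs = Presentation()
#     slide = prs.slides.add_slide(prs.slide_layouts[5])
#     chart_data = CategoryChartData()
#     chart_data.categories = ('Foo', 'Bar', 'Baz')
#     chart_data.add_series('Series 1', (1.1, 2.2, 3.3))
#     slide.shapes.add_chart(
#         XL_CHART_TYPE.COLUMN_CLUSTERED, Inches(1), Inches(1),
#         Inches(8), Inches(5), chart_data)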
|
smmosquera/serge
|
refs/heads/master
|
tools/template/startup.py
|
2
|
"""Main startup file for %(name)s"""
from optparse import OptionParser
import sys
import os
if sys.version_info[0] == 3:
print 'Python 3 is not supported'
sys.exit(1)
elif sys.version_info[1] <= 5:
print 'Python 2.6+ is required'
sys.exit(1)
import serge.common
%(pymunk_code)s
parser = OptionParser()
parser.add_option("-f", "--framerate", dest="framerate", default=60, type="int",
help="framerate to use for the engine")
parser.add_option("-l", "--log", dest="log", default=40, type="int",
help="logging level")
parser.add_option("-p", "--profile", dest="profile", default=False, action="store_true",
help="profile the game for speed")
parser.add_option("-d", "--debug", dest="debug", default=False, action="store_true",
help="run in debug mode")
parser.add_option("-c", "--cheat", dest="cheat", default=False, action="store_true",
help="run in cheat mode - all levels are available right away")
parser.add_option("-m", "--music-off", dest="musicoff", default=False, action="store_true",
help="start with music silenced")
parser.add_option("-S", "--straight", dest="straight", default=False, action="store_true",
help="go straight into game, bypassing start screen")
parser.add_option("-s", "--screenshot", dest="screenshot", default=False, action="store_true",
help="allow screenshots of the screen by pressing 's' during gameplay")
parser.add_option("-t", "--theme", dest="theme", default='', type='str',
help="settings (a=b,c=d) for the theme")
parser.add_option("-D", "--drop", dest="drop", default=False, action="store_true",
help="drop into debug mode on an unhandled error")
(options, args) = parser.parse_args()
serge.common.logger.setLevel(options.log)
import game.main
if options.drop:
serge.common.installDebugHook()
if options.profile:
import cProfile, pstats
cProfile.run('game.main.main(options, args)', 'profile')
p = pstats.Stats('profile')
print p.sort_stats('cumulative').print_stats(100)
else:
game.main.main(options, args)
|
lewer/gensim
|
refs/heads/develop
|
gensim/models/rpmodel.py
|
70
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
import logging
import itertools
import numpy
import scipy
from gensim import interfaces, matutils, utils
logger = logging.getLogger('gensim.models.rpmodel')
class RpModel(interfaces.TransformationABC):
"""
Objects of this class allow building and maintaining a model for Random Projections
(also known as Random Indexing). For theoretical background on RP, see:
Kanerva et al.: "Random indexing of text samples for Latent Semantic Analysis."
The main methods are:
1. constructor, which creates the random projection matrix
    2. the [] method, which transforms a simple count representation into the
    random projection space.
>>> rp = RpModel(corpus)
>>> print(rp[some_doc])
>>> rp.save('/tmp/foo.rp_model')
Model persistency is achieved via its load/save methods.
"""
def __init__(self, corpus, id2word=None, num_topics=300):
"""
`id2word` is a mapping from word ids (integers) to words (strings). It is
used to determine the vocabulary size, as well as for debugging and topic
printing. If not set, it will be determined from the corpus.
"""
self.id2word = id2word
self.num_topics = num_topics
if corpus is not None:
self.initialize(corpus)
def __str__(self):
return "RpModel(num_terms=%s, num_topics=%s)" % (self.num_terms, self.num_topics)
def initialize(self, corpus):
"""
Initialize the random projection matrix.
"""
if self.id2word is None:
logger.info("no word id mapping provided; initializing from corpus, assuming identity")
self.id2word = utils.dict_from_corpus(corpus)
self.num_terms = len(self.id2word)
else:
self.num_terms = 1 + max([-1] + self.id2word.keys())
shape = self.num_topics, self.num_terms
logger.info("constructing %s random matrix" % str(shape))
# Now construct the projection matrix itself.
# Here i use a particular form, derived in "Achlioptas: Database-friendly random projection",
# and his (1) scenario of Theorem 1.1 in particular (all entries are +1/-1).
randmat = 1 - 2 * numpy.random.binomial(1, 0.5, shape) # convert from 0/1 to +1/-1
self.projection = numpy.asfortranarray(randmat, dtype=numpy.float32) # convert from int32 to floats, for faster multiplications
def __getitem__(self, bow):
"""
Return RP representation of the input vector and/or corpus.
"""
# if the input vector is in fact a corpus, return a transformed corpus as result
is_corpus, bow = utils.is_corpus(bow)
if is_corpus:
return self._apply(bow)
vec = matutils.sparse2full(bow, self.num_terms).reshape(self.num_terms, 1) / numpy.sqrt(self.num_topics)
vec = numpy.asfortranarray(vec, dtype=numpy.float32)
topic_dist = numpy.dot(self.projection, vec) # (k, d) * (d, 1) = (k, 1)
return [(topicid, float(topicvalue)) for topicid, topicvalue in enumerate(topic_dist.flat)
if numpy.isfinite(topicvalue) and not numpy.allclose(topicvalue, 0.0)]
def __setstate__(self, state):
"""
This is a hack to work around a bug in numpy, where a FORTRAN-order array
unpickled from disk segfaults on using it.
"""
self.__dict__ = state
if self.projection is not None:
self.projection = self.projection.copy('F') # simply making a fresh copy fixes the broken array
#endclass RpModel
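# Minimal usage sketch (illustrative, not part of the original module): a tiny
# bag-of-words corpus of two documents; the ids and counts are made up.
#
#     corpus = [[(0, 1.0), (1, 2.0)], [(1, 1.0), (2, 3.0)]]
#     rp = RpModel(corpus, num_topics=2)
#     print(rp[corpus[0]])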
|
infobloxopen/netmri-toolkit
|
refs/heads/master
|
Python/getsubinterfaces.py
|
1
|
#Device subinterface data retrieval script. Copyright Ingmar Van Glabbeek ingmar@infoblox.com
#Licensed under Apache-2.0
#This script will pull all devices of a given device group and then list each device's management IP as well as its other interface IPs.
#By default it saves the output to "deviceinterfacedump.json"
#Tested on NetMRI 7.3.1 and 7.3.2
#Modules required:
import getpass
import requests
import json
import urllib3
from requests.auth import HTTPBasicAuth
from http.client import responses
import time
#You can hardcode credentials here, it's not safe. Don't do it.
#hostname = "netmri.infoblox.com"
#username = "admin"
#password = "infoblox"
#urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
def main():
cookie_host = wapi_connect()
#print(cookie_host)
devicelist = getdevices(cookie_host)
filtered_data = devicedata(devicelist)
#uncomment next line if you want to write to console
#print(json.dumps(filtered_data,indent=4, sort_keys=True))
filename = open("deviceinterfacedump.json","w")
filename.write(json.dumps(filtered_data,indent=4))
filename.close()
print("Data retrieved successfully")
def devicedata(devicelist):
listload = json.loads(devicelist)
data = []
for e in listload['rows']:
if not e["if_addrs"]:
device = {"DeviceID":e["DeviceID"],"DeviceName":e["DeviceName"],"DeviceType":e["DeviceType"],"DeviceIPDotted":e["DeviceIPDotted"],"Other InterfaceIP":["none"]}
data.append(device)
else:
device = {"DeviceID": e['DeviceID'], "DeviceName": e["DeviceName"], "DeviceType": e["DeviceType"],
"DeviceIPDotted": e["DeviceIPDotted"], "Other InterfaceIP":[]}
            for f in e["if_addrs"]:
                interface = {"InterfaceIP": f["ifIPDotted"], "Interfacename": f["ifName"]}
                device["Other InterfaceIP"].append(interface)
            data.append(device)
dataftw=json.dumps(data)
returndata=json.loads(dataftw)
return returndata
def getdevices(cookie_host):
if not cookie_host:
print("No connection established.")
return 0
#get current time
ts = time.time()
hostname=cookie_host[1]
#limits number of results
limit = input("Limit to this number of devices: ")
get_url = "https://" + hostname + "/api/3.3/device_groups/index"
response = requests.get(get_url, cookies=cookie_host[0], verify=False)
d=response.text
dl=json.loads(d)
print("List of DeviceGroups")
for e in dl["device_groups"]:
dglist={"GroupName":e["GroupName"],"GroupID":e["GroupID"]}
print(dglist)
devicegroup = input("Based on the output specify the devicegroup ID by its ID: ")
get_url = "https://" + hostname + "/api/3.3/discovery_statuses/static/current.extjs"
querystring = {"_dc": ts, "filename": "recent_activity.csv", "filter": "null", "limit": limit,
"GroupID": devicegroup}
response = requests.get(get_url, cookies=cookie_host[0], verify=False, params=querystring)
t=response.text
print("We are fetching a list of " + str(limit) +
" devices for devicegroup " + str(devicegroup) + ".")
return(t)
def wapi_connect():
hostname = input("Enter the NetMRI hostname or IP: ")
username = input("Enter your NetMRI username: ")
password = getpass.getpass("Enter your Password: ")
https_val = input("Disable SSL validations?(y/n) ")
if https_val in ("y", "Y"):
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
print("SSL validation disabled")
if https_val in ("n", "N"):
print("SSL validation enabled")
login_url = "https://" + hostname + "/api/3.3/device_groups/index"
print("logging in to " + hostname)
try:
login_result = requests.get(
login_url,
auth=HTTPBasicAuth(username, password),
timeout=5,
verify=False)
except requests.exceptions.ConnectTimeout as e:
print("Connection time out after 5 seconds.")
exit(1)
except requests.exceptions.ConnectionError as e:
print("No route to host " + hostname)
exit(1)
if has_error(login_result):
exit(1)
else:
print("Login OK")
return(login_result.cookies,hostname)
def has_error(_result):
if _result.status_code == 200:
return 0
elif _result.status_code == 201:
return 0
try:
err_text = _result.json()['text']
except KeyError as e:
err_text = "Response contains no error text"
except json.decoder.JSONDecodeError as e:
err_text = "No JSON Response"
# print out the HTTP response code, description, and error text
http_code = _result.status_code
http_desc = responses[http_code]
print("HTTP Code [%3d] %s. %s" % (http_code, http_desc, err_text))
return 1
if __name__ == "__main__":
main()
|
FusionSP/android_external_chromium_org
|
refs/heads/lp5.1
|
components/policy/tools/syntax_check_policy_template_json.py
|
34
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''
Checks a policy_templates.json file for conformity to its syntax specification.
'''
import json
import optparse
import os
import re
import sys
LEADING_WHITESPACE = re.compile('^([ \t]*)')
TRAILING_WHITESPACE = re.compile('.*?([ \t]+)$')
# Matches all non-empty strings that contain no whitespaces.
NO_WHITESPACE = re.compile('[^\s]+$')
# Convert a 'type' to the schema types it may be converted to.
# The 'dict' type represents structured JSON data, and can be converted
# to an 'object' or an 'array'.
TYPE_TO_SCHEMA = {
'int': [ 'integer' ],
'list': [ 'array' ],
'dict': [ 'object', 'array' ],
'main': [ 'boolean' ],
'string': [ 'string' ],
'int-enum': [ 'integer' ],
'string-enum': [ 'string' ],
'string-enum-list': [ 'array' ],
'external': [ 'object' ],
}
# List of boolean policies that have been introduced with negative polarity in
# the past and should not trigger the negative polarity check.
LEGACY_INVERTED_POLARITY_WHITELIST = [
'DeveloperToolsDisabled',
'DeviceAutoUpdateDisabled',
'Disable3DAPIs',
'DisableAuthNegotiateCnameLookup',
'DisablePluginFinder',
'DisablePrintPreview',
'DisableSafeBrowsingProceedAnyway',
'DisableScreenshots',
'DisableSpdy',
'DisableSSLRecordSplitting',
'DriveDisabled',
'DriveDisabledOverCellular',
'ExternalStorageDisabled',
'SavingBrowserHistoryDisabled',
'SyncDisabled',
]
class PolicyTemplateChecker(object):
def __init__(self):
self.error_count = 0
self.warning_count = 0
self.num_policies = 0
self.num_groups = 0
self.num_policies_in_groups = 0
self.options = None
self.features = []
def _Error(self, message, parent_element=None, identifier=None,
offending_snippet=None):
self.error_count += 1
error = ''
if identifier is not None and parent_element is not None:
error += 'In %s %s: ' % (parent_element, identifier)
print error + 'Error: ' + message
if offending_snippet is not None:
print ' Offending:', json.dumps(offending_snippet, indent=2)
def _CheckContains(self, container, key, value_type,
optional=False,
parent_element='policy',
container_name=None,
identifier=None,
offending='__CONTAINER__',
regexp_check=None):
'''
Checks |container| for presence of |key| with value of type |value_type|.
If |value_type| is string and |regexp_check| is specified, then an error is
reported when the value does not match the regular expression object.
|value_type| can also be a list, if more than one type is supported.
The other parameters are needed to generate, if applicable, an appropriate
human-readable error message of the following form:
In |parent_element| |identifier|:
(if the key is not present):
Error: |container_name| must have a |value_type| named |key|.
Offending snippet: |offending| (if specified; defaults to |container|)
(if the value does not have the required type):
Error: Value of |key| must be a |value_type|.
Offending snippet: |container[key]|
Returns: |container[key]| if the key is present, None otherwise.
'''
if identifier is None:
try:
identifier = container.get('name')
except:
self._Error('Cannot access container name of "%s".' % container_name)
return None
if container_name is None:
container_name = parent_element
if offending == '__CONTAINER__':
offending = container
if key not in container:
if optional:
return
else:
self._Error('%s must have a %s "%s".' %
(container_name.title(), value_type.__name__, key),
container_name, identifier, offending)
return None
value = container[key]
value_types = value_type if isinstance(value_type, list) else [ value_type ]
if not any(isinstance(value, type) for type in value_types):
self._Error('Value of "%s" must be one of [ %s ].' %
(key, ', '.join([type.__name__ for type in value_types])),
container_name, identifier, value)
if str in value_types and regexp_check and not regexp_check.match(value):
self._Error('Value of "%s" must match "%s".' %
(key, regexp_check.pattern),
container_name, identifier, value)
return value
def _AddPolicyID(self, id, policy_ids, policy):
'''
Adds |id| to |policy_ids|. Generates an error message if the
|id| exists already; |policy| is needed for this message.
'''
if id in policy_ids:
self._Error('Duplicate id', 'policy', policy.get('name'),
id)
else:
policy_ids.add(id)
def _CheckPolicyIDs(self, policy_ids):
'''
Checks a set of policy_ids to make sure it contains a continuous range
of entries (i.e. no holes).
Holes would not be a technical problem, but we want to ensure that nobody
accidentally omits IDs.
'''
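    # For example (illustrative): policy_ids == set([1, 2, 4]) has length 3, so ids
    # 1 through 3 are checked and 'No policy with id: 3' is reported for the gap.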
for i in range(len(policy_ids)):
if (i + 1) not in policy_ids:
self._Error('No policy with id: %s' % (i + 1))
def _CheckPolicySchema(self, policy, policy_type):
'''Checks that the 'schema' field matches the 'type' field.'''
self._CheckContains(policy, 'schema', dict)
if isinstance(policy.get('schema'), dict):
self._CheckContains(policy['schema'], 'type', str)
schema_type = policy['schema'].get('type')
if schema_type not in TYPE_TO_SCHEMA[policy_type]:
self._Error('Schema type must match the existing type for policy %s' %
policy.get('name'))
# Checks that boolean policies are not negated (which makes them harder to
# reason about).
if (schema_type == 'boolean' and
'disable' in policy.get('name').lower() and
policy.get('name') not in LEGACY_INVERTED_POLARITY_WHITELIST):
self._Error(('Boolean policy %s uses negative polarity, please make ' +
'new boolean policies follow the XYZEnabled pattern. ' +
'See also http://crbug.com/85687') % policy.get('name'))
def _CheckPolicy(self, policy, is_in_group, policy_ids):
if not isinstance(policy, dict):
self._Error('Each policy must be a dictionary.', 'policy', None, policy)
return
# There should not be any unknown keys in |policy|.
for key in policy:
if key not in ('name', 'type', 'caption', 'desc', 'device_only',
'supported_on', 'label', 'policies', 'items',
'example_value', 'features', 'deprecated', 'future',
'id', 'schema', 'max_size'):
self.warning_count += 1
print ('In policy %s: Warning: Unknown key: %s' %
(policy.get('name'), key))
# Each policy must have a name.
self._CheckContains(policy, 'name', str, regexp_check=NO_WHITESPACE)
# Each policy must have a type.
policy_types = ('group', 'main', 'string', 'int', 'list', 'int-enum',
'string-enum', 'string-enum-list', 'dict', 'external')
policy_type = self._CheckContains(policy, 'type', str)
if policy_type not in policy_types:
self._Error('Policy type must be one of: ' + ', '.join(policy_types),
'policy', policy.get('name'), policy_type)
return # Can't continue for unsupported type.
# Each policy must have a caption message.
self._CheckContains(policy, 'caption', str)
# Each policy must have a description message.
self._CheckContains(policy, 'desc', str)
# If 'label' is present, it must be a string.
self._CheckContains(policy, 'label', str, True)
# If 'deprecated' is present, it must be a bool.
self._CheckContains(policy, 'deprecated', bool, True)
# If 'future' is present, it must be a bool.
self._CheckContains(policy, 'future', bool, True)
if policy_type == 'group':
# Groups must not be nested.
if is_in_group:
self._Error('Policy groups must not be nested.', 'policy', policy)
# Each policy group must have a list of policies.
policies = self._CheckContains(policy, 'policies', list)
# Check sub-policies.
if policies is not None:
for nested_policy in policies:
self._CheckPolicy(nested_policy, True, policy_ids)
# Groups must not have an |id|.
if 'id' in policy:
self._Error('Policies of type "group" must not have an "id" field.',
'policy', policy)
# Statistics.
self.num_groups += 1
else: # policy_type != group
# Each policy must have a protobuf ID.
id = self._CheckContains(policy, 'id', int)
self._AddPolicyID(id, policy_ids, policy)
# 'schema' is the new 'type'.
# TODO(joaodasilva): remove the 'type' checks once 'schema' is used
# everywhere.
self._CheckPolicySchema(policy, policy_type)
# Each policy must have a supported_on list.
supported_on = self._CheckContains(policy, 'supported_on', list)
if supported_on is not None:
for s in supported_on:
if not isinstance(s, str):
self._Error('Entries in "supported_on" must be strings.', 'policy',
policy, supported_on)
# Each policy must have a 'features' dict.
features = self._CheckContains(policy, 'features', dict)
# All the features must have a documenting message.
if features:
for feature in features:
if not feature in self.features:
self._Error('Unknown feature "%s". Known features must have a '
'documentation string in the messages dictionary.' %
feature, 'policy', policy.get('name', policy))
# All user policies must have a per_profile feature flag.
if (not policy.get('device_only', False) and
not policy.get('deprecated', False) and
not filter(re.compile('^chrome_frame:.*').match, supported_on)):
self._CheckContains(features, 'per_profile', bool,
container_name='features',
identifier=policy.get('name'))
# All policies must declare whether they allow changes at runtime.
self._CheckContains(features, 'dynamic_refresh', bool,
container_name='features',
identifier=policy.get('name'))
# Each policy must have an 'example_value' of appropriate type.
if policy_type == 'main':
value_type = item_type = bool
elif policy_type in ('string', 'string-enum'):
value_type = item_type = str
elif policy_type in ('int', 'int-enum'):
value_type = item_type = int
elif policy_type in ('list', 'string-enum-list'):
value_type = list
item_type = str
elif policy_type == 'external':
value_type = item_type = dict
elif policy_type == 'dict':
value_type = item_type = [ dict, list ]
else:
raise NotImplementedError('Unimplemented policy type: %s' % policy_type)
self._CheckContains(policy, 'example_value', value_type)
# Statistics.
self.num_policies += 1
if is_in_group:
self.num_policies_in_groups += 1
if policy_type in ('int-enum', 'string-enum', 'string-enum-list'):
# Enums must contain a list of items.
items = self._CheckContains(policy, 'items', list)
if items is not None:
if len(items) < 1:
self._Error('"items" must not be empty.', 'policy', policy, items)
for item in items:
# Each item must have a name.
# Note: |policy.get('name')| is used instead of |policy['name']|
# because it returns None rather than failing when no key called
# 'name' exists.
self._CheckContains(item, 'name', str, container_name='item',
identifier=policy.get('name'),
regexp_check=NO_WHITESPACE)
# Each item must have a value of the correct type.
self._CheckContains(item, 'value', item_type, container_name='item',
identifier=policy.get('name'))
# Each item must have a caption.
self._CheckContains(item, 'caption', str, container_name='item',
identifier=policy.get('name'))
if policy_type == 'external':
# Each policy referencing external data must specify a maximum data size.
self._CheckContains(policy, 'max_size', int)
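# A minimal sketch (hypothetical values) of a non-group policy entry that the
# checks above would accept; the field names come from _CheckPolicy, the
# concrete values are illustrative only:
#   {
#     'name': 'ExamplePolicy',
#     'type': 'main',
#     'id': 123,
#     'caption': 'Example policy',
#     'desc': 'What the policy does.',
#     'supported_on': ['chrome.win:8-'],
#     'features': {'dynamic_refresh': True, 'per_profile': False},
#     'example_value': True,
#   }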
def _CheckMessage(self, key, value):
# |key| must be a string, |value| a dict.
if not isinstance(key, str):
self._Error('Each message key must be a string.', 'message', key, key)
return
if not isinstance(value, dict):
self._Error('Each message must be a dictionary.', 'message', key, value)
return
# Each message must have a desc.
self._CheckContains(value, 'desc', str, parent_element='message',
identifier=key)
# Each message must have a text.
self._CheckContains(value, 'text', str, parent_element='message',
identifier=key)
# There should not be any unknown keys in |value|.
for vkey in value:
if vkey not in ('desc', 'text'):
self.warning_count += 1
print 'In message %s: Warning: Unknown key: %s' % (key, vkey)
def _LeadingWhitespace(self, line):
match = LEADING_WHITESPACE.match(line)
if match:
return match.group(1)
return ''
def _TrailingWhitespace(self, line):
match = TRAILING_WHITESPACE.match(line)
if match:
return match.group(1)
return ''
def _LineError(self, message, line_number):
self.error_count += 1
print 'In line %d: Error: %s' % (line_number, message)
def _LineWarning(self, message, line_number):
self.warning_count += 1
print ('In line %d: Warning: Automatically fixing formatting: %s'
% (line_number, message))
def _CheckFormat(self, filename):
if self.options.fix:
fixed_lines = []
with open(filename) as f:
indent = 0
line_number = 0
for line in f:
line_number += 1
line = line.rstrip('\n')
# Check for trailing whitespace.
trailing_whitespace = self._TrailingWhitespace(line)
if len(trailing_whitespace) > 0:
if self.options.fix:
line = line.rstrip()
self._LineWarning('Trailing whitespace.', line_number)
else:
self._LineError('Trailing whitespace.', line_number)
if self.options.fix:
if len(line) == 0:
fixed_lines += ['\n']
continue
else:
if line == trailing_whitespace:
# This also catches the case of an empty line.
continue
# Check for correct amount of leading whitespace.
leading_whitespace = self._LeadingWhitespace(line)
if leading_whitespace.count('\t') > 0:
if self.options.fix:
leading_whitespace = leading_whitespace.replace('\t', ' ')
line = leading_whitespace + line.lstrip()
self._LineWarning('Tab character found.', line_number)
else:
self._LineError('Tab character found.', line_number)
if line[len(leading_whitespace)] in (']', '}'):
indent -= 2
if line[0] != '#': # Ignore 0-indented comments.
if len(leading_whitespace) != indent:
if self.options.fix:
line = ' ' * indent + line.lstrip()
self._LineWarning('Indentation should be ' + str(indent) +
' spaces.', line_number)
else:
self._LineError('Bad indentation. Should be ' + str(indent) +
' spaces.', line_number)
if line[-1] in ('[', '{'):
indent += 2
if self.options.fix:
fixed_lines.append(line + '\n')
# If --fix is specified: backup the file (deleting any existing backup),
# then write the fixed version with the old filename.
if self.options.fix:
if self.options.backup:
backupfilename = filename + '.bak'
if os.path.exists(backupfilename):
os.remove(backupfilename)
os.rename(filename, backupfilename)
with open(filename, 'w') as f:
f.writelines(fixed_lines)
def Main(self, filename, options):
try:
with open(filename) as f:
data = eval(f.read())
except:
import traceback
traceback.print_exc(file=sys.stdout)
self._Error('Invalid Python/JSON syntax.')
return 1
if data is None:
self._Error('Invalid Python/JSON syntax.')
return 1
self.options = options
# First part: check JSON structure.
# Check (non-policy-specific) message definitions.
messages = self._CheckContains(data, 'messages', dict,
parent_element=None,
container_name='The root element',
offending=None)
if messages is not None:
for message in messages:
self._CheckMessage(message, messages[message])
if message.startswith('doc_feature_'):
self.features.append(message[12:])
# Check policy definitions.
policy_definitions = self._CheckContains(data, 'policy_definitions', list,
parent_element=None,
container_name='The root element',
offending=None)
if policy_definitions is not None:
policy_ids = set()
for policy in policy_definitions:
self._CheckPolicy(policy, False, policy_ids)
self._CheckPolicyIDs(policy_ids)
# Second part: check formatting.
self._CheckFormat(filename)
# Third part: summary and exit.
print ('Finished checking %s. %d errors, %d warnings.' %
(filename, self.error_count, self.warning_count))
if self.options.stats:
if self.num_groups > 0:
print ('%d policies, %d of those in %d groups (containing on '
'average %.1f policies).' %
(self.num_policies, self.num_policies_in_groups, self.num_groups,
(1.0 * self.num_policies_in_groups / self.num_groups)))
else:
print self.num_policies, 'policies, 0 policy groups.'
if self.error_count > 0:
return 1
return 0
def Run(self, argv, filename=None):
parser = optparse.OptionParser(
usage='usage: %prog [options] filename',
description='Syntax check a policy_templates.json file.')
parser.add_option('--fix', action='store_true',
help='Automatically fix formatting.')
parser.add_option('--backup', action='store_true',
help='Create backup of original file (before fixing).')
parser.add_option('--stats', action='store_true',
help='Generate statistics.')
(options, args) = parser.parse_args(argv)
if filename is None:
if len(args) != 2:
parser.print_help()
sys.exit(1)
filename = args[1]
return self.Main(filename, options)
if __name__ == '__main__':
sys.exit(PolicyTemplateChecker().Run(sys.argv))
|
xyb/micropython
|
refs/heads/master
|
tests/basics/dict_setdefault.py
|
116
|
d = {}
print(d.setdefault(1))
print(d.setdefault(1))
print(d.setdefault(5, 42))
print(d.setdefault(5, 1))
print(d[1])
print(d[5])
d.pop(5)
print(d.setdefault(5, 1))
print(d[1])
print(d[5])
|
bernardopires/django-tenant-schemas
|
refs/heads/master
|
dts_test_project/dts_test_app/migrations/0003_test_add_db_index.py
|
2
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
('dts_test_app', '0002_test_drop_unique'),
]
operations = [
migrations.AddField(
model_name='DummyModel',
name='indexed_value',
field=models.CharField(max_length=255, db_index=True),
),
]
|
wyg3958/django-cms
|
refs/heads/develop
|
cms/extensions/admin.py
|
51
|
from cms.models import Page, Title
from django.contrib import admin
from django.contrib.admin.options import csrf_protect_m
from django.core.exceptions import PermissionDenied
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
class ExtensionAdmin(admin.ModelAdmin):
change_form_template = "admin/cms/extensions/change_form.html"
add_form_template = "admin/cms/extensions/change_form.html"
class PageExtensionAdmin(ExtensionAdmin):
def save_model(self, request, obj, form, change):
if not change and 'extended_object' in request.GET:
obj.extended_object = Page.objects.get(pk=request.GET['extended_object'])
page = Page.objects.get(pk=request.GET['extended_object'])
else:
page = obj.extended_object
if not page.has_change_permission(request):
raise PermissionDenied()
super(PageExtensionAdmin, self).save_model(request, obj, form, change)
def delete_model(self, request, obj):
if not obj.extended_object.has_change_permission(request):
raise PermissionDenied()
obj.delete()
def get_model_perms(self, request):
"""
Return an empty perms dict, thus hiding the model from the admin index.
"""
return {}
def get_queryset(self, request):
return super(PageExtensionAdmin, self).get_queryset(request).filter(extended_object__publisher_is_draft=True)
@csrf_protect_m
def add_view(self, request, form_url='', extra_context=None):
"""
Check if the page already has an extension object. If so, redirect to edit view instead.
"""
extended_object_id = request.GET.get('extended_object', False)
if extended_object_id:
try:
page = Page.objects.get(pk=extended_object_id)
extension = self.model.objects.get(extended_object=page)
opts = self.model._meta
change_url = reverse('admin:%s_%s_change' %
(opts.app_label, opts.model_name),
args=(extension.pk,),
current_app=self.admin_site.name)
return HttpResponseRedirect(change_url)
except self.model.DoesNotExist:
pass
return super(ExtensionAdmin, self).add_view(request, form_url, extra_context)
class TitleExtensionAdmin(ExtensionAdmin):
def save_model(self, request, obj, form, change):
if not change and 'extended_object' in request.GET:
obj.extended_object = Title.objects.get(pk=request.GET['extended_object'])
title = Title.objects.get(pk=request.GET['extended_object'])
else:
title = obj.extended_object
if not title.page.has_change_permission(request):
raise PermissionDenied()
super(TitleExtensionAdmin, self).save_model(request, obj, form, change)
def delete_model(self, request, obj):
if not obj.extended_object.page.has_change_permission(request):
raise PermissionDenied()
obj.delete()
def get_model_perms(self, request):
"""
Return an empty perms dict, thus hiding the model from the admin index.
"""
return {}
def get_queryset(self, request):
return super(TitleExtensionAdmin, self).get_queryset(request).filter(extended_object__page__publisher_is_draft=True)
@csrf_protect_m
def add_view(self, request, form_url='', extra_context=None):
"""
Check if the page already has an extension object. If so, redirect to edit view instead.
"""
extended_object_id = request.GET.get('extended_object', False)
if extended_object_id:
try:
title = Title.objects.get(pk=extended_object_id)
extension = self.model.objects.get(extended_object=title)
opts = self.model._meta
change_url = reverse('admin:%s_%s_change' %
(opts.app_label, opts.model_name),
args=(extension.pk,),
current_app=self.admin_site.name)
return HttpResponseRedirect(change_url)
except self.model.DoesNotExist:
pass
return super(ExtensionAdmin, self).add_view(request, form_url, extra_context)
|
adobe-flash/avmplus
|
refs/heads/master
|
build/buildbot/master/custom/buildbot_ext/steps/shellAddons.py
|
8
|
# -*- test-case-name: buildbot.test.test_steps,buildbot.test.test_properties -*-
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from buildbot.steps.shell import ShellCommand
from buildbot.steps.transfer import FileDownload
from buildbot.status.builder import SUCCESS, FAILURE, SKIPPED, WARNINGS
from buildbot.process.buildstep import LoggingBuildStep, RemoteShellCommand
import buildbot.status.builder
from twisted.python import log
import re
class BaseShellCommand(ShellCommand):
messages=[]
script_status=""
def __init__(self, **kwargs):
ShellCommand.__init__(self, **kwargs)
self.messages=[]
self.script_status=""
def start(self):
res="SUCCESS"
for result in self.build.results:
if result == WARNINGS and res == "SUCCESS":
res = "WARNINGS"
if result == FAILURE:
res = "FAILURE"
self.build.setProperty("status", res, "BaseShellCommand")
ShellCommand.start(self)
def createSummary(self, log):
lines=log.readlines()
for line in lines:
if line.startswith('url:'):
items=line.split()
url=items[1]
items=items[2:]
desc=''
for item in items:
desc="%s %s" % (desc,item)
self.addURL(desc, url)
if line.startswith("message:"):
message = line[len("message:"):].strip()
self.messages.append(message)
if line.startswith("buildbot_status:"):
# valid values: [ SUCCESS | FAILURE | WARNINGS ]
self.script_status = line[len("buildbot_status:"):].strip()
def getText(self, cmd, results):
text=ShellCommand.getText(self,cmd,results)
for message in self.messages:
text.append('%s' % message)
return text
def evaluateCommand(self,cmd):
if cmd.rc != 0:
return buildbot.status.builder.FAILURE
if self.script_status != "":
if self.script_status == "SUCCESS":
return buildbot.status.builder.SUCCESS
elif self.script_status == "FAILURE":
return buildbot.status.builder.FAILURE
elif self.script_status == "WARNINGS":
return buildbot.status.builder.WARNINGS
else:
# This is an unknown status, FAIL the step so that it is investigated
return buildbot.status.builder.FAILURE
return buildbot.status.builder.SUCCESS
class PerformanceShellCommand(ShellCommand):
versions=[]
numtests=0
def createSummary(self,log1):
self.versions=[]
self.numtests=0
lines=log1.readlines()
for line in lines:
if line.startswith('installing'):
version=line.split()[1]
self.versions.append(version)
try:
line.index('running test')
self.numtests += 1
except:
self.numtests+=0
self.versions.sort()
def getText(self,cmd,results):
text=ShellCommand.getText(self,cmd,results)
for version in self.versions:
text.append('tested player %s' % version)
text.append(' ran %d tests' % self.numtests)
return text
class SizeReportShellCommand(BaseShellCommand):
sizeBytes = '-'
sizeKBytes = '-'
sizeExtra = '-'
def createSummary(self,log1):
BaseShellCommand.createSummary(self,log1)
lines=log1.readlines()
for line in lines:
if line.startswith('size_bytes'):
self.sizeBytes=line.split()[1]
if line.startswith('size_K'):
self.sizeKBytes=line.split()[1]
if line.startswith('size_extra'):
self.sizeExtra=line.split()[1]
def getText(self, cmd, results):
text=BaseShellCommand.getText(self,cmd,results)
sz="%.2f" % (float(self.sizeKBytes)-1+float(self.sizeExtra)/1024.0)
text.append("%s K %d bytes" % (sz,int(self.sizeBytes)))
return text
class BuildShellCommand(BaseShellCommand):
def createSummary(self,log1):
BaseShellCommand.createSummary(self, log1)
class PerfShellCommand(BaseShellCommand):
perfchange = '-'
def createSummary(self,log1):
BaseShellCommand.createSummary(self, log1)
lines=log1.readlines()
for line in lines:
if line.startswith('perfchange:'):
items=line.split()
try:
self.perfchange=float(items[1][:-1])
except:
self.perfchange="error"
def getText(self, cmd, results):
text=BaseShellCommand.getText(self,cmd,results)
text.append("Performance change: %s%%\n" % self.perfchange)
return text
def evaluateCommand(self,cmd):
if cmd.rc != 0:
return buildbot.status.builder.FAILURE
if self.perfchange > -3:
return buildbot.status.builder.SUCCESS
elif self.perfchange > -10:
return buildbot.status.builder.WARNINGS
else:
return buildbot.status.builder.FAILURE
class PerfNoColorShellCommand(BaseShellCommand):
perfchange = '-'
def createSummary(self,log1):
BaseShellCommand.createSummary(self, log1)
lines=log1.readlines()
for line in lines:
if line.startswith('perfchange:'):
items=line.split()
try:
self.perfchange=float(items[1][:-1])
except:
self.perfchange="error"
def getText(self, cmd, results):
text=BaseShellCommand.getText(self,cmd,results)
text.append("Performance change: %s%%\n" % self.perfchange)
return text
class TestSuiteShellCommand(BaseShellCommand):
passes = 0
fails = 0
unexpectedpasses = 0
expectedfails = 0
skipped = 0
asserts = 0
def createSummary(self,log1):
BaseShellCommand.createSummary(self,log1)
lines=log1.readlines()
for line in lines:
if line.find('total passes')>-1:
fields=line.split()
self.passes=int(fields[4])
self.unexpectedpasses=int(fields[7])
if line.find('total failures')>-1:
fields=line.split()
self.fails=int(fields[4])
self.expectedfails=int(fields[7])
if line.startswith('unexpected'):
fields=line.split()
try:
self.unexpectedpasses=int(fields[3])
except:
print("Error parsing unexpected passes")
if line.startswith('expected'):
fields=line.split()
try:
self.expectedfails=int(fields[3])
except:
print("Error parsing expected failures")
if line.startswith('failures'):
fields=line.split()
self.fails=int(fields[2])
if line.startswith('passes'):
fields=line.split()
self.passes=int(fields[2])
if line.startswith('tests skipped'):
fields=line.split()
self.skipped=int(fields[3])
if line.startswith('assertions'):
fields=line.split()
self.asserts=int(fields[2])
def evaluateCommand(self,cmd):
if cmd.rc != 0:
return buildbot.status.builder.FAILURE
if self.fails>0 or self.unexpectedpasses>0:
return buildbot.status.builder.FAILURE
if self.passes==0 and self.fails==0 and self.unexpectedpasses==0:
return buildbot.status.builder.FAILURE
# Before we say it was a success, check to see if there were assertions.
# This only gets checked if the checks above have already passed; it is
# the last check prior to passing the step.
if self.asserts>0:
# Any assertions fail the step so that they get investigated
return buildbot.status.builder.FAILURE
else:
return buildbot.status.builder.SUCCESS
def getText(self, cmd, results):
text=BaseShellCommand.getText(self,cmd,results)
text.append("test results")
text.append("passes:%d </br>" % self.passes)
text.append("failures:%d</br>" % self.fails)
text.append("skipped:%d</br>" % self.skipped)
text.append("unexp pass:%d</br>" % self.unexpectedpasses)
text.append("exp fails:%d</br>" % self.expectedfails)
text.append("assertions:%d</br>" % self.asserts)
return text
class BuildShellCheckCommand(BaseShellCommand):
# Use this if you wish to stop the build entirely on failure
haltOnFailure = True
def createSummary(self,log1):
BaseShellCommand.createSummary(self, log1)
def parseSendchangeArguments(args):
"""This function parses the arguments that the Buildbot patch uploader
sends to Buildbot via the "changed files". It takes a list of files and
returns a dictionary of key/value pairs.
"""
parsedArgs = {}
for arg in args:
try:
(key, value) = arg.split(":", 1)
value = value.lstrip().rstrip()
parsedArgs[key] = value
except:
pass
return parsedArgs
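# Illustration (file list is hypothetical): if the uploader sends
# changed files such as ['infoFile: change-1234.info', 'branch: default'],
# parseSendchangeArguments() returns
# {'infoFile': 'change-1234.info', 'branch': 'default'}.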
class BuildRequestDownload(FileDownload):
"""This step reads a Change for a filename and downloads it to the slave.
"""
haltOnFailure = True
def __init__(self, isOptional=False, patchDir=".", **kwargs):
"""arguments:
@type patchDir: string
@param patchDir: The directory on the master that holds the patches
This directory is relative to the base buildmaster
directory.
ie. /home/buildmaster/project
Defaults to '.'
'workdir' is assumed to be 'build' and should be passed if it is
anything else.
'isOptional' is assumed to be False; if the patch is optional, pass True.
"""
self.patchDir = patchDir
self.isOptional = isOptional
# mastersrc and slavedest get overridden in start()
if not 'workdir' in kwargs:
kwargs['workdir'] = "build"
FileDownload.__init__(self, mastersrc=".", slavedest=".", **kwargs)
def start(self):
changes = self.step_status.build.getChanges()
if len(changes) < 1:
return SKIPPED
args = parseSendchangeArguments(changes[0].files)
if not 'infoFile' in args and self.isOptional:
return SKIPPED
self.mastersrc = "%s/%s" % (self.patchDir, args['infoFile'])
self.slavedest = "%s" % (args['infoFile'])
# now that everything is set-up, download the file
FileDownload.start(self)
class ShellCommandToken(BaseShellCommand):
commandOriginal = []
def __init__(self, isOptional=False, patchDir=".", **kwargs):
"""arguments:
@type patchDir: string
@param patchDir: The directory on the master that holds the patches
This directory is relative to the base buildmaster
directory.
ie. /home/buildslave/project
Defaults to '.'
'workdir' is assumed to be 'build' and should be passed if it is
anything else.
'isOptional' is assumed to be False; if the patch is optional, pass True.
"""
self.patchDir = patchDir
self.isOptional = isOptional
self.commandOriginal = []
for item in kwargs['command']:
self.commandOriginal.append(item)
if not 'workdir' in kwargs:
kwargs['workdir'] = "build"
BaseShellCommand.__init__(self, **kwargs)
def start(self):
changes = self.step_status.build.getChanges()
if len(changes) < 1:
return SKIPPED
args = parseSendchangeArguments(changes[0].files)
if not 'infoFile' in args and self.isOptional:
return SKIPPED
#log.msg("command CLEAN [BEFORE]: %s" % self.command)
#log.msg("commandOriginal: %s" % self.commandOriginal)
self.command = []
for item in self.commandOriginal:
self.command.append(item)
#log.msg("command CLEAN [AFTER]: %s" % self.command)
#log.msg("command [BEFORE]: %s" % self.command)
f = open(self.patchDir +"/" + args['infoFile'])
for line in f.readlines():
if line.startswith("repoPath"):
repoPath = line.split()[1]
if line.startswith("revision"):
revision = line.split()[1]
if line.startswith("branch:"):
branch = line.split()[1]
builderName = self.step_status.build.getBuilder().getName()
log.msg("ShellCommandToken -> repoPath: %s" % repoPath)
log.msg("ShellCommandToken -> revision: %s" % revision)
log.msg("ShellCommandToken -> branch: %s" % branch)
log.msg("ShellCommandToken -> builderName: %s" % builderName)
for index, item in enumerate(self.command):
self.command[index] = self.command[index].replace("$repoPath$", repoPath)
self.command[index] = self.command[index].replace("$revision$", revision)
self.command[index] = self.command[index].replace("$branch$", branch)
self.command[index] = self.command[index].replace("$builderName$", builderName)
#log.msg("command [AFTER]: %s" % self.command)
#log.msg("workdir [BEFORE]: %s" % self.remote_kwargs['workdir'])
self.remote_kwargs['workdir'] = self.remote_kwargs['workdir'].replace("$branch$", branch)
#log.msg("workdir [AFTER]: %s" % self.remote_kwargs['workdir'])
BaseShellCommand.start(self)
class SandboxClone(BaseShellCommand):
changeDir = ""
dest = ""
def __init__(self, dest=".", changeDir=".", **kwargs):
"""arguments:
@type changeDir: string
@param changeDir: The directory on the master that holds the processed
change requests. This directory is relative to the base
buildmaster directory.
Defaults to 'changes/processed'
"""
self.changeDir = changeDir
self.dest = dest
BaseShellCommand.__init__(self, **kwargs)
# need to explicitly add our custom arguments to the factory
self.addFactoryArguments(changeDir=changeDir,
dest=dest)
def start(self):
changes = self.step_status.build.getChanges()
# I think that was only here as a safety check since it used to only
# be used for sandbox builds which were supposed to only have a single change
#if len(changes) < 1:
# return SKIPPED
# The list of files changed for this build also contains an additional
# entry that is the name of the build trigger file; we need to find that
# file so that we can pass the build information along the build process.
for changefile in changes[0].files:
if changefile.startswith("change-"):
f = open(self.changeDir +"/" + changefile)
for line in f.readlines():
if line.startswith("url:"):
hg_url = line[line.find(":")+1:].strip()
break
self.command = []
self.command.append("hg")
self.command.append("clone")
self.command.append(hg_url)
self.command.append(self.dest)
break
BaseShellCommand.start(self)
|
replica-con-k/easyvideo
|
refs/heads/master
|
tests/08_layer_draw.py
|
1
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
import test
test.start('Layer drawing')
import easyvideo.screen
scr = easyvideo.screen.Screen()
import random
import pygame
def _randint(max_val):
return random.randint(0, max_val)
for circle in range(100):
try:
pygame.draw.circle(scr.background.layer,
(_randint(255), _randint(255), _randint(255), 255),
(_randint(1024), _randint(768)),
_randint(10))
except Exception, e:
test.failed('Cannot draw into layer (%s)' % e)
scr.update()
test.ok()
|
hkhamm/django_rest_tutorial_2
|
refs/heads/master
|
env/lib/python2.7/site-packages/pip/_vendor/distlib/util.py
|
163
|
#
# Copyright (C) 2012-2013 The Python Software Foundation.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
import codecs
from collections import deque
import contextlib
import csv
from glob import iglob as std_iglob
import io
import json
import logging
import os
import py_compile
import re
import shutil
import socket
import ssl
import subprocess
import sys
import tarfile
import tempfile
try:
import threading
except ImportError:
import dummy_threading as threading
import time
from . import DistlibException
from .compat import (string_types, text_type, shutil, raw_input, StringIO,
cache_from_source, urlopen, httplib, xmlrpclib, splittype,
HTTPHandler, HTTPSHandler as BaseHTTPSHandler,
BaseConfigurator, valid_ident, Container, configparser,
URLError, match_hostname, CertificateError, ZipFile)
logger = logging.getLogger(__name__)
#
# Requirement parsing code for name + optional constraints + optional extras
#
# e.g. 'foo >= 1.2, < 2.0 [bar, baz]'
#
# The regex can seem a bit hairy, so we build it up out of smaller pieces
# which are manageable.
#
COMMA = r'\s*,\s*'
COMMA_RE = re.compile(COMMA)
IDENT = r'(\w|[.-])+'
EXTRA_IDENT = r'(\*|:(\*|\w+):|' + IDENT + ')'
VERSPEC = IDENT + r'\*?'
RELOP = '([<>=!~]=)|[<>]'
#
# The first relop is optional - if absent, will be taken as '~='
#
BARE_CONSTRAINTS = ('(' + RELOP + r')?\s*(' + VERSPEC + ')(' + COMMA + '(' +
RELOP + r')\s*(' + VERSPEC + '))*')
DIRECT_REF = '(from\s+(?P<diref>.*))'
#
# Either the bare constraints or the bare constraints in parentheses
#
CONSTRAINTS = (r'\(\s*(?P<c1>' + BARE_CONSTRAINTS + '|' + DIRECT_REF +
r')\s*\)|(?P<c2>' + BARE_CONSTRAINTS + '\s*)')
EXTRA_LIST = EXTRA_IDENT + '(' + COMMA + EXTRA_IDENT + ')*'
EXTRAS = r'\[\s*(?P<ex>' + EXTRA_LIST + r')?\s*\]'
REQUIREMENT = ('(?P<dn>' + IDENT + r')\s*(' + EXTRAS + r'\s*)?(\s*' +
CONSTRAINTS + ')?$')
REQUIREMENT_RE = re.compile(REQUIREMENT)
#
# Used to scan through the constraints
#
RELOP_IDENT = '(?P<op>' + RELOP + r')\s*(?P<vn>' + VERSPEC + ')'
RELOP_IDENT_RE = re.compile(RELOP_IDENT)
def parse_requirement(s):
def get_constraint(m):
d = m.groupdict()
return d['op'], d['vn']
result = None
m = REQUIREMENT_RE.match(s)
if m:
d = m.groupdict()
name = d['dn']
cons = d['c1'] or d['c2']
if not d['diref']:
url = None
else:
# direct reference
cons = None
url = d['diref'].strip()
if not cons:
cons = None
constr = ''
rs = d['dn']
else:
if cons[0] not in '<>!=':
cons = '~=' + cons
iterator = RELOP_IDENT_RE.finditer(cons)
cons = [get_constraint(m) for m in iterator]
rs = '%s (%s)' % (name, ', '.join(['%s %s' % con for con in cons]))
if not d['ex']:
extras = None
else:
extras = COMMA_RE.split(d['ex'])
result = Container(name=name, constraints=cons, extras=extras,
requirement=rs, source=s, url=url)
return result
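# A usage sketch (requirement string is hypothetical):
# r = parse_requirement('foo [bar, baz] (>= 1.2, < 2.0)')
# should yield r.name == 'foo', r.extras == ['bar', 'baz'],
# r.constraints == [('>=', '1.2'), ('<', '2.0')] and r.url is None.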
def get_resources_dests(resources_root, rules):
"""Find destinations for resources files"""
def get_rel_path(base, path):
# normalizes and returns a lstripped-/-separated path
base = base.replace(os.path.sep, '/')
path = path.replace(os.path.sep, '/')
assert path.startswith(base)
return path[len(base):].lstrip('/')
destinations = {}
for base, suffix, dest in rules:
prefix = os.path.join(resources_root, base)
for abs_base in iglob(prefix):
abs_glob = os.path.join(abs_base, suffix)
for abs_path in iglob(abs_glob):
resource_file = get_rel_path(resources_root, abs_path)
if dest is None: # remove the entry if it was here
destinations.pop(resource_file, None)
else:
rel_path = get_rel_path(abs_base, abs_path)
rel_dest = dest.replace(os.path.sep, '/').rstrip('/')
destinations[resource_file] = rel_dest + '/' + rel_path
return destinations
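# A sketch of how rules are applied (all paths are hypothetical): with
# rules = [('', '**/*.txt', '{data}'), ('', 'docs/*', None)]
# every *.txt below resources_root is mapped to '{data}/<relative path>',
# and any entry also matching 'docs/*' is then removed (a dest of None deletes).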
def in_venv():
if hasattr(sys, 'real_prefix'):
# virtualenv venvs
result = True
else:
# PEP 405 venvs
result = sys.prefix != getattr(sys, 'base_prefix', sys.prefix)
return result
def get_executable():
if sys.platform == 'darwin' and ('__VENV_LAUNCHER__'
in os.environ):
result = os.environ['__VENV_LAUNCHER__']
else:
result = sys.executable
return result
def proceed(prompt, allowed_chars, error_prompt=None, default=None):
p = prompt
while True:
s = raw_input(p)
p = prompt
if not s and default:
s = default
if s:
c = s[0].lower()
if c in allowed_chars:
break
if error_prompt:
p = '%c: %s\n%s' % (c, error_prompt, prompt)
return c
def extract_by_key(d, keys):
if isinstance(keys, string_types):
keys = keys.split()
result = {}
for key in keys:
if key in d:
result[key] = d[key]
return result
def read_exports(stream):
if sys.version_info[0] >= 3:
# needs to be a text stream
stream = codecs.getreader('utf-8')(stream)
# Try to load as JSON, falling back on legacy format
data = stream.read()
stream = StringIO(data)
try:
data = json.load(stream)
result = data['exports']
for group, entries in result.items():
for k, v in entries.items():
s = '%s = %s' % (k, v)
entry = get_export_entry(s)
assert entry is not None
entries[k] = entry
return result
except Exception:
stream.seek(0, 0)
cp = configparser.ConfigParser()
if hasattr(cp, 'read_file'):
cp.read_file(stream)
else:
cp.readfp(stream)
result = {}
for key in cp.sections():
result[key] = entries = {}
for name, value in cp.items(key):
s = '%s = %s' % (name, value)
entry = get_export_entry(s)
assert entry is not None
#entry.dist = self
entries[name] = entry
return result
def write_exports(exports, stream):
if sys.version_info[0] >= 3:
# needs to be a text stream
stream = codecs.getwriter('utf-8')(stream)
cp = configparser.ConfigParser()
for k, v in exports.items():
# TODO check k, v for valid values
cp.add_section(k)
for entry in v.values():
if entry.suffix is None:
s = entry.prefix
else:
s = '%s:%s' % (entry.prefix, entry.suffix)
if entry.flags:
s = '%s [%s]' % (s, ', '.join(entry.flags))
cp.set(k, entry.name, s)
cp.write(stream)
@contextlib.contextmanager
def tempdir():
td = tempfile.mkdtemp()
try:
yield td
finally:
shutil.rmtree(td)
@contextlib.contextmanager
def chdir(d):
cwd = os.getcwd()
try:
os.chdir(d)
yield
finally:
os.chdir(cwd)
@contextlib.contextmanager
def socket_timeout(seconds=15):
cto = socket.getdefaulttimeout()
try:
socket.setdefaulttimeout(seconds)
yield
finally:
socket.setdefaulttimeout(cto)
class cached_property(object):
def __init__(self, func):
self.func = func
#for attr in ('__name__', '__module__', '__doc__'):
# setattr(self, attr, getattr(func, attr, None))
def __get__(self, obj, cls=None):
if obj is None:
return self
value = self.func(obj)
object.__setattr__(obj, self.func.__name__, value)
#obj.__dict__[self.func.__name__] = value = self.func(obj)
return value
def convert_path(pathname):
"""Return 'pathname' as a name that will work on the native filesystem.
The path is split on '/' and put back together again using the current
directory separator. Needed because filenames in the setup script are
always supplied in Unix style, and have to be converted to the local
convention before we can actually use them in the filesystem. Raises
ValueError on non-Unix-ish systems if 'pathname' either starts or
ends with a slash.
"""
if os.sep == '/':
return pathname
if not pathname:
return pathname
if pathname[0] == '/':
raise ValueError("path '%s' cannot be absolute" % pathname)
if pathname[-1] == '/':
raise ValueError("path '%s' cannot end with '/'" % pathname)
paths = pathname.split('/')
while os.curdir in paths:
paths.remove(os.curdir)
if not paths:
return os.curdir
return os.path.join(*paths)
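# e.g. convert_path('pkg/data/file.txt') returns the path unchanged on POSIX
# and 'pkg\\data\\file.txt' on Windows (the example path is hypothetical).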
class FileOperator(object):
def __init__(self, dry_run=False):
self.dry_run = dry_run
self.ensured = set()
self._init_record()
def _init_record(self):
self.record = False
self.files_written = set()
self.dirs_created = set()
def record_as_written(self, path):
if self.record:
self.files_written.add(path)
def newer(self, source, target):
"""Tell if the target is newer than the source.
Returns true if 'source' exists and is more recently modified than
'target', or if 'source' exists and 'target' doesn't.
Returns false if both exist and 'target' is the same age or younger
than 'source'. Raise PackagingFileError if 'source' does not exist.
Note that this test is not very accurate: files created in the same
second will have the same "age".
"""
if not os.path.exists(source):
raise DistlibException("file '%r' does not exist" %
os.path.abspath(source))
if not os.path.exists(target):
return True
return os.stat(source).st_mtime > os.stat(target).st_mtime
def copy_file(self, infile, outfile, check=True):
"""Copy a file respecting dry-run and force flags.
"""
self.ensure_dir(os.path.dirname(outfile))
logger.info('Copying %s to %s', infile, outfile)
if not self.dry_run:
msg = None
if check:
if os.path.islink(outfile):
msg = '%s is a symlink' % outfile
elif os.path.exists(outfile) and not os.path.isfile(outfile):
msg = '%s is a non-regular file' % outfile
if msg:
raise ValueError(msg + ' which would be overwritten')
shutil.copyfile(infile, outfile)
self.record_as_written(outfile)
def copy_stream(self, instream, outfile, encoding=None):
assert not os.path.isdir(outfile)
self.ensure_dir(os.path.dirname(outfile))
logger.info('Copying stream %s to %s', instream, outfile)
if not self.dry_run:
if encoding is None:
outstream = open(outfile, 'wb')
else:
outstream = codecs.open(outfile, 'w', encoding=encoding)
try:
shutil.copyfileobj(instream, outstream)
finally:
outstream.close()
self.record_as_written(outfile)
def write_binary_file(self, path, data):
self.ensure_dir(os.path.dirname(path))
if not self.dry_run:
with open(path, 'wb') as f:
f.write(data)
self.record_as_written(path)
def write_text_file(self, path, data, encoding):
self.ensure_dir(os.path.dirname(path))
if not self.dry_run:
with open(path, 'wb') as f:
f.write(data.encode(encoding))
self.record_as_written(path)
def set_mode(self, bits, mask, files):
if os.name == 'posix':
# Set the executable bits (owner, group, and world) on
# all the files specified.
for f in files:
if self.dry_run:
logger.info("changing mode of %s", f)
else:
mode = (os.stat(f).st_mode | bits) & mask
logger.info("changing mode of %s to %o", f, mode)
os.chmod(f, mode)
set_executable_mode = lambda s, f: s.set_mode(0o555, 0o7777, f)
def ensure_dir(self, path):
path = os.path.abspath(path)
if path not in self.ensured and not os.path.exists(path):
self.ensured.add(path)
d, f = os.path.split(path)
self.ensure_dir(d)
logger.info('Creating %s' % path)
if not self.dry_run:
os.mkdir(path)
if self.record:
self.dirs_created.add(path)
def byte_compile(self, path, optimize=False, force=False, prefix=None):
dpath = cache_from_source(path, not optimize)
logger.info('Byte-compiling %s to %s', path, dpath)
if not self.dry_run:
if force or self.newer(path, dpath):
if not prefix:
diagpath = None
else:
assert path.startswith(prefix)
diagpath = path[len(prefix):]
py_compile.compile(path, dpath, diagpath, True) # raise error
self.record_as_written(dpath)
return dpath
def ensure_removed(self, path):
if os.path.exists(path):
if os.path.isdir(path) and not os.path.islink(path):
logger.debug('Removing directory tree at %s', path)
if not self.dry_run:
shutil.rmtree(path)
if self.record:
if path in self.dirs_created:
self.dirs_created.remove(path)
else:
if os.path.islink(path):
s = 'link'
else:
s = 'file'
logger.debug('Removing %s %s', s, path)
if not self.dry_run:
os.remove(path)
if self.record:
if path in self.files_written:
self.files_written.remove(path)
def is_writable(self, path):
result = False
while not result:
if os.path.exists(path):
result = os.access(path, os.W_OK)
break
parent = os.path.dirname(path)
if parent == path:
break
path = parent
return result
def commit(self):
"""
Commit recorded changes, turn off recording, return
changes.
"""
assert self.record
result = self.files_written, self.dirs_created
self._init_record()
return result
def rollback(self):
if not self.dry_run:
for f in list(self.files_written):
if os.path.exists(f):
os.remove(f)
# dirs should all be empty now, except perhaps for
# __pycache__ subdirs
# reverse so that subdirs appear before their parents
dirs = sorted(self.dirs_created, reverse=True)
for d in dirs:
flist = os.listdir(d)
if flist:
assert flist == ['__pycache__']
sd = os.path.join(d, flist[0])
os.rmdir(sd)
os.rmdir(d) # should fail if non-empty
self._init_record()
def resolve(module_name, dotted_path):
if module_name in sys.modules:
mod = sys.modules[module_name]
else:
mod = __import__(module_name)
if dotted_path is None:
result = mod
else:
parts = dotted_path.split('.')
result = getattr(mod, parts.pop(0))
for p in parts:
result = getattr(result, p)
return result
class ExportEntry(object):
def __init__(self, name, prefix, suffix, flags):
self.name = name
self.prefix = prefix
self.suffix = suffix
self.flags = flags
@cached_property
def value(self):
return resolve(self.prefix, self.suffix)
def __repr__(self):
return '<ExportEntry %s = %s:%s %s>' % (self.name, self.prefix,
self.suffix, self.flags)
def __eq__(self, other):
if not isinstance(other, ExportEntry):
result = False
else:
result = (self.name == other.name and
self.prefix == other.prefix and
self.suffix == other.suffix and
self.flags == other.flags)
return result
__hash__ = object.__hash__
ENTRY_RE = re.compile(r'''(?P<name>(\w|[-.])+)
\s*=\s*(?P<callable>(\w+)([:\.]\w+)*)
\s*(\[\s*(?P<flags>\w+(=\w+)?(,\s*\w+(=\w+)?)*)\s*\])?
''', re.VERBOSE)
def get_export_entry(specification):
m = ENTRY_RE.search(specification)
if not m:
result = None
if '[' in specification or ']' in specification:
raise DistlibException('Invalid specification '
'%r' % specification)
else:
d = m.groupdict()
name = d['name']
path = d['callable']
colons = path.count(':')
if colons == 0:
prefix, suffix = path, None
else:
if colons != 1:
raise DistlibException('Invalid specification '
'%r' % specification)
prefix, suffix = path.split(':')
flags = d['flags']
if flags is None:
if '[' in specification or ']' in specification:
raise DistlibException('Invalid specification '
'%r' % specification)
flags = []
else:
flags = [f.strip() for f in flags.split(',')]
result = ExportEntry(name, prefix, suffix, flags)
return result
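# A sketch of the accepted specification syntax (names are hypothetical):
# e = get_export_entry('console = mypkg.cli:main [flag1, flag2]')
# should give e.name == 'console', e.prefix == 'mypkg.cli',
# e.suffix == 'main' and e.flags == ['flag1', 'flag2'].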
def get_cache_base(suffix=None):
"""
Return the default base location for distlib caches. If the directory does
not exist, it is created. Use the suffix provided for the base directory,
and default to '.distlib' if it isn't provided.
On Windows, if LOCALAPPDATA is defined in the environment, then it is
assumed to be a directory, and will be the parent directory of the result.
On POSIX, and on Windows if LOCALAPPDATA is not defined, the user's home
directory - using os.expanduser('~') - will be the parent directory of
the result.
The result is just the directory '.distlib' in the parent directory as
determined above, or with the name specified with ``suffix``.
"""
if suffix is None:
suffix = '.distlib'
if os.name == 'nt' and 'LOCALAPPDATA' in os.environ:
result = os.path.expandvars('$localappdata')
else:
# Assume posix, or old Windows
result = os.path.expanduser('~')
result = os.path.join(result, suffix)
# we use 'isdir' instead of 'exists', because we want to
# fail if there's a file with that name
if os.path.isdir(result):
usable = os.access(result, os.W_OK)
if not usable:
logger.warning('Directory exists but is not writable: %s', result)
else:
try:
os.makedirs(result)
usable = True
except OSError:
logger.warning('Unable to create %s', result, exc_info=True)
usable = False
if not usable:
result = tempfile.mkdtemp()
logger.warning('Default location unusable, using %s', result)
return result
def path_to_cache_dir(path):
"""
Convert an absolute path to a directory name for use in a cache.
The algorithm used is:
#. On Windows, any ``':'`` in the drive is replaced with ``'---'``.
#. Any occurrence of ``os.sep`` is replaced with ``'--'``.
#. ``'.cache'`` is appended.
"""
d, p = os.path.splitdrive(os.path.abspath(path))
if d:
d = d.replace(':', '---')
p = p.replace(os.sep, '--')
return d + p + '.cache'
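# e.g. on POSIX, path_to_cache_dir('/home/user/project') yields
# '--home--user--project.cache' (the example path is hypothetical).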
def ensure_slash(s):
if not s.endswith('/'):
return s + '/'
return s
def parse_credentials(netloc):
username = password = None
if '@' in netloc:
prefix, netloc = netloc.split('@', 1)
if ':' not in prefix:
username = prefix
else:
username, password = prefix.split(':', 1)
return username, password, netloc
def get_process_umask():
result = os.umask(0o22)
os.umask(result)
return result
def is_string_sequence(seq):
result = True
i = None
for i, s in enumerate(seq):
if not isinstance(s, string_types):
result = False
break
assert i is not None
return result
PROJECT_NAME_AND_VERSION = re.compile('([a-z0-9_]+([.-][a-z_][a-z0-9_]*)*)-'
'([a-z0-9_.+-]+)', re.I)
PYTHON_VERSION = re.compile(r'-py(\d\.?\d?)')
def split_filename(filename, project_name=None):
"""
Extract name, version, python version from a filename (no extension)
Return name, version, pyver or None
"""
result = None
pyver = None
m = PYTHON_VERSION.search(filename)
if m:
pyver = m.group(1)
filename = filename[:m.start()]
if project_name and len(filename) > len(project_name) + 1:
m = re.match(re.escape(project_name) + r'\b', filename)
if m:
n = m.end()
result = filename[:n], filename[n + 1:], pyver
if result is None:
m = PROJECT_NAME_AND_VERSION.match(filename)
if m:
result = m.group(1), m.group(3), pyver
return result
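# e.g. split_filename('flask-0.10.1-py2.7') should return
# ('flask', '0.10.1', '2.7'); the filename here is purely illustrative.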
# Allow spaces in name because of legacy dists like "Twisted Core"
NAME_VERSION_RE = re.compile(r'(?P<name>[\w .-]+)\s*'
r'\(\s*(?P<ver>[^\s)]+)\)$')
def parse_name_and_version(p):
"""
A utility method used to get name and version from a string, e.g. from a
Provides-Dist value.
:param p: A value in a form 'foo (1.0)'
:return: The name and version as a tuple.
"""
m = NAME_VERSION_RE.match(p)
if not m:
raise DistlibException('Ill-formed name/version string: \'%s\'' % p)
d = m.groupdict()
return d['name'].strip().lower(), d['ver']
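# e.g. parse_name_and_version('Twisted Core (12.0.0)') returns
# ('twisted core', '12.0.0'); ill-formed input raises DistlibException.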
def get_extras(requested, available):
result = set()
requested = set(requested or [])
available = set(available or [])
if '*' in requested:
requested.remove('*')
result |= available
for r in requested:
if r == '-':
result.add(r)
elif r.startswith('-'):
unwanted = r[1:]
if unwanted not in available:
logger.warning('undeclared extra: %s' % unwanted)
if unwanted in result:
result.remove(unwanted)
else:
if r not in available:
logger.warning('undeclared extra: %s' % r)
result.add(r)
return result
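# e.g. get_extras(['*', '-tests'], ['docs', 'tests']) should yield
# set(['docs']): '*' selects every available extra, '-tests' then removes 'tests'.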
#
# Extended metadata functionality
#
def _get_external_data(url):
result = {}
try:
# urlopen might fail if it runs into redirections,
# because of Python issue #13696. Fixed in locators
# using a custom redirect handler.
resp = urlopen(url)
headers = resp.info()
if headers.get('Content-Type') != 'application/json':
logger.debug('Unexpected response for JSON request')
else:
reader = codecs.getreader('utf-8')(resp)
#data = reader.read().decode('utf-8')
#result = json.loads(data)
result = json.load(reader)
except Exception as e:
logger.exception('Failed to get external data for %s: %s', url, e)
return result
def get_project_data(name):
url = ('https://www.red-dove.com/pypi/projects/'
'%s/%s/project.json' % (name[0].upper(), name))
result = _get_external_data(url)
return result
def get_package_data(name, version):
url = ('https://www.red-dove.com/pypi/projects/'
'%s/%s/package-%s.json' % (name[0].upper(), name, version))
return _get_external_data(url)
class EventMixin(object):
"""
A very simple publish/subscribe system.
"""
def __init__(self):
self._subscribers = {}
def add(self, event, subscriber, append=True):
"""
Add a subscriber for an event.
:param event: The name of an event.
:param subscriber: The subscriber to be added (and called when the
event is published).
:param append: Whether to append or prepend the subscriber to an
existing subscriber list for the event.
"""
subs = self._subscribers
if event not in subs:
subs[event] = deque([subscriber])
else:
sq = subs[event]
if append:
sq.append(subscriber)
else:
sq.appendleft(subscriber)
def remove(self, event, subscriber):
"""
Remove a subscriber for an event.
:param event: The name of an event.
:param subscriber: The subscriber to be removed.
"""
subs = self._subscribers
if event not in subs:
raise ValueError('No subscribers: %r' % event)
subs[event].remove(subscriber)
def get_subscribers(self, event):
"""
Return an iterator for the subscribers for an event.
:param event: The event to return subscribers for.
"""
return iter(self._subscribers.get(event, ()))
def publish(self, event, *args, **kwargs):
"""
Publish an event and return a list of values returned by its
subscribers.
:param event: The event to publish.
:param args: The positional arguments to pass to the event's
subscribers.
:param kwargs: The keyword arguments to pass to the event's
subscribers.
"""
result = []
for subscriber in self.get_subscribers(event):
try:
value = subscriber(event, *args, **kwargs)
except Exception:
logger.exception('Exception during event publication')
value = None
result.append(value)
logger.debug('publish %s: args = %s, kwargs = %s, result = %s',
event, args, kwargs, result)
return result
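# A minimal usage sketch (event name and subscriber are hypothetical):
# bus = EventMixin()
# bus.add('download-complete', lambda event, url: 'handled %s' % url)
# bus.publish('download-complete', url='https://example.com/pkg.tar.gz')
# # -> ['handled https://example.com/pkg.tar.gz']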
#
# Simple sequencing
#
class Sequencer(object):
def __init__(self):
self._preds = {}
self._succs = {}
self._nodes = set() # nodes with no preds/succs
def add_node(self, node):
self._nodes.add(node)
def remove_node(self, node, edges=False):
if node in self._nodes:
self._nodes.remove(node)
if edges:
for p in set(self._preds.get(node, ())):
self.remove(p, node)
for s in set(self._succs.get(node, ())):
self.remove(node, s)
# Remove empties
for k, v in list(self._preds.items()):
if not v:
del self._preds[k]
for k, v in list(self._succs.items()):
if not v:
del self._succs[k]
def add(self, pred, succ):
assert pred != succ
self._preds.setdefault(succ, set()).add(pred)
self._succs.setdefault(pred, set()).add(succ)
def remove(self, pred, succ):
assert pred != succ
try:
preds = self._preds[succ]
succs = self._succs[pred]
except KeyError:
raise ValueError('%r not a successor of anything' % succ)
try:
preds.remove(pred)
succs.remove(succ)
except KeyError:
raise ValueError('%r not a successor of %r' % (succ, pred))
def is_step(self, step):
return (step in self._preds or step in self._succs or
step in self._nodes)
def get_steps(self, final):
if not self.is_step(final):
raise ValueError('Unknown: %r' % final)
result = []
todo = []
seen = set()
todo.append(final)
while todo:
step = todo.pop(0)
if step in seen:
# if a step was already seen,
# move it to the end (so it will appear earlier
# when reversed on return) ... but not for the
# final step, as that would be confusing for
# users
if step != final:
result.remove(step)
result.append(step)
else:
seen.add(step)
result.append(step)
preds = self._preds.get(step, ())
todo.extend(preds)
return reversed(result)
@property
def strong_connections(self):
#http://en.wikipedia.org/wiki/Tarjan%27s_strongly_connected_components_algorithm
index_counter = [0]
stack = []
lowlinks = {}
index = {}
result = []
graph = self._succs
def strongconnect(node):
# set the depth index for this node to the smallest unused index
index[node] = index_counter[0]
lowlinks[node] = index_counter[0]
index_counter[0] += 1
stack.append(node)
# Consider successors
try:
successors = graph[node]
except Exception:
successors = []
for successor in successors:
if successor not in lowlinks:
# Successor has not yet been visited
strongconnect(successor)
lowlinks[node] = min(lowlinks[node],lowlinks[successor])
elif successor in stack:
# the successor is in the stack and hence in the current
# strongly connected component (SCC)
lowlinks[node] = min(lowlinks[node],index[successor])
# If `node` is a root node, pop the stack and generate an SCC
if lowlinks[node] == index[node]:
connected_component = []
while True:
successor = stack.pop()
connected_component.append(successor)
if successor == node: break
component = tuple(connected_component)
# storing the result
result.append(component)
for node in graph:
if node not in lowlinks:
strongconnect(node)
return result
@property
def dot(self):
result = ['digraph G {']
for succ in self._preds:
preds = self._preds[succ]
for pred in preds:
result.append(' %s -> %s;' % (pred, succ))
for node in self._nodes:
result.append(' %s;' % node)
result.append('}')
return '\n'.join(result)
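# A minimal ordering sketch (step names are hypothetical):
# seq = Sequencer()
# seq.add('build', 'test')
# seq.add('test', 'release')
# list(seq.get_steps('release'))  # -> ['build', 'test', 'release']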
#
# Unarchiving functionality for zip, tar, tgz, tbz, whl
#
ARCHIVE_EXTENSIONS = ('.tar.gz', '.tar.bz2', '.tar', '.zip',
'.tgz', '.tbz', '.whl')
def unarchive(archive_filename, dest_dir, format=None, check=True):
def check_path(path):
if not isinstance(path, text_type):
path = path.decode('utf-8')
p = os.path.abspath(os.path.join(dest_dir, path))
if not p.startswith(dest_dir) or p[plen] != os.sep:
raise ValueError('path outside destination: %r' % p)
dest_dir = os.path.abspath(dest_dir)
plen = len(dest_dir)
archive = None
if format is None:
if archive_filename.endswith(('.zip', '.whl')):
format = 'zip'
elif archive_filename.endswith(('.tar.gz', '.tgz')):
format = 'tgz'
mode = 'r:gz'
elif archive_filename.endswith(('.tar.bz2', '.tbz')):
format = 'tbz'
mode = 'r:bz2'
elif archive_filename.endswith('.tar'):
format = 'tar'
mode = 'r'
else:
raise ValueError('Unknown format for %r' % archive_filename)
try:
if format == 'zip':
archive = ZipFile(archive_filename, 'r')
if check:
names = archive.namelist()
for name in names:
check_path(name)
else:
archive = tarfile.open(archive_filename, mode)
if check:
names = archive.getnames()
for name in names:
check_path(name)
if format != 'zip' and sys.version_info[0] < 3:
# See Python issue 17153. If the dest path contains Unicode,
# tarfile extraction fails on Python 2.x if a member path name
# contains non-ASCII characters - it leads to an implicit
# bytes -> unicode conversion using ASCII to decode.
for tarinfo in archive.getmembers():
if not isinstance(tarinfo.name, text_type):
tarinfo.name = tarinfo.name.decode('utf-8')
archive.extractall(dest_dir)
finally:
if archive:
archive.close()
def zip_dir(directory):
"""zip a directory tree into a BytesIO object"""
result = io.BytesIO()
dlen = len(directory)
with ZipFile(result, "w") as zf:
for root, dirs, files in os.walk(directory):
for name in files:
full = os.path.join(root, name)
rel = root[dlen:]
dest = os.path.join(rel, name)
zf.write(full, dest)
return result
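# Usage sketch (the directory is hypothetical): the returned BytesIO holds a
# complete archive, useful e.g. for uploading without touching the filesystem:
# payload = zip_dir('/tmp/wheel-contents').getvalue()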
#
# Simple progress bar
#
UNITS = ('', 'K', 'M', 'G','T','P')
class Progress(object):
unknown = 'UNKNOWN'
def __init__(self, minval=0, maxval=100):
assert maxval is None or maxval >= minval
self.min = self.cur = minval
self.max = maxval
self.started = None
self.elapsed = 0
self.done = False
def update(self, curval):
assert self.min <= curval
assert self.max is None or curval <= self.max
self.cur = curval
now = time.time()
if self.started is None:
self.started = now
else:
self.elapsed = now - self.started
def increment(self, incr):
assert incr >= 0
self.update(self.cur + incr)
def start(self):
self.update(self.min)
return self
def stop(self):
if self.max is not None:
self.update(self.max)
self.done = True
@property
def maximum(self):
return self.unknown if self.max is None else self.max
@property
def percentage(self):
if self.done:
result = '100 %'
elif self.max is None:
result = ' ?? %'
else:
v = 100.0 * (self.cur - self.min) / (self.max - self.min)
result = '%3d %%' % v
return result
def format_duration(self, duration):
if (duration <= 0) and self.max is None or self.cur == self.min:
result = '??:??:??'
#elif duration < 1:
# result = '--:--:--'
else:
result = time.strftime('%H:%M:%S', time.gmtime(duration))
return result
@property
def ETA(self):
if self.done:
prefix = 'Done'
t = self.elapsed
#import pdb; pdb.set_trace()
else:
prefix = 'ETA '
if self.max is None:
t = -1
elif self.elapsed == 0 or (self.cur == self.min):
t = 0
else:
#import pdb; pdb.set_trace()
t = float(self.max - self.min)
t /= self.cur - self.min
t = (t - 1) * self.elapsed
return '%s: %s' % (prefix, self.format_duration(t))
@property
def speed(self):
if self.elapsed == 0:
result = 0.0
else:
result = (self.cur - self.min) / self.elapsed
for unit in UNITS:
if result < 1000:
break
result /= 1000.0
return '%d %sB/s' % (result, unit)
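# A small sketch of the Progress API (values are illustrative):
# p = Progress(maxval=200).start()
# p.update(50)
# p.percentage   # -> ' 25 %'
# p.speed        # -> e.g. '123 B/s', depending on elapsed time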
#
# Glob functionality
#
RICH_GLOB = re.compile(r'\{([^}]*)\}')
_CHECK_RECURSIVE_GLOB = re.compile(r'[^/\\,{]\*\*|\*\*[^/\\,}]')
_CHECK_MISMATCH_SET = re.compile(r'^[^{]*\}|\{[^}]*$')
def iglob(path_glob):
"""Extended globbing function that supports ** and {opt1,opt2,opt3}."""
if _CHECK_RECURSIVE_GLOB.search(path_glob):
msg = """invalid glob %r: recursive glob "**" must be used alone"""
raise ValueError(msg % path_glob)
if _CHECK_MISMATCH_SET.search(path_glob):
msg = """invalid glob %r: mismatching set marker '{' or '}'"""
raise ValueError(msg % path_glob)
return _iglob(path_glob)
def _iglob(path_glob):
rich_path_glob = RICH_GLOB.split(path_glob, 1)
if len(rich_path_glob) > 1:
assert len(rich_path_glob) == 3, rich_path_glob
prefix, set, suffix = rich_path_glob
for item in set.split(','):
for path in _iglob(''.join((prefix, item, suffix))):
yield path
else:
if '**' not in path_glob:
for item in std_iglob(path_glob):
yield item
else:
prefix, radical = path_glob.split('**', 1)
if prefix == '':
prefix = '.'
if radical == '':
radical = '*'
else:
# we support both
radical = radical.lstrip('/')
radical = radical.lstrip('\\')
for path, dir, files in os.walk(prefix):
path = os.path.normpath(path)
for fn in _iglob(os.path.join(path, radical)):
yield fn
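# Glob sketch (paths are hypothetical): both brace alternation and a
# standalone '**' segment are supported, e.g.
# list(iglob('src/**/*.{py,cfg}'))  # every .py and .cfg file under src/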
#
# HTTPSConnection which verifies certificates/matches domains
#
class HTTPSConnection(httplib.HTTPSConnection):
ca_certs = None # set this to the path to the certs file (.pem)
check_domain = True # only used if ca_certs is not None
# noinspection PyPropertyAccess
def connect(self):
sock = socket.create_connection((self.host, self.port), self.timeout)
if getattr(self, '_tunnel_host', False):
self.sock = sock
self._tunnel()
if not hasattr(ssl, 'SSLContext'):
# For 2.x
if self.ca_certs:
cert_reqs = ssl.CERT_REQUIRED
else:
cert_reqs = ssl.CERT_NONE
self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file,
cert_reqs=cert_reqs,
ssl_version=ssl.PROTOCOL_SSLv23,
ca_certs=self.ca_certs)
else:
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.options |= ssl.OP_NO_SSLv2
if self.cert_file:
context.load_cert_chain(self.cert_file, self.key_file)
kwargs = {}
if self.ca_certs:
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(cafile=self.ca_certs)
if getattr(ssl, 'HAS_SNI', False):
kwargs['server_hostname'] = self.host
self.sock = context.wrap_socket(sock, **kwargs)
if self.ca_certs and self.check_domain:
try:
match_hostname(self.sock.getpeercert(), self.host)
logger.debug('Host verified: %s', self.host)
except CertificateError:
self.sock.shutdown(socket.SHUT_RDWR)
self.sock.close()
raise
class HTTPSHandler(BaseHTTPSHandler):
def __init__(self, ca_certs, check_domain=True):
BaseHTTPSHandler.__init__(self)
self.ca_certs = ca_certs
self.check_domain = check_domain
def _conn_maker(self, *args, **kwargs):
"""
This is called to create a connection instance. Normally you'd
pass a connection class to do_open, but it doesn't actually check for
a class, and just expects a callable. As long as we behave just as a
constructor would have, we should be OK. If it ever changes so that
we *must* pass a class, we'll create an UnsafeHTTPSConnection class
which just sets check_domain to False in the class definition, and
choose which one to pass to do_open.
"""
result = HTTPSConnection(*args, **kwargs)
if self.ca_certs:
result.ca_certs = self.ca_certs
result.check_domain = self.check_domain
return result
def https_open(self, req):
try:
return self.do_open(self._conn_maker, req)
except URLError as e:
if 'certificate verify failed' in str(e.reason):
raise CertificateError('Unable to verify server certificate '
'for %s' % req.host)
else:
raise
#
# To protect against mixing HTTP traffic with HTTPS (examples: a Man-In-The-
# Middle proxy using HTTP listens on port 443, or an index mistakenly serves
# HTML containing a http://xyz link when it should be https://xyz),
# you can use the following handler class, which does not allow HTTP traffic.
#
# It works by inheriting from HTTPHandler - so build_opener won't add a
# handler for HTTP itself.
#
class HTTPSOnlyHandler(HTTPSHandler, HTTPHandler):
def http_open(self, req):
raise URLError('Unexpected HTTP request on what should be a secure '
'connection: %s' % req)
#
# XML-RPC with timeouts
#
_ver_info = sys.version_info[:2]
if _ver_info == (2, 6):
class HTTP(httplib.HTTP):
def __init__(self, host='', port=None, **kwargs):
if port == 0: # 0 means use port 0, not the default port
port = None
self._setup(self._connection_class(host, port, **kwargs))
class HTTPS(httplib.HTTPS):
def __init__(self, host='', port=None, **kwargs):
if port == 0: # 0 means use port 0, not the default port
port = None
self._setup(self._connection_class(host, port, **kwargs))
class Transport(xmlrpclib.Transport):
def __init__(self, timeout, use_datetime=0):
self.timeout = timeout
xmlrpclib.Transport.__init__(self, use_datetime)
def make_connection(self, host):
h, eh, x509 = self.get_host_info(host)
if _ver_info == (2, 6):
result = HTTP(h, timeout=self.timeout)
else:
if not self._connection or host != self._connection[0]:
self._extra_headers = eh
self._connection = host, httplib.HTTPConnection(h)
result = self._connection[1]
return result
class SafeTransport(xmlrpclib.SafeTransport):
def __init__(self, timeout, use_datetime=0):
self.timeout = timeout
xmlrpclib.SafeTransport.__init__(self, use_datetime)
def make_connection(self, host):
h, eh, kwargs = self.get_host_info(host)
if not kwargs:
kwargs = {}
kwargs['timeout'] = self.timeout
if _ver_info == (2, 6):
result = HTTPS(host, None, **kwargs)
else:
if not self._connection or host != self._connection[0]:
self._extra_headers = eh
self._connection = host, httplib.HTTPSConnection(h, None,
**kwargs)
result = self._connection[1]
return result
class ServerProxy(xmlrpclib.ServerProxy):
def __init__(self, uri, **kwargs):
self.timeout = timeout = kwargs.pop('timeout', None)
# The above classes only come into play if a timeout
# is specified
if timeout is not None:
scheme, _ = splittype(uri)
use_datetime = kwargs.get('use_datetime', 0)
if scheme == 'https':
tcls = SafeTransport
else:
tcls = Transport
kwargs['transport'] = t = tcls(timeout, use_datetime=use_datetime)
self.transport = t
xmlrpclib.ServerProxy.__init__(self, uri, **kwargs)
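# Illustrative usage sketch (the endpoint URL and method name are placeholders):
# passing a ``timeout`` keyword makes ServerProxy route requests through the
# Transport/SafeTransport classes above, so a socket-level timeout is applied.
#   proxy = ServerProxy('https://xmlrpc.example.com/RPC2', timeout=10.0)
#   result = proxy.some_method()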
#
# CSV functionality. This is provided because on 2.x, the csv module can't
# handle Unicode. However, we need to deal with Unicode in e.g. RECORD files.
#
def _csv_open(fn, mode, **kwargs):
if sys.version_info[0] < 3:
mode += 'b'
else:
kwargs['newline'] = ''
return open(fn, mode, **kwargs)
class CSVBase(object):
defaults = {
'delimiter': str(','), # The strs are used because we need native
'quotechar': str('"'), # str in the csv API (2.x won't take
'lineterminator': str('\n') # Unicode)
}
def __enter__(self):
return self
def __exit__(self, *exc_info):
self.stream.close()
class CSVReader(CSVBase):
def __init__(self, **kwargs):
if 'stream' in kwargs:
stream = kwargs['stream']
if sys.version_info[0] >= 3:
# needs to be a text stream
stream = codecs.getreader('utf-8')(stream)
self.stream = stream
else:
self.stream = _csv_open(kwargs['path'], 'r')
self.reader = csv.reader(self.stream, **self.defaults)
def __iter__(self):
return self
def next(self):
result = next(self.reader)
if sys.version_info[0] < 3:
for i, item in enumerate(result):
if not isinstance(item, text_type):
result[i] = item.decode('utf-8')
return result
__next__ = next
class CSVWriter(CSVBase):
def __init__(self, fn, **kwargs):
self.stream = _csv_open(fn, 'w')
self.writer = csv.writer(self.stream, **self.defaults)
def writerow(self, row):
if sys.version_info[0] < 3:
r = []
for item in row:
if isinstance(item, text_type):
item = item.encode('utf-8')
r.append(item)
row = r
self.writer.writerow(row)
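# Illustrative usage sketch (the file name and row values are hypothetical).
# Both classes are context managers, so the underlying stream is closed on exit:
#   with CSVWriter('RECORD') as writer:
#       writer.writerow([u'path/to/file', u'sha256=abc123', u'1234'])
#   with CSVReader(path='RECORD') as reader:
#       rows = list(reader)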
#
# Configurator functionality
#
class Configurator(BaseConfigurator):
value_converters = dict(BaseConfigurator.value_converters)
value_converters['inc'] = 'inc_convert'
def __init__(self, config, base=None):
super(Configurator, self).__init__(config)
self.base = base or os.getcwd()
def configure_custom(self, config):
def convert(o):
if isinstance(o, (list, tuple)):
result = type(o)([convert(i) for i in o])
elif isinstance(o, dict):
if '()' in o:
result = self.configure_custom(o)
else:
result = {}
for k in o:
result[k] = convert(o[k])
else:
result = self.convert(o)
return result
c = config.pop('()')
if not callable(c):
c = self.resolve(c)
props = config.pop('.', None)
# Check for valid identifiers
args = config.pop('[]', ())
if args:
args = tuple([convert(o) for o in args])
items = [(k, convert(config[k])) for k in config if valid_ident(k)]
kwargs = dict(items)
result = c(*args, **kwargs)
if props:
for n, v in props.items():
setattr(result, n, convert(v))
return result
def __getitem__(self, key):
result = self.config[key]
if isinstance(result, dict) and '()' in result:
self.config[key] = result = self.configure_custom(result)
return result
def inc_convert(self, value):
"""Default converter for the inc:// protocol."""
if not os.path.isabs(value):
value = os.path.join(self.base, value)
with codecs.open(value, 'r', encoding='utf-8') as f:
result = json.load(f)
return result
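# Illustrative sketch of the inc:// converter (the base directory and file name
# are hypothetical): a value such as 'inc://logging.json' is resolved relative
# to ``self.base`` and replaced by the parsed JSON content of that file.
#   cfg = Configurator({'logging': 'inc://logging.json'}, base='/etc/myapp')
#   logging_dict = cfg.inc_convert('logging.json')   # parses /etc/myapp/logging.json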
#
# Mixin for running subprocesses and capturing their output
#
class SubprocessMixin(object):
def __init__(self, verbose=False, progress=None):
self.verbose = verbose
self.progress = progress
def reader(self, stream, context):
"""
Read lines from a subprocess' output stream and either pass to a progress
callable (if specified) or write progress information to sys.stderr.
"""
progress = self.progress
verbose = self.verbose
while True:
s = stream.readline()
if not s:
break
if progress is not None:
progress(s, context)
else:
if not verbose:
sys.stderr.write('.')
else:
sys.stderr.write(s.decode('utf-8'))
sys.stderr.flush()
stream.close()
def run_command(self, cmd, **kwargs):
p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, **kwargs)
t1 = threading.Thread(target=self.reader, args=(p.stdout, 'stdout'))
t1.start()
t2 = threading.Thread(target=self.reader, args=(p.stderr, 'stderr'))
t2.start()
p.wait()
t1.join()
t2.join()
if self.progress is not None:
self.progress('done.', 'main')
elif self.verbose:
sys.stderr.write('done.\n')
return p
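# Illustrative usage sketch (the subclass name and command are hypothetical):
#   class Builder(SubprocessMixin):
#       pass
#   b = Builder(verbose=True)
#   p = b.run_command(['python', '--version'])
#   assert p.returncode == 0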
|
bowlofstew/pycparser
|
refs/heads/master
|
tests/all_tests.py
|
23
|
#!/usr/bin/env python
import sys
sys.path[0:0] = ['.', '..']
import unittest
suite = unittest.TestLoader().loadTestsFromNames(
[
'test_c_lexer',
'test_c_ast',
'test_general',
'test_c_parser',
'test_c_generator',
]
)
testresult = unittest.TextTestRunner(verbosity=1).run(suite)
sys.exit(0 if testresult.wasSuccessful() else 1)
|
rombie/contrail-controller
|
refs/heads/master
|
src/container/kube-manager/kube_manager/tests/vnc/test_vnc_pod.py
|
1
|
#
# Copyright (c) 2017 Juniper Networks, Inc. All rights reserved.
#
import uuid
from collections import namedtuple
import ipaddress
import unittest
from cfgm_common.exceptions import NoIdError
import mock
from kube_manager.tests.vnc.test_case import KMTestCase
from kube_manager.common.kube_config_db import (NamespaceKM, PodKM)
from kube_manager.vnc.config_db import (
VirtualNetworkKM, VirtualMachineKM, VirtualMachineInterfaceKM)
from kube_manager.vnc import vnc_kubernetes_config as kube_config
from kube_manager.vnc.config_db import TagKM
from kube_manager.vnc.label_cache import XLabelCache
from kube_manager.tests.vnc.db_mock import DBBaseKM
from kube_manager.vnc.vnc_kubernetes import VncKubernetes
TestPod = namedtuple('TestPod', ['uuid', 'meta', 'spec'])
class VncPodTest(KMTestCase):
def setUp(self, extra_config_knobs=None):
super(VncPodTest, self).setUp(
extra_config_knobs=extra_config_knobs)
def tearDown(self):
super(VncPodTest, self).tearDown()
@classmethod
def setUpClass(cls, extra_config_knobs=None):
super(VncPodTest, cls).setUpClass(
extra_config_knobs=extra_config_knobs)
DBBaseKM.set_nested(False)
cls.domain = 'default-domain'
cls.cluster_project = 'test-project'
cls.vn_name = cls.cluster_name() + '-test-pod-network'
cls.service_vn_name = cls.cluster_name() + '-test-service-network'
cls.ns_name = 'test-namespace'
cls.pod_name = 'test-pod'
cls.pod_status = {
'hostIP': cls.get_kubernetes_node_ip(),
'phase': 'created'
}
cn_dict = {
'domain': cls.domain,
'project': cls.cluster_project,
'name': cls.vn_name
}
service_cn_dict = {
'domain': cls.domain,
'project': cls.cluster_project,
'name': cls.service_vn_name
}
cp_dict = {'project': cls.cluster_project}
kube_config.VncKubernetesConfig.args(). \
cluster_project = repr(cp_dict)
kube_config.VncKubernetesConfig.args(). \
cluster_pod_network = repr(cn_dict)
kube_config.VncKubernetesConfig.args(). \
cluster_service_network = repr(service_cn_dict)
kube_config.VncKubernetesConfig.vnc_kubernetes_config[
'cluster_pod_ipam_fq_name'] = \
['default-domain', cls.cluster_project, 'pod-ipam']
# Create Vrouter Object.
cls.vrouter_name = 'test-VncPodTest-vrouter'
cls.vrouter_obj = cls.create_virtual_router(cls.vrouter_name)
@classmethod
def tearDownClass(cls):
for pod in list(PodKM):
PodKM.delete(pod)
for namespace in list(NamespaceKM):
NamespaceKM.delete(namespace)
# Cleanup the Vrouter object.
cls.delete_virtual_router(cls.vrouter_obj.uuid)
super(VncPodTest, cls).tearDownClass()
def _construct_pod_spec(self, nodeName):
return {'nodeName': nodeName}
def _construct_pod_meta(self, name, uuid, namespace, labels={}):
meta = {}
meta['name'] = name
meta['uid'] = uuid
meta['namespace'] = namespace
if labels:
meta['labels'] = labels
return meta
def _create_namespace(self, ns_name, ns_eval_vn_dict, is_isolated=False, labels={}):
ns_uuid = str(uuid.uuid4())
ns_add_event = self.create_add_namespace_event(ns_name, ns_uuid)
ns_object = ns_add_event['object']
ns_object['spec'] = {}
ns_meta = ns_object['metadata']
ns_meta['annotations'] = {}
ns_meta['name'] = ns_name
ns_meta['uid'] = ns_uuid
ns_meta['namespace'] = ns_name
ns_meta['labels'] = labels
if ns_eval_vn_dict:
ns_meta['annotations']['opencontrail.org/network'] = \
ns_eval_vn_dict
if is_isolated:
ns_meta['annotations']['opencontrail.org/isolation'] = 'true'
NamespaceKM.delete(ns_name)
NamespaceKM.delete(ns_uuid)
NamespaceKM.locate(ns_name, ns_object)
NamespaceKM.locate(ns_uuid, ns_object)
self.enqueue_event(ns_add_event)
self.wait_for_all_tasks_done()
return ns_uuid
def _create_virtual_network(self, proj_obj, vn_name):
pod_vn_obj, service_vn_obj = \
self.create_pod_service_network(vn_name, self.service_vn_name, \
proj_obj, '10.32.0.0/12', '10.96.0.0/12')
return pod_vn_obj
def _create_update_pod(self, pod_name, pod_namespace, pod_status,
eval_vn_dict, action, labels={}, req_uuid=None,
wait=True):
pod_uuid = req_uuid if req_uuid else str(uuid.uuid4())
pod_spec = {'nodeName': 'test-node'}
pod_labels = labels
pod_meta = {'name': pod_name, 'uid': pod_uuid,
'namespace': pod_namespace, 'labels': pod_labels}
if eval_vn_dict:
pod_meta['annotations'] = {
'opencontrail.org/network': eval_vn_dict}
self.set_mock_for_kube()
pod_add_event = self.create_event('Pod', pod_spec, pod_meta, action)
pod_add_event['object']['status'] = pod_status
pod = PodKM.locate(pod_uuid, pod_add_event['object'])
self.enqueue_event(pod_add_event)
if wait:
self.wait_for_all_tasks_done()
return TestPod(pod.uuid, pod_meta, pod_spec)
def set_mock_for_kube(self):
if VncKubernetes._vnc_kubernetes is not None:
VncKubernetes._vnc_kubernetes.pod_mgr._kube = mock.MagicMock()
def _delete_pod(self, testpod, uuid=None, spec=None, meta=None, wait=True):
pod_del_event = self.create_event('Pod',
spec if spec else testpod.spec,
meta if meta else testpod.meta,
'DELETED')
PodKM.delete(uuid if uuid else testpod.uuid)
self.enqueue_event(pod_del_event)
if wait:
self.wait_for_all_tasks_done()
def _assert_virtual_network(self, vn_obj_uuid):
vn_obj = self._vnc_lib.virtual_network_read(id=vn_obj_uuid)
self.assertIsNotNone(vn_obj)
vn_obj = VirtualNetworkKM.locate(vn_obj.uuid)
self.assertIsNotNone(vn_obj)
def _assert_pod_ip_is_from_vn_ipam(self, iip_obj, vn_obj_uuid):
vn_obj = self._vnc_lib.virtual_network_read(id=vn_obj_uuid)
pod_ipam = [ipam for ipam in vn_obj.get_network_ipam_refs()
if ipam['to'][-1] == 'pod-ipam'][0]
self.assertTrue(len(pod_ipam['attr'].ipam_subnets) == 1)
pod_ipam_subnet = pod_ipam['attr'].ipam_subnets[0]
self.assertIsNotNone(pod_ipam_subnet)
subnet = pod_ipam_subnet.subnet
if subnet is None:
try:
pod_ipam = self._vnc_lib.network_ipam_read(id=pod_ipam['uuid'])
subnet = pod_ipam.ipam_subnets.subnets[0].subnet
except:
pass
self.assertIsNotNone(subnet)
iip_ip = ipaddress.ip_address(unicode(iip_obj.instance_ip_address))
vn_network = ipaddress.ip_network(subnet.ip_prefix + u'/'
+ unicode(subnet.ip_prefix_len))
self.assertTrue(iip_ip in vn_network)
def _assert_virtual_machine(self, pod_uuid, cluster_project,
proj_obj, vn_obj_uuid):
vm = self._vnc_lib.virtual_machine_read(id=pod_uuid)
self.assertIsNotNone(vm)
vm = VirtualMachineKM.locate(vm.uuid)
self.assertIsNotNone(vm)
self.assertTrue(len(vm.virtual_machine_interfaces) > 0)
for vmi_id in list(vm.virtual_machine_interfaces):
vmi = self._vnc_lib.virtual_machine_interface_read(id=vmi_id)
self.assertIsNotNone(vmi)
self.assertEqual(vmi.parent_name, cluster_project)
self.assertEqual(vmi.parent_uuid, proj_obj.uuid)
vmi = VirtualMachineInterfaceKM.locate(vmi_id)
# self.assertTrue(len(vmi.security_groups) > 1)
# for sg_uuid in list(vmi.security_groups):
# sg = self._vnc_lib.security_group_read(id=sg_uuid)
# self.assertIsNotNone(sg)
self.assertTrue(len(vmi.instance_ips) == 1)
iip_uuid = list(vmi.instance_ips)[0]
iip = self._vnc_lib.instance_ip_read(id=iip_uuid)
self.assertIsNotNone(iip)
self._assert_pod_ip_is_from_vn_ipam(iip, vn_obj_uuid)
class VncPodTestClusterProjectDefined(VncPodTest):
def setUp(self, extra_config_knobs=None):
super(VncPodTestClusterProjectDefined, self).setUp(
extra_config_knobs=extra_config_knobs)
def tearDown(self):
super(VncPodTestClusterProjectDefined, self).tearDown()
def _add_update_pod(self, action):
ns_name = self.ns_name + '_' + str(uuid.uuid4())
self._create_namespace(ns_name, None)
proj_fq_name = ['default-domain', self.cluster_project]
proj_obj = self._vnc_lib.project_read(fq_name=proj_fq_name)
vn_obj_uuid = self._create_virtual_network(proj_obj, self.vn_name).uuid
testpod = self._create_update_pod(self.pod_name,
ns_name,
self.pod_status,
None, action)
self._assert_virtual_network(vn_obj_uuid)
self._assert_virtual_machine(testpod.uuid, self.cluster_project,
proj_obj, vn_obj_uuid)
self._delete_pod(testpod)
self._assert_virtual_network(vn_obj_uuid)
tmp_fq_name = ['default-domain', self.cluster_project, self.pod_name]
self.assertRaises(
NoIdError,
self._vnc_lib.virtual_machine_read,
fq_name=tmp_fq_name
)
vn_obj = VirtualNetworkKM.locate(vn_obj_uuid)
self.assertTrue(len(vn_obj.instance_ips) == 0)
def test_pod_add_delete(self):
self._add_update_pod('ADDED')
def test_update_pod_before_add(self):
self._add_update_pod('MODIFIED')
def test_delete_add_pod_after_kube_manager_is_killed(self):
self._create_namespace(self.ns_name, None)
proj_fq_name = ['default-domain', self.cluster_project]
proj_obj = self._vnc_lib.project_read(fq_name=proj_fq_name)
vn_obj = self._create_virtual_network(proj_obj, self.vn_name)
testpod = self._create_update_pod(self.pod_name,
self.ns_name,
self.pod_status,
None, 'ADDED')
self._assert_virtual_network(vn_obj.uuid)
self._assert_virtual_machine(testpod.uuid, self.cluster_project,
proj_obj, vn_obj.uuid)
self.kill_kube_manager()
self._delete_pod(testpod, wait=False)
testpod = self._create_update_pod(self.pod_name,
self.ns_name,
self.pod_status,
None, 'ADDED',
wait=False)
self.spawn_kube_manager()
self.set_mock_for_kube()
self.wait_for_all_tasks_done()
self._assert_virtual_network(vn_obj.uuid)
self._assert_virtual_machine(testpod.uuid, self.cluster_project,
proj_obj, vn_obj.uuid)
self._delete_pod(testpod)
vn_obj = VirtualNetworkKM.locate(vn_obj.uuid)
self._assert_virtual_network(vn_obj.uuid)
tmp_fq_name = ['default-domain', self.cluster_project, self.pod_name]
self.assertRaises(
NoIdError,
self._vnc_lib.virtual_machine_read,
fq_name=tmp_fq_name
)
vn_obj = VirtualNetworkKM.locate(vn_obj.uuid)
self.assertTrue(len(vn_obj.instance_ips) == 0)
class VncPodTestClusterProjectUndefined(VncPodTestClusterProjectDefined):
def setUp(self, extra_config_knobs=None):
super(VncPodTestClusterProjectUndefined, self).setUp(
extra_config_knobs=extra_config_knobs)
def tearDown(self):
super(VncPodTestClusterProjectUndefined, self).tearDown()
@classmethod
def setUpClass(cls, extra_config_knobs=None):
super(VncPodTestClusterProjectUndefined, cls).setUpClass(
extra_config_knobs=extra_config_knobs)
cls.cluster_project = cls.ns_name
args = {}
args['domain'] = 'default-domain'
args['project'] = cls.cluster_project
args['name'] = cls.vn_name
kube_config.VncKubernetesConfig.args().cluster_project = None
kube_config.VncKubernetesConfig.args().cluster_network = repr(args)
kube_config.VncKubernetesConfig.vnc_kubernetes_config[
'cluster_pod_ipam_fq_name'] = \
['default-domain', cls.cluster_project, 'pod-ipam']
class VncPodTestNamespaceIsolation(VncPodTest):
def setUp(self, extra_config_knobs=None):
super(VncPodTestNamespaceIsolation, self).setUp(
extra_config_knobs=extra_config_knobs)
def tearDown(self):
super(VncPodTestNamespaceIsolation, self).tearDown()
@classmethod
def setUpClass(cls, extra_config_knobs=None):
super(VncPodTestNamespaceIsolation, cls).setUpClass(
extra_config_knobs=extra_config_knobs)
cls.ns_name = 'test-namespace-isolated'
cls.cluster_project = cls.ns_name
cls.vn_name = cls.cluster_name() + '-' + cls.ns_name + '-pod-network'
args = {}
args['domain'] = 'default-domain'
args['project'] = cls.cluster_project
args['name'] = cls.vn_name
kube_config.VncKubernetesConfig.args().cluster_project = None
kube_config.VncKubernetesConfig.args().cluster_network = repr(args)
kube_config.VncKubernetesConfig.vnc_kubernetes_config[
'cluster_pod_ipam_fq_name'] = \
['default-domain', cls.cluster_project, 'pod-ipam']
def test_pod_add_delete_with_namespace_isolation_true(self):
proj_obj = self.create_project(self.cluster_project)
self._create_network_ipam('pod-ipam', 'flat-subnet', '10.32.0.0/12',
proj_obj)
self._create_namespace(self.ns_name, None, True)
vn_obj = self._vnc_lib.virtual_network_read(
fq_name=['default-domain', self.cluster_project, self.vn_name])
testpod = self._create_update_pod(self.pod_name,
self.ns_name,
self.pod_status,
None, 'ADDED')
self._assert_virtual_network(vn_obj.uuid)
self._assert_virtual_machine(testpod.uuid, self.cluster_project,
proj_obj, vn_obj.uuid)
self._delete_pod(testpod)
self._assert_virtual_network(vn_obj.uuid)
tmp_fq_name = ['default-domain', self.ns_name, self.pod_name]
self.assertRaises(
NoIdError,
self._vnc_lib.virtual_machine_read,
fq_name=tmp_fq_name
)
vn_obj = VirtualNetworkKM.locate(vn_obj.uuid)
self.assertTrue(len(vn_obj.instance_ips) == 0)
class VncPodTestCustomNetworkAnnotation(VncPodTest):
def setUp(self, extra_config_knobs=None):
super(VncPodTestCustomNetworkAnnotation, self).setUp(
extra_config_knobs=extra_config_knobs)
def tearDown(self):
super(VncPodTestCustomNetworkAnnotation, self).tearDown()
@classmethod
def setUpClass(cls, extra_config_knobs=None):
super(VncPodTestCustomNetworkAnnotation, cls).setUpClass(
extra_config_knobs=extra_config_knobs)
cls.eval_vn_dict = '{"domain":"default-domain",\
"project":"%s",\
"name":"%s"}' % (cls.cluster_project,
cls.vn_name)
def test_pod_add_delete_with_namespace_custom_network_annotation(self):
proj_obj = self.create_project(self.cluster_project)
vn_obj = self._create_virtual_network(proj_obj, self.vn_name)
self._create_namespace(self.ns_name, self.eval_vn_dict)
proj_fq_name = ['default-domain', self.cluster_project]
proj_obj = self._vnc_lib.project_read(fq_name=proj_fq_name)
testpod = self._create_update_pod(self.pod_name,
self.ns_name,
self.pod_status,
None, 'ADDED')
self._assert_virtual_network(vn_obj.uuid)
self._assert_virtual_machine(testpod.uuid, self.cluster_project,
proj_obj, vn_obj.uuid)
self._delete_pod(testpod)
self._assert_virtual_network(vn_obj.uuid)
tmp_fq_name = ['default-domain', self.ns_name, self.pod_name]
self.assertRaises(
NoIdError,
self._vnc_lib.virtual_machine_read,
fq_name=tmp_fq_name
)
vn_obj = VirtualNetworkKM.locate(vn_obj.uuid)
self.assertTrue(len(vn_obj.instance_ips) == 0)
def test_pod_add_delete_with_pod_custom_network_annotation(self):
proj_obj = self.create_project(self.cluster_project)
vn_obj = self._create_virtual_network(proj_obj, self.vn_name)
self._create_namespace(self.ns_name, None)
proj_fq_name = ['default-domain', self.cluster_project]
proj_obj = self._vnc_lib.project_read(fq_name=proj_fq_name)
testpod = self._create_update_pod(self.pod_name,
self.ns_name,
self.pod_status,
self.eval_vn_dict, 'ADDED')
self._assert_virtual_network(vn_obj.uuid)
self._assert_virtual_machine(testpod.uuid, self.cluster_project,
proj_obj, vn_obj.uuid)
self._delete_pod(testpod)
self._assert_virtual_network(vn_obj.uuid)
tmp_fq_name = ['default-domain', self.ns_name, self.pod_name]
self.assertRaises(
NoIdError,
self._vnc_lib.virtual_machine_read,
fq_name=tmp_fq_name
)
vn_obj = VirtualNetworkKM.locate(vn_obj.uuid)
self.assertTrue(len(vn_obj.instance_ips) == 0)
class VncPodTestScaling(VncPodTest):
def setUp(self, extra_config_knobs=None):
super(VncPodTestScaling, self).setUp(
extra_config_knobs=extra_config_knobs)
def tearDown(self):
super(VncPodTestScaling, self).tearDown()
def test_pod_add_scaling(self):
scale = 100
self._create_namespace(self.ns_name, None)
proj_fq_name = ['default-domain', self.cluster_project]
proj_obj = self._vnc_lib.project_read(fq_name=proj_fq_name)
vn_obj_uuid = self._create_virtual_network(proj_obj, self.vn_name).uuid
self._assert_virtual_network(vn_obj_uuid)
pods = []
for i in xrange(scale):
testpod = self._create_update_pod(self.pod_name + str(i),
self.ns_name,
self.pod_status,
None, 'ADDED')
self._assert_virtual_machine(testpod.uuid, self.cluster_project,
proj_obj, vn_obj_uuid)
pods.append(testpod)
vn_obj = VirtualNetworkKM.locate(vn_obj_uuid)
self.assertTrue(len(vn_obj.instance_ips) == scale)
for i, pod in enumerate(pods):
self._delete_pod(pod)
vn_obj = VirtualNetworkKM.locate(vn_obj_uuid)
self.assertTrue(len(vn_obj.instance_ips) == scale - 1 - i)
class VncPodLabelsTest(VncPodTest):
def _construct_tag_name(self, type, value):
return "=".join([type, value])
def _construct_tag_fq_name(self, type, value, proj_obj = None):
if proj_obj:
tag_fq_name = proj_obj['fq_name'] + \
[self._construct_tag_name(type, value)]
else:
tag_fq_name = [self._construct_tag_name(type, value)]
return tag_fq_name
def _validate_tags(self, labels, validate_delete=False, proj_obj=None):
for key, value in labels.iteritems():
tag_fq_name = self._construct_tag_fq_name(key, value)
try:
tag_obj = self._vnc_lib.tag_read(fq_name=tag_fq_name)
except NoIdError:
if not validate_delete:
self.assertTrue(False)
tag_uuid = TagKM.get_fq_name_to_uuid(tag_fq_name)
if validate_delete:
self.assertIsNone(tag_uuid)
else:
self.assertIsNotNone(tag_uuid)
# TBD: validate tags are available on the VM.
def _validate_label_cache(self, uuid, labels):
obj_labels = XLabelCache.get_labels(uuid)
for key, value in labels.iteritems():
label_key = XLabelCache.get_key(key, value)
self.assertIn(label_key, obj_labels)
def _add_update_pod(self, action, labels={}, uuid=None):
self._create_namespace(self.ns_name, None)
proj_fq_name = ['default-domain', self.cluster_project]
proj_obj = self._vnc_lib.project_read(fq_name=proj_fq_name)
vn_obj_uuid = self._create_virtual_network(proj_obj, self.vn_name).uuid
pod_uuid, pod_meta, pod_spec = self._create_update_pod(self.pod_name,
self.ns_name,
self.pod_status,
None, action,
labels=labels,
req_uuid=uuid)
self._assert_virtual_network(vn_obj_uuid)
self._assert_virtual_machine(pod_uuid, self.cluster_project,
proj_obj, vn_obj_uuid)
if labels:
self._validate_label_cache(pod_uuid, labels)
return pod_uuid
def _delete_pod(self, pod_uuid):
pod = PodKM.find_by_name_or_uuid(pod_uuid)
self.assertIsNotNone(pod)
pod_spec = self._construct_pod_spec(pod.nodename)
pod_meta = self._construct_pod_meta(pod.name, pod_uuid,
pod.namespace)
super(VncPodLabelsTest, self)._delete_pod(None, pod_uuid, pod_spec, pod_meta)
def test_pod_add_delete(self):
labels = {
"testcase": unittest.TestCase.id(self)
}
pod_uuid = self._add_update_pod('ADDED', dict(labels))
self._validate_tags(labels)
# Verify that the namespace tag is associated with this pod, internally.
ns_label = XLabelCache.get_namespace_label(self.ns_name)
self._validate_label_cache(pod_uuid, ns_label)
labels['modify'] = "testing_label_modify"
pod_uuid = self._add_update_pod('MODIFIED', dict(labels), pod_uuid)
self._validate_tags(labels)
self._delete_pod(pod_uuid)
self._validate_tags(labels, validate_delete=True)
|
xfournet/intellij-community
|
refs/heads/master
|
python/testData/mover/innerIf_afterDown.py
|
83
|
if value is not None:
if not False or value <= 2:
pass
print "here"
|
microcom/odoo
|
refs/heads/9.0
|
addons/website_event_sale/__init__.py
|
1023
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import controllers
import models
|
telefonicaid/perseo-fe
|
refs/heads/master
|
test/acceptance/integration/notifications/__init__.py
|
5
|
# -*- coding: utf-8 -*-
#
# Copyright 2015 Telefonica Investigación y Desarrollo, S.A.U
#
# This file is part of perseo-fe
#
# perseo-fe is free software: you can redistribute it and/or
# modify it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the License,
# or (at your option) any later version.
#
# perseo-fe is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with perseo-fe.
# If not, see http://www.gnu.org/licenses/.
#
# For those usages not covered by the GNU Affero General Public License
# please contact with:
# iot_support at tid.es
#
__author__ = 'Iván Arias León (ivan.ariasleon@telefonica.com)'
|
YiqunPeng/Leetcode-pyq
|
refs/heads/master
|
solutions/800SimilarRGBColor.py
|
1
|
import sys
class Solution:
def similarRGB(self, color):
"""
:type color: str
:rtype: str
"""
dic = {'0':0,'1':1,'2':2,'3':3,'4':4,'5':5,'6':6,'7':7,'8':8,
'9':9,'a':10,'b':11,'c':12,'d':13,'e':14,'f':15}
ans = '#'
for i in range(3):
tar = dic[color[1+i*2]] * 16 + dic[color[2+i*2]]
print(tar)
min_squ, min_val, pre_squ = sys.maxsize, -1, sys.maxsize
for j in range(16):
squ = (j * 17 - tar) ** 2
if min_squ > squ:
min_squ = squ
min_val = j
if squ > pre_squ:
break
pre_squ = squ
val_str = ''
if min_val < 10:
val_str = str(min_val) * 2
else:
val_str = chr(min_val-10+ord('a')) * 2
ans = ans + val_str
return ans
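# Illustrative check (a commonly cited example for this problem; note that the
# method also prints each intermediate ``tar`` value):
#   Solution().similarRGB('#09f166')   # -> '#11ee66'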
|
FireWRT/OpenWrt-Firefly-Libraries
|
refs/heads/master
|
staging_dir/target-mipsel_1004kc+dsp_uClibc-0.9.33.2/usr/lib/python3.4/email/errors.py
|
120
|
# Copyright (C) 2001-2006 Python Software Foundation
# Author: Barry Warsaw
# Contact: email-sig@python.org
"""email package exception classes."""
class MessageError(Exception):
"""Base class for errors in the email package."""
class MessageParseError(MessageError):
"""Base class for message parsing errors."""
class HeaderParseError(MessageParseError):
"""Error while parsing headers."""
class BoundaryError(MessageParseError):
"""Couldn't find terminating boundary."""
class MultipartConversionError(MessageError, TypeError):
"""Conversion to a multipart is prohibited."""
class CharsetError(MessageError):
"""An illegal charset was given."""
# These are parsing defects which the parser was able to work around.
class MessageDefect(ValueError):
"""Base class for a message defect."""
def __init__(self, line=None):
if line is not None:
super().__init__(line)
self.line = line
class NoBoundaryInMultipartDefect(MessageDefect):
"""A message claimed to be a multipart but had no boundary parameter."""
class StartBoundaryNotFoundDefect(MessageDefect):
"""The claimed start boundary was never found."""
class CloseBoundaryNotFoundDefect(MessageDefect):
"""A start boundary was found, but not the corresponding close boundary."""
class FirstHeaderLineIsContinuationDefect(MessageDefect):
"""A message had a continuation line as its first header line."""
class MisplacedEnvelopeHeaderDefect(MessageDefect):
"""A 'Unix-from' header was found in the middle of a header block."""
class MissingHeaderBodySeparatorDefect(MessageDefect):
"""Found line with no leading whitespace and no colon before blank line."""
# XXX: backward compatibility, just in case (it was never emitted).
MalformedHeaderDefect = MissingHeaderBodySeparatorDefect
class MultipartInvariantViolationDefect(MessageDefect):
"""A message claimed to be a multipart but no subparts were found."""
class InvalidMultipartContentTransferEncodingDefect(MessageDefect):
"""An invalid content transfer encoding was set on the multipart itself."""
class UndecodableBytesDefect(MessageDefect):
"""Header contained bytes that could not be decoded"""
class InvalidBase64PaddingDefect(MessageDefect):
"""base64 encoded sequence had an incorrect length"""
class InvalidBase64CharactersDefect(MessageDefect):
"""base64 encoded sequence had characters not in base64 alphabet"""
# These errors are specific to header parsing.
class HeaderDefect(MessageDefect):
"""Base class for a header defect."""
def __init__(self, *args, **kw):
super().__init__(*args, **kw)
class InvalidHeaderDefect(HeaderDefect):
"""Header is not valid, message gives details."""
class HeaderMissingRequiredValue(HeaderDefect):
"""A header that must have a value had none"""
class NonPrintableDefect(HeaderDefect):
"""ASCII characters outside the ascii-printable range found"""
def __init__(self, non_printables):
super().__init__(non_printables)
self.non_printables = non_printables
def __str__(self):
return ("the following ASCII non-printables found in header: "
"{}".format(self.non_printables))
class ObsoleteHeaderDefect(HeaderDefect):
"""Header uses syntax declared obsolete by RFC 5322"""
class NonASCIILocalPartDefect(HeaderDefect):
"""local_part contains non-ASCII characters"""
# This defect only occurs during unicode parsing, not when
# parsing messages decoded from binary.
|
mjtamlyn/django
|
refs/heads/master
|
tests/custom_pk/models.py
|
106
|
"""
Using a custom primary key
By default, Django adds an ``"id"`` field to each model. But you can override
this behavior by explicitly adding ``primary_key=True`` to a field.
"""
from django.db import models
from .fields import MyAutoField
class Employee(models.Model):
employee_code = models.IntegerField(primary_key=True, db_column='code')
first_name = models.CharField(max_length=20)
last_name = models.CharField(max_length=20)
class Meta:
ordering = ('last_name', 'first_name')
def __str__(self):
return "%s %s" % (self.first_name, self.last_name)
class Business(models.Model):
name = models.CharField(max_length=20, primary_key=True)
employees = models.ManyToManyField(Employee)
class Meta:
verbose_name_plural = 'businesses'
def __str__(self):
return self.name
class Bar(models.Model):
id = MyAutoField(primary_key=True, db_index=True)
def __str__(self):
return repr(self.pk)
class Foo(models.Model):
bar = models.ForeignKey(Bar, models.CASCADE)
|
joone/chromium-crosswalk
|
refs/heads/2016.04.css-round-display-edtior-draft-1
|
native_client_sdk/src/build_tools/tests/test_projects_test.py
|
45
|
#!/usr/bin/env python
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import sys
import unittest
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
BUILD_TOOLS_DIR = os.path.dirname(SCRIPT_DIR)
CHROME_SRC = os.path.dirname(os.path.dirname(os.path.dirname(BUILD_TOOLS_DIR)))
MOCK_DIR = os.path.join(CHROME_SRC, 'third_party', 'pymock')
sys.path.append(BUILD_TOOLS_DIR)
sys.path.append(MOCK_DIR)
import mock
import test_projects
class TestMain(unittest.TestCase):
"""Tests for main() entry point of the script."""
def testInvalidArgs(self):
with mock.patch('sys.stderr'):
with self.assertRaises(SystemExit):
test_projects.main(['--foo'])
if __name__ == '__main__':
unittest.main()
|
mlperf/training_results_v0.6
|
refs/heads/master
|
Fujitsu/benchmarks/resnet/implementations/mxnet/3rdparty/tvm/topi/tests/python/test_topi_conv2d_nhwc.py
|
1
|
"""Example code to do convolution."""
import os
import numpy as np
import tvm
import topi
import topi.testing
from tvm.contrib.pickle_memoize import memoize
from topi.util import get_const_tuple
def verify_conv2d_nhwc(batch, in_channel, in_size, num_filter, kernel, stride, padding, dilation=1):
in_height = in_width = in_size
A = tvm.placeholder((batch, in_height, in_width, in_channel), name='A')
W = tvm.placeholder((kernel, kernel, in_channel, num_filter), name='W')
dW = topi.nn.dilate(W, (1, dilation, dilation, 1))
B = topi.nn.conv2d_nhwc(A, dW, stride, padding)
a_shape = get_const_tuple(A.shape)
w_shape = get_const_tuple(W.shape)
dtype = A.dtype
@memoize("topi.tests.test_topi_conv2d_nhwc.verify_nhwc")
def get_ref_data():
a_np = np.random.uniform(size=a_shape).astype(dtype)
w_np = np.random.uniform(size=w_shape).astype(dtype)
dw_np = topi.testing.dilate_python(w_np, (1, dilation, dilation, 1))
b_np = topi.testing.conv2d_nhwc_python(a_np, dw_np, stride, padding)
return a_np, w_np, b_np
a_np, w_np, b_np = get_ref_data()
def check_device(device):
if not tvm.module.enabled(device):
print("Skip because %s is not enabled" % device)
return
print("Running on target: %s" % device)
with tvm.target.create(device):
s = topi.generic.schedule_conv2d_nhwc([B])
ctx = tvm.context(device, 0)
a = tvm.nd.array(a_np, ctx)
w = tvm.nd.array(w_np, ctx)
b = tvm.nd.array(np.zeros(get_const_tuple(B.shape), dtype=B.dtype), ctx)
func = tvm.build(s, [A, W, B], device)
func(a, w, b)
np.testing.assert_allclose(b.asnumpy(), b_np, rtol=1e-5)
for device in ['llvm']:
check_device(device)
def test_conv2d_nhwc():
verify_conv2d_nhwc(1, 256, 32, 256, 3, 1, "SAME")
verify_conv2d_nhwc(4, 128, 16, 128, 5, 2, "SAME")
verify_conv2d_nhwc(4, 128, 16, 256, 5, 2, "SAME")
verify_conv2d_nhwc(1, 256, 32, 256, 3, 1, "VALID")
verify_conv2d_nhwc(1, 256, 32, 256, 3, 1, "VALID")
verify_conv2d_nhwc(4, 128, 16, 128, 5, 2, "VALID")
verify_conv2d_nhwc(4, 128, 16, 256, 5, 2, "VALID")
# dilation = 2
verify_conv2d_nhwc(1, 256, 32, 256, 3, 1, "SAME", dilation=2)
if __name__ == "__main__":
test_conv2d_nhwc()
|
iamutkarshtiwari/sympy
|
refs/heads/master
|
sympy/core/numbers.py
|
26
|
from __future__ import print_function, division
import decimal
import fractions
import math
import re as regex
from collections import defaultdict
from .containers import Tuple
from .sympify import converter, sympify, _sympify, SympifyError
from .singleton import S, Singleton
from .expr import Expr, AtomicExpr
from .decorators import _sympifyit
from .cache import cacheit, clear_cache
from .logic import fuzzy_not
from sympy.core.compatibility import (
as_int, integer_types, long, string_types, with_metaclass, HAS_GMPY,
SYMPY_INTS)
import mpmath
import mpmath.libmp as mlib
from mpmath.libmp import mpf_pow, mpf_pi, mpf_e, phi_fixed
from mpmath.ctx_mp import mpnumeric
from mpmath.libmp.libmpf import (
finf as _mpf_inf, fninf as _mpf_ninf,
fnan as _mpf_nan, fzero as _mpf_zero, _normalize as mpf_normalize,
prec_to_dps)
from sympy.utilities.misc import debug
rnd = mlib.round_nearest
_LOG2 = math.log(2)
def comp(z1, z2, tol=None):
"""Return a bool indicating whether the error between z1 and z2 is <= tol.
If ``tol`` is None then True will be returned if there is no significant
difference between the numbers: ``abs(z1 - z2)*10**p <= 1/2`` where ``p``
is the lower of the precisions of the values. A comparison of strings will
be made if ``z1`` is a Number and a) ``z2`` is a string or b) ``tol`` is ''
and ``z2`` is a Number.
When ``tol`` is a nonzero value, if z2 is non-zero and ``|z1| > 1``
the error is normalized by ``|z1|``, so if you want to see if the
absolute error between ``z1`` and ``z2`` is <= ``tol`` then call this
as ``comp(z1 - z2, 0, tol)``.
"""
if type(z2) is str:
if not isinstance(z1, Number):
raise ValueError('when z2 is a str z1 must be a Number')
return str(z1) == z2
if not z1:
z1, z2 = z2, z1
if not z1:
return True
if not tol:
if tol is None:
if type(z2) is str and getattr(z1, 'is_Number', False):
return str(z1) == z2
a, b = Float(z1), Float(z2)
return int(abs(a - b)*10**prec_to_dps(
min(a._prec, b._prec)))*2 <= 1
elif all(getattr(i, 'is_Number', False) for i in (z1, z2)):
return z1._prec == z2._prec and str(z1) == str(z2)
raise ValueError('exact comparison requires two Numbers')
diff = abs(z1 - z2)
az1 = abs(z1)
if z2 and az1 > 1:
return diff/az1 <= tol
else:
return diff <= tol
def mpf_norm(mpf, prec):
"""Return the mpf tuple normalized appropriately for the indicated
precision after doing a check to see if zero should be returned or
not when the mantissa is 0. ``mpf_normalize`` always assumes that this
is zero, but it may not be, since the mpf values "+inf", "-inf" and
"nan" also have a mantissa of zero.
Note: this is not intended to validate a given mpf tuple, so sending
mpf tuples that were not created by mpmath may produce bad results. This
is only a wrapper to ``mpf_normalize`` which provides the check for non-
zero mpfs that have a 0 for the mantissa.
"""
sign, man, expt, bc = mpf
if not man:
# hack for mpf_normalize which does not do this;
# it assumes that if man is zero the result is 0
# (see issue 6639)
if not bc:
return _mpf_zero
else:
# don't change anything; this should already
# be a well formed mpf tuple
return mpf
rv = mpf_normalize(sign, man, expt, bc, prec, rnd)
return rv
# TODO: we should use the warnings module
_errdict = {"divide": False}
def seterr(divide=False):
"""
Should sympy raise an exception on 0/0 or return a nan?
divide == True .... raise an exception
divide == False ... return nan
"""
if _errdict["divide"] != divide:
clear_cache()
_errdict["divide"] = divide
def _decimal_to_Rational_prec(dec):
"""Convert an ordinary decimal instance to a Rational."""
if not dec.is_finite():
raise TypeError("dec must be finite, got %s." % dec)
s, d, e = dec.as_tuple()
prec = len(d)
if e >= 0: # it's an integer
rv = Integer(int(dec))
else:
s = (-1)**s
d = sum([di*10**i for i, di in enumerate(reversed(d))])
rv = Rational(s*d, 10**-e)
return rv, prec
def _literal_float(f):
"""Return True if n can be interpreted as a floating point number."""
pat = r"[-+]?((\d*\.\d+)|(\d+\.?))(eE[-+]?\d+)?"
return bool(regex.match(pat, f))
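# Illustrative results (not part of the original module):
#   _literal_float('1.23e-4')   # True
#   _literal_float('abc')       # False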
# (a,b) -> gcd(a,b)
_gcdcache = {}
# TODO caching with decorator, but not to degrade performance
def igcd(*args):
"""Computes positive integer greatest common divisor.
The algorithm is based on the well known Euclid's algorithm. To
improve speed, igcd() has its own caching mechanism implemented.
Examples
========
>>> from sympy.core.numbers import igcd
>>> igcd(2, 4)
2
>>> igcd(5, 10, 15)
5
"""
a = args[0]
for b in args[1:]:
try:
a = _gcdcache[(a, b)]
except KeyError:
a, b = as_int(a), as_int(b)
if a and b:
if b < 0:
b = -b
while b:
a, b = b, a % b
else:
a = abs(a or b)
_gcdcache[(a, b)] = a
if a == 1 or b == 1:
return 1
return a
def ilcm(*args):
"""Computes integer least common multiple.
Examples
========
>>> from sympy.core.numbers import ilcm
>>> ilcm(5, 10)
10
>>> ilcm(7, 3)
21
>>> ilcm(5, 10, 15)
30
"""
if 0 in args:
return 0
a = args[0]
for b in args[1:]:
a = a*b // igcd(a, b)
return a
def igcdex(a, b):
"""Returns x, y, g such that g = x*a + y*b = gcd(a, b).
>>> from sympy.core.numbers import igcdex
>>> igcdex(2, 3)
(-1, 1, 1)
>>> igcdex(10, 12)
(-1, 1, 2)
>>> x, y, g = igcdex(100, 2004)
>>> x, y, g
(-20, 1, 4)
>>> x*100 + y*2004
4
"""
if (not a) and (not b):
return (0, 1, 0)
if not a:
return (0, b//abs(b), abs(b))
if not b:
return (a//abs(a), 0, abs(a))
if a < 0:
a, x_sign = -a, -1
else:
x_sign = 1
if b < 0:
b, y_sign = -b, -1
else:
y_sign = 1
x, y, r, s = 1, 0, 0, 1
while b:
(c, q) = (a % b, a // b)
(a, b, r, s, x, y) = (b, c, x - q*r, y - q*s, r, s)
return (x*x_sign, y*y_sign, a)
class Number(AtomicExpr):
"""
Represents any kind of number in sympy.
Floating point numbers are represented by the Float class.
Integer numbers (of any size), together with rational numbers (again,
there is no limit on their size) are represented by the Rational class.
If you want to represent, for example, ``1+sqrt(2)``, then you need to do::
Rational(1) + sqrt(Rational(2))
"""
is_commutative = True
is_number = True
is_Number = True
__slots__ = []
# Used to make max(x._prec, y._prec) return x._prec when only x is a float
_prec = -1
def __new__(cls, *obj):
if len(obj) == 1:
obj = obj[0]
if isinstance(obj, Number):
return obj
if isinstance(obj, SYMPY_INTS):
return Integer(obj)
if isinstance(obj, tuple) and len(obj) == 2:
return Rational(*obj)
if isinstance(obj, (float, mpmath.mpf, decimal.Decimal)):
return Float(obj)
if isinstance(obj, string_types):
val = sympify(obj)
if isinstance(val, Number):
return val
else:
raise ValueError('String "%s" does not denote a Number' % obj)
if isinstance(obj, Number):
return obj
msg = "expected str|int|long|float|Decimal|Number object but got %r"
raise TypeError(msg % type(obj).__name__)
def __divmod__(self, other):
from .containers import Tuple
from sympy.functions.elementary.complexes import sign
try:
other = Number(other)
except TypeError:
msg = "unsupported operand type(s) for divmod(): '%s' and '%s'"
raise TypeError(msg % (type(self).__name__, type(other).__name__))
if not other:
raise ZeroDivisionError('modulo by zero')
if self.is_Integer and other.is_Integer:
return Tuple(*divmod(self.p, other.p))
else:
rat = self/other
w = sign(rat)*int(abs(rat)) # = rat.floor()
r = self - other*w
return Tuple(w, r)
def __rdivmod__(self, other):
try:
other = Number(other)
except TypeError:
msg = "unsupported operand type(s) for divmod(): '%s' and '%s'"
raise TypeError(msg % (type(other).__name__, type(self).__name__))
return divmod(other, self)
def __round__(self, *args):
return round(float(self), *args)
def _as_mpf_val(self, prec):
"""Evaluation of mpf tuple accurate to at least prec bits."""
raise NotImplementedError('%s needs ._as_mpf_val() method' %
(self.__class__.__name__))
def _eval_evalf(self, prec):
return Float._new(self._as_mpf_val(prec), prec)
def _as_mpf_op(self, prec):
prec = max(prec, self._prec)
return self._as_mpf_val(prec), prec
def __float__(self):
return mlib.to_float(self._as_mpf_val(53))
def _eval_conjugate(self):
return self
def _eval_order(self, *symbols):
from sympy import Order
# Order(5, x, y) -> Order(1,x,y)
return Order(S.One, *symbols)
def _eval_subs(self, old, new):
if old == -self:
return -new
return self # there is no other possibility
def _eval_is_finite(self):
return True
@classmethod
def class_key(cls):
return 1, 0, 'Number'
@cacheit
def sort_key(self, order=None):
return self.class_key(), (0, ()), (), self
@_sympifyit('other', NotImplemented)
def __add__(self, other):
if isinstance(other, Number):
if other is S.NaN:
return S.NaN
elif other is S.Infinity:
return S.Infinity
elif other is S.NegativeInfinity:
return S.NegativeInfinity
return AtomicExpr.__add__(self, other)
@_sympifyit('other', NotImplemented)
def __sub__(self, other):
if isinstance(other, Number):
if other is S.NaN:
return S.NaN
elif other is S.Infinity:
return S.NegativeInfinity
elif other is S.NegativeInfinity:
return S.Infinity
return AtomicExpr.__sub__(self, other)
@_sympifyit('other', NotImplemented)
def __mul__(self, other):
if isinstance(other, Number):
if other is S.NaN:
return S.NaN
elif other is S.Infinity:
if self.is_zero:
return S.NaN
elif self.is_positive:
return S.Infinity
else:
return S.NegativeInfinity
elif other is S.NegativeInfinity:
if self.is_zero:
return S.NaN
elif self.is_positive:
return S.NegativeInfinity
else:
return S.Infinity
elif isinstance(other, Tuple):
return NotImplemented
return AtomicExpr.__mul__(self, other)
@_sympifyit('other', NotImplemented)
def __div__(self, other):
if isinstance(other, Number):
if other is S.NaN:
return S.NaN
elif other is S.Infinity or other is S.NegativeInfinity:
return S.Zero
return AtomicExpr.__div__(self, other)
__truediv__ = __div__
def __eq__(self, other):
raise NotImplementedError('%s needs .__eq__() method' %
(self.__class__.__name__))
def __ne__(self, other):
raise NotImplementedError('%s needs .__ne__() method' %
(self.__class__.__name__))
def __lt__(self, other):
try:
other = _sympify(other)
except SympifyError:
raise TypeError("Invalid comparison %s < %s" % (self, other))
raise NotImplementedError('%s needs .__lt__() method' %
(self.__class__.__name__))
def __le__(self, other):
try:
other = _sympify(other)
except SympifyError:
raise TypeError("Invalid comparison %s <= %s" % (self, other))
raise NotImplementedError('%s needs .__le__() method' %
(self.__class__.__name__))
def __gt__(self, other):
try:
other = _sympify(other)
except SympifyError:
raise TypeError("Invalid comparison %s > %s" % (self, other))
return _sympify(other).__lt__(self)
def __ge__(self, other):
try:
other = _sympify(other)
except SympifyError:
raise TypeError("Invalid comparison %s >= %s" % (self, other))
return _sympify(other).__le__(self)
def __hash__(self):
return super(Number, self).__hash__()
def is_constant(self, *wrt, **flags):
return True
def as_coeff_mul(self, *deps, **kwargs):
# a -> c*t
if self.is_Rational or not kwargs.pop('rational', True):
return self, tuple()
elif self.is_negative:
return S.NegativeOne, (-self,)
return S.One, (self,)
def as_coeff_add(self, *deps):
# a -> c + t
if self.is_Rational:
return self, tuple()
return S.Zero, (self,)
def as_coeff_Mul(self, rational=False):
"""Efficiently extract the coefficient of a product. """
if rational and not self.is_Rational:
return S.One, self
return self, S.One
def as_coeff_Add(self):
"""Efficiently extract the coefficient of a summation. """
return self, S.Zero
def gcd(self, other):
"""Compute GCD of `self` and `other`. """
from sympy.polys import gcd
return gcd(self, other)
def lcm(self, other):
"""Compute LCM of `self` and `other`. """
from sympy.polys import lcm
return lcm(self, other)
def cofactors(self, other):
"""Compute GCD and cofactors of `self` and `other`. """
from sympy.polys import cofactors
return cofactors(self, other)
class Float(Number):
"""Represent a floating-point number of arbitrary precision.
Examples
========
>>> from sympy import Float
>>> Float(3.5)
3.50000000000000
>>> Float(3)
3.00000000000000
Creating Floats from strings (and Python ``int`` and ``long``
types) will give a minimum precision of 15 digits, but the
precision will automatically increase to capture all digits
entered.
>>> Float(1)
1.00000000000000
>>> Float(10**20)
100000000000000000000.
>>> Float('1e20')
100000000000000000000.
However, *floating-point* numbers (Python ``float`` types) retain
only 15 digits of precision:
>>> Float(1e20)
1.00000000000000e+20
>>> Float(1.23456789123456789)
1.23456789123457
It may be preferable to enter high-precision decimal numbers
as strings:
>>> Float('1.23456789123456789')
1.23456789123456789
The desired number of digits can also be specified:
>>> Float('1e-3', 3)
0.00100
>>> Float(100, 4)
100.0
Float can automatically count significant figures if a null string
is sent for the precision; spaces are also allowed in the string. (Auto-
counting is only allowed for strings, ints and longs).
>>> Float('123 456 789 . 123 456', '')
123456789.123456
>>> Float('12e-3', '')
0.012
>>> Float(3, '')
3.
If a number is written in scientific notation, only the digits before the
exponent are considered significant if a decimal appears; otherwise the
"e" signifies only how to move the decimal point:
>>> Float('60.e2', '') # 2 digits significant
6.0e+3
>>> Float('60e2', '') # 4 digits significant
6000.
>>> Float('600e-2', '') # 3 digits significant
6.00
Notes
=====
Floats are inexact by their nature unless their value can be represented
exactly in binary.
>>> approx, exact = Float(.1, 1), Float(.125, 1)
For calculation purposes, evalf needs to be able to change the precision
but this will not increase the accuracy of the inexact value. The
following is the most accurate 5-digit approximation of a value of 0.1
that had only 1 digit of precision:
>>> approx.evalf(5)
0.099609
By contrast, 0.125 is exact in binary (as it is in base 10) and so it
can be passed to Float or evalf to obtain an arbitrary precision with
matching accuracy:
>>> Float(exact, 5)
0.12500
>>> exact.evalf(20)
0.12500000000000000000
Trying to make a high-precision Float from a float is not disallowed,
but one must keep in mind that the *underlying float* (not the apparent
decimal value) is being obtained with high precision. For example, 0.3
does not have a finite binary representation. The closest rational is
the fraction 5404319552844595/2**54. So if you try to obtain a Float of
0.3 to 20 digits of precision you will not see the same thing as 0.3
followed by 19 zeros:
>>> Float(0.3, 20)
0.29999999999999998890
If you want a 20-digit value of the decimal 0.3 (not the floating point
approximation of 0.3) you should send the 0.3 as a string. The underlying
representation is still binary but a higher precision than Python's float
is used:
>>> Float('0.3', 20)
0.30000000000000000000
Although you can increase the precision of an existing Float using Float
it will not increase the accuracy -- the underlying value is not changed:
>>> def show(f): # binary rep of Float
... from sympy import Mul, Pow
... s, m, e, b = f._mpf_
... v = Mul(int(m), Pow(2, int(e), evaluate=False), evaluate=False)
... print('%s at prec=%s' % (v, f._prec))
...
>>> t = Float('0.3', 3)
>>> show(t)
4915/2**14 at prec=13
>>> show(Float(t, 20)) # higher prec, not higher accuracy
4915/2**14 at prec=70
>>> show(Float(t, 2)) # lower prec
307/2**10 at prec=10
The same thing happens when evalf is used on a Float:
>>> show(t.evalf(20))
4915/2**14 at prec=70
>>> show(t.evalf(2))
307/2**10 at prec=10
Finally, Floats can be instantiated with an mpf tuple (n, c, p) to
produce the number (-1)**n*c*2**p:
>>> n, c, p = 1, 5, 0
>>> (-1)**n*c*2**p
-5
>>> Float((1, 5, 0))
-5.00000000000000
An actual mpf tuple also contains the number of bits in c as the last
element of the tuple:
>>> _._mpf_
(1, 5, 0, 3)
This is not needed for instantiation and is not the same thing as the
precision. The mpf tuple and the precision are two separate quantities
that Float tracks.
"""
__slots__ = ['_mpf_', '_prec']
# A Float represents many real numbers,
# both rational and irrational.
is_rational = None
is_irrational = None
is_number = True
is_real = True
is_Float = True
def __new__(cls, num, prec=None):
if isinstance(num, string_types):
num = num.replace(' ', '')
if num.startswith('.') and len(num) > 1:
num = '0' + num
elif num.startswith('-.') and len(num) > 2:
num = '-0.' + num[2:]
elif isinstance(num, float) and num == 0:
num = '0'
elif isinstance(num, (SYMPY_INTS, Integer)):
num = str(num) # faster than mlib.from_int
elif isinstance(num, mpmath.mpf):
num = num._mpf_
if prec is None:
dps = 15
if isinstance(num, Float):
return num
if isinstance(num, string_types) and _literal_float(num):
try:
Num = decimal.Decimal(num)
except decimal.InvalidOperation:
pass
else:
isint = '.' not in num
num, dps = _decimal_to_Rational_prec(Num)
if num.is_Integer and isint:
dps = max(dps, len(str(num).lstrip('-')))
dps = max(15, dps)
elif prec == '':
if not isinstance(num, string_types):
raise ValueError('The null string can only be used when '
'the number to Float is passed as a string or an integer.')
ok = None
if _literal_float(num):
try:
Num = decimal.Decimal(num)
except decimal.InvalidOperation:
pass
else:
isint = '.' not in num
num, dps = _decimal_to_Rational_prec(Num)
if num.is_Integer and isint:
dps = max(dps, len(str(num).lstrip('-')))
ok = True
if ok is None:
raise ValueError('string-float not recognized: %s' % num)
else:
dps = prec
prec = mlib.libmpf.dps_to_prec(dps)
if isinstance(num, float):
_mpf_ = mlib.from_float(num, prec, rnd)
elif isinstance(num, str):
_mpf_ = mlib.from_str(num, prec, rnd)
elif isinstance(num, decimal.Decimal):
if num.is_finite():
_mpf_ = mlib.from_str(str(num), prec, rnd)
elif num.is_nan():
_mpf_ = _mpf_nan
elif num.is_infinite():
if num > 0:
_mpf_ = _mpf_inf
else:
_mpf_ = _mpf_ninf
else:
raise ValueError("unexpected decimal value %s" % str(num))
elif isinstance(num, Rational):
_mpf_ = mlib.from_rational(num.p, num.q, prec, rnd)
elif isinstance(num, tuple) and len(num) in (3, 4):
if type(num[1]) is str:
# it's a hexadecimal (coming from a pickled object)
# assume that it is in standard form
num = list(num)
num[1] = long(num[1], 16)
_mpf_ = tuple(num)
else:
if not num[1] and len(num) == 4:
# handle normalization hack
return Float._new(num, prec)
else:
_mpf_ = mpmath.mpf(
S.NegativeOne**num[0]*num[1]*2**num[2])._mpf_
elif isinstance(num, Float):
_mpf_ = num._mpf_
if prec < num._prec:
_mpf_ = mpf_norm(_mpf_, prec)
else:
_mpf_ = mpmath.mpf(num)._mpf_
# special cases
if _mpf_ == _mpf_zero:
pass # we want a Float
elif _mpf_ == _mpf_nan:
return S.NaN
obj = Expr.__new__(cls)
obj._mpf_ = _mpf_
obj._prec = prec
return obj
@classmethod
def _new(cls, _mpf_, _prec):
# special cases
if _mpf_ == _mpf_zero:
return S.Zero # XXX this is different from Float which gives 0.0
elif _mpf_ == _mpf_nan:
return S.NaN
obj = Expr.__new__(cls)
obj._mpf_ = mpf_norm(_mpf_, _prec)
obj._prec = _prec
return obj
# mpz can't be pickled
def __getnewargs__(self):
return (mlib.to_pickable(self._mpf_),)
def __getstate__(self):
return {'_prec': self._prec}
def _hashable_content(self):
return (self._mpf_, self._prec)
def floor(self):
return Integer(int(mlib.to_int(
mlib.mpf_floor(self._mpf_, self._prec))))
def ceiling(self):
return Integer(int(mlib.to_int(
mlib.mpf_ceil(self._mpf_, self._prec))))
@property
def num(self):
return mpmath.mpf(self._mpf_)
def _as_mpf_val(self, prec):
rv = mpf_norm(self._mpf_, prec)
if rv != self._mpf_ and self._prec == prec:
debug(self._mpf_, rv)
return rv
def _as_mpf_op(self, prec):
return self._mpf_, max(prec, self._prec)
def _eval_is_finite(self):
if self._mpf_ in (_mpf_inf, _mpf_ninf):
return False
return True
def _eval_is_infinite(self):
if self._mpf_ in (_mpf_inf, _mpf_ninf):
return True
return False
def _eval_is_integer(self):
return self._mpf_ == _mpf_zero
def _eval_is_negative(self):
if self._mpf_ == _mpf_ninf:
return True
if self._mpf_ == _mpf_inf:
return False
return self.num < 0
def _eval_is_positive(self):
if self._mpf_ == _mpf_inf:
return True
if self._mpf_ == _mpf_ninf:
return False
return self.num > 0
def _eval_is_zero(self):
return self._mpf_ == _mpf_zero
def __nonzero__(self):
return self._mpf_ != _mpf_zero
__bool__ = __nonzero__
def __neg__(self):
return Float._new(mlib.mpf_neg(self._mpf_), self._prec)
@_sympifyit('other', NotImplemented)
def __add__(self, other):
if isinstance(other, Number):
rhs, prec = other._as_mpf_op(self._prec)
return Float._new(mlib.mpf_add(self._mpf_, rhs, prec, rnd), prec)
return Number.__add__(self, other)
@_sympifyit('other', NotImplemented)
def __sub__(self, other):
if isinstance(other, Number):
rhs, prec = other._as_mpf_op(self._prec)
return Float._new(mlib.mpf_sub(self._mpf_, rhs, prec, rnd), prec)
return Number.__sub__(self, other)
@_sympifyit('other', NotImplemented)
def __mul__(self, other):
if isinstance(other, Number):
rhs, prec = other._as_mpf_op(self._prec)
return Float._new(mlib.mpf_mul(self._mpf_, rhs, prec, rnd), prec)
return Number.__mul__(self, other)
@_sympifyit('other', NotImplemented)
def __div__(self, other):
if isinstance(other, Number) and other != 0:
rhs, prec = other._as_mpf_op(self._prec)
return Float._new(mlib.mpf_div(self._mpf_, rhs, prec, rnd), prec)
return Number.__div__(self, other)
__truediv__ = __div__
@_sympifyit('other', NotImplemented)
def __mod__(self, other):
if isinstance(other, Rational) and other.q != 1:
# calculate mod with Rationals, *then* round the result
return Float(Rational.__mod__(Rational(self), other),
prec_to_dps(self._prec))
if isinstance(other, Float):
r = self/other
if r == int(r):
prec = max([prec_to_dps(i)
for i in (self._prec, other._prec)])
return Float(0, prec)
if isinstance(other, Number):
rhs, prec = other._as_mpf_op(self._prec)
return Float._new(mlib.mpf_mod(self._mpf_, rhs, prec, rnd), prec)
return Number.__mod__(self, other)
@_sympifyit('other', NotImplemented)
def __rmod__(self, other):
if isinstance(other, Float):
return other.__mod__(self)
if isinstance(other, Number):
rhs, prec = other._as_mpf_op(self._prec)
return Float._new(mlib.mpf_mod(rhs, self._mpf_, prec, rnd), prec)
return Number.__rmod__(self, other)
def _eval_power(self, expt):
"""
expt is a symbolic object but not equal to 0 or 1
(-p)**r -> exp(r*log(-p)) -> exp(r*(log(p) + I*Pi)) ->
-> p**r*(sin(Pi*r) + cos(Pi*r)*I)
"""
if self == 0:
if expt.is_positive:
return S.Zero
if expt.is_negative:
return Float('inf')
if isinstance(expt, Number):
if isinstance(expt, Integer):
prec = self._prec
return Float._new(
mlib.mpf_pow_int(self._mpf_, expt.p, prec, rnd), prec)
elif isinstance(expt, Rational) and \
expt.p == 1 and expt.q % 2 and self.is_negative:
return Pow(S.NegativeOne, expt, evaluate=False)*(
-self)._eval_power(expt)
expt, prec = expt._as_mpf_op(self._prec)
mpfself = self._mpf_
try:
y = mpf_pow(mpfself, expt, prec, rnd)
return Float._new(y, prec)
except mlib.ComplexResult:
re, im = mlib.mpc_pow(
(mpfself, _mpf_zero), (expt, _mpf_zero), prec, rnd)
return Float._new(re, prec) + \
Float._new(im, prec)*S.ImaginaryUnit
def __abs__(self):
return Float._new(mlib.mpf_abs(self._mpf_), self._prec)
def __int__(self):
if self._mpf_ == _mpf_zero:
return 0
return int(mlib.to_int(self._mpf_)) # uses round_fast = round_down
__long__ = __int__
def __eq__(self, other):
if isinstance(other, float):
# coerce to Float at same precision
o = Float(other)
try:
ompf = o._as_mpf_val(self._prec)
except ValueError:
return False
return bool(mlib.mpf_eq(self._mpf_, ompf))
try:
other = _sympify(other)
except SympifyError:
return False # sympy != other --> not ==
if isinstance(other, NumberSymbol):
if other.is_irrational:
return False
return other.__eq__(self)
if isinstance(other, Float):
return bool(mlib.mpf_eq(self._mpf_, other._mpf_))
if isinstance(other, Number):
# numbers should compare at the same precision;
# all _as_mpf_val routines should be sure to abide
# by the request to change the prec if necessary; if
# they don't, the equality test will fail since it compares
# the mpf tuples
ompf = other._as_mpf_val(self._prec)
return bool(mlib.mpf_eq(self._mpf_, ompf))
return False # Float != non-Number
def __ne__(self, other):
return not self.__eq__(other)
def __gt__(self, other):
try:
other = _sympify(other)
except SympifyError:
raise TypeError("Invalid comparison %s > %s" % (self, other))
if isinstance(other, NumberSymbol):
return other.__le__(self)
if other.is_comparable:
other = other.evalf()
if isinstance(other, Number) and other is not S.NaN:
return _sympify(bool(
mlib.mpf_gt(self._mpf_, other._as_mpf_val(self._prec))))
return Expr.__gt__(self, other)
def __ge__(self, other):
try:
other = _sympify(other)
except SympifyError:
raise TypeError("Invalid comparison %s >= %s" % (self, other))
if isinstance(other, NumberSymbol):
return other.__lt__(self)
if other.is_comparable:
other = other.evalf()
if isinstance(other, Number) and other is not S.NaN:
return _sympify(bool(
mlib.mpf_ge(self._mpf_, other._as_mpf_val(self._prec))))
return Expr.__ge__(self, other)
def __lt__(self, other):
try:
other = _sympify(other)
except SympifyError:
raise TypeError("Invalid comparison %s < %s" % (self, other))
if isinstance(other, NumberSymbol):
return other.__ge__(self)
if other.is_real and other.is_number:
other = other.evalf()
if isinstance(other, Number) and other is not S.NaN:
return _sympify(bool(
mlib.mpf_lt(self._mpf_, other._as_mpf_val(self._prec))))
return Expr.__lt__(self, other)
def __le__(self, other):
try:
other = _sympify(other)
except SympifyError:
raise TypeError("Invalid comparison %s <= %s" % (self, other))
if isinstance(other, NumberSymbol):
return other.__gt__(self)
if other.is_real and other.is_number:
other = other.evalf()
if isinstance(other, Number) and other is not S.NaN:
return _sympify(bool(
mlib.mpf_le(self._mpf_, other._as_mpf_val(self._prec))))
return Expr.__le__(self, other)
def __hash__(self):
return super(Float, self).__hash__()
def epsilon_eq(self, other, epsilon="1e-15"):
return abs(self - other) < Float(epsilon)
def _sage_(self):
import sage.all as sage
return sage.RealNumber(str(self))
def __format__(self, format_spec):
return format(decimal.Decimal(str(self)), format_spec)
# Add sympify converters
converter[float] = converter[decimal.Decimal] = Float
# this is here to work nicely in Sage
RealNumber = Float
class Rational(Number):
"""Represents integers and rational numbers (p/q) of any size.
Examples
========
>>> from sympy import Rational, nsimplify, S, pi
>>> Rational(3)
3
>>> Rational(1, 2)
1/2
Rational is unprejudiced in accepting input. If a float is passed, the
underlying value of the binary representation will be returned:
>>> Rational(.5)
1/2
>>> Rational(.2)
3602879701896397/18014398509481984
If the simpler representation of the float is desired then consider
limiting the denominator to the desired value or converting the float to
a string (which is roughly equivalent to limiting the denominator to
10**12):
>>> Rational(str(.2))
1/5
>>> Rational(.2).limit_denominator(10**12)
1/5
An arbitrarily precise Rational is obtained when a string literal is
passed:
>>> Rational("1.23")
123/100
>>> Rational('1e-2')
1/100
>>> Rational(".1")
1/10
>>> Rational('1e-2/3.2')
1/320
The conversion of other types of strings can be handled by
the sympify() function, and conversion of floats to expressions
or simple fractions can be handled with nsimplify:
>>> S('.[3]') # repeating digits in brackets
1/3
>>> S('3**2/10') # general expressions
9/10
>>> nsimplify(.3) # numbers that have a simple form
3/10
But if the input does not reduce to a literal Rational, an error will
be raised:
>>> Rational(pi)
Traceback (most recent call last):
...
TypeError: invalid input: pi
Low-level
---------
Access numerator and denominator as .p and .q:
>>> r = Rational(3, 4)
>>> r
3/4
>>> r.p
3
>>> r.q
4
Note that p and q return integers (not SymPy Integers) so some care
is needed when using them in expressions:
>>> r.p/r.q
0.75
See Also
========
sympify, sympy.simplify.simplify.nsimplify
"""
is_real = True
is_integer = False
is_rational = True
is_number = True
__slots__ = ['p', 'q']
is_Rational = True
@cacheit
def __new__(cls, p, q=None):
if q is None:
if isinstance(p, Rational):
return p
if isinstance(p, string_types):
p = p.replace(' ', '')
try:
# we might have a Float
neg_pow, digits, expt = decimal.Decimal(p).as_tuple()
p = [1, -1][neg_pow]*int("".join(str(x) for x in digits))
if expt > 0:
# TODO: this branch needs a test
return Rational(p*Pow(10, expt), 1)
return Rational(p, Pow(10, -expt))
except decimal.InvalidOperation:
f = regex.match('^([-+]?[0-9]+)/([0-9]+)$', p)
if f:
n, d = f.groups()
return Rational(int(n), int(d))
elif p.count('/') == 1:
p, q = p.split('/')
return Rational(Rational(p), Rational(q))
else:
pass # error will raise below
else:
try:
if isinstance(p, fractions.Fraction):
return Rational(p.numerator, p.denominator)
except NameError:
pass # error will raise below
if isinstance(p, (float, Float)):
return Rational(*float(p).as_integer_ratio())
if not isinstance(p, SYMPY_INTS + (Rational,)):
raise TypeError('invalid input: %s' % p)
q = S.One
else:
p = Rational(p)
q = Rational(q)
if isinstance(q, Rational):
p *= q.q
q = q.p
if isinstance(p, Rational):
q *= p.q
p = p.p
# p and q are now integers
if q == 0:
if p == 0:
if _errdict["divide"]:
raise ValueError("Indeterminate 0/0")
else:
return S.NaN
if p < 0:
return S.NegativeInfinity
return S.Infinity
if q < 0:
q = -q
p = -p
n = igcd(abs(p), q)
if n > 1:
p //= n
q //= n
if q == 1:
return Integer(p)
if p == 1 and q == 2:
return S.Half
obj = Expr.__new__(cls)
obj.p = p
obj.q = q
return obj
def limit_denominator(self, max_denominator=1000000):
"""Closest Rational to self with denominator at most max_denominator.
>>> from sympy import Rational
>>> Rational('3.141592653589793').limit_denominator(10)
22/7
>>> Rational('3.141592653589793').limit_denominator(100)
311/99
"""
# Algorithm notes: For any real number x, define a *best upper
# approximation* to x to be a rational number p/q such that:
#
# (1) p/q >= x, and
# (2) if p/q > r/s >= x then s > q, for any rational r/s.
#
# Define *best lower approximation* similarly. Then it can be
# proved that a rational number is a best upper or lower
# approximation to x if, and only if, it is a convergent or
# semiconvergent of the (unique shortest) continued fraction
# associated to x.
#
# To find a best rational approximation with denominator <= M,
# we find the best upper and lower approximations with
# denominator <= M and take whichever of these is closer to x.
# In the event of a tie, the bound with smaller denominator is
# chosen. If both denominators are equal (which can happen
# only when max_denominator == 1 and self is midway between
# two integers) the lower bound, i.e. the floor of self, is
# taken.
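# Illustrative trace (editor's addition): for self = 355/113 and
# max_denominator = 100 the loop below computes the convergents 3 and 22/7,
# then stops because the next denominator (113) exceeds 100; the candidates
# compared are bound2 = 22/7 and bound1 = 311/99, and 311/99 is returned
# because it is closer, matching the docstring example above.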
if max_denominator < 1:
raise ValueError("max_denominator should be at least 1")
if self.q <= max_denominator:
return self
p0, q0, p1, q1 = 0, 1, 1, 0
n, d = self.p, self.q
while True:
a = n//d
q2 = q0 + a*q1
if q2 > max_denominator:
break
p0, q0, p1, q1 = p1, q1, p0 + a*p1, q2
n, d = d, n - a*d
k = (max_denominator - q0)//q1
bound1 = Rational(p0 + k*p1, q0 + k*q1)
bound2 = Rational(p1, q1)
if abs(bound2 - self) <= abs(bound1 - self):
return bound2
else:
return bound1
def __getnewargs__(self):
return (self.p, self.q)
def _hashable_content(self):
return (self.p, self.q)
def _eval_is_positive(self):
return self.p > 0
def _eval_is_zero(self):
return self.p == 0
def __neg__(self):
return Rational(-self.p, self.q)
@_sympifyit('other', NotImplemented)
def __add__(self, other):
if isinstance(other, Rational):
return Rational(self.p*other.q + self.q*other.p, self.q*other.q)
elif isinstance(other, Float):
return other + self
else:
return Number.__add__(self, other)
@_sympifyit('other', NotImplemented)
def __sub__(self, other):
if isinstance(other, Rational):
return Rational(self.p*other.q - self.q*other.p, self.q*other.q)
elif isinstance(other, Float):
return -other + self
else:
return Number.__sub__(self, other)
@_sympifyit('other', NotImplemented)
def __mul__(self, other):
if isinstance(other, Rational):
return Rational(self.p*other.p, self.q*other.q)
elif isinstance(other, Float):
return other*self
else:
return Number.__mul__(self, other)
@_sympifyit('other', NotImplemented)
def __div__(self, other):
if isinstance(other, Rational):
if self.p and other.p == S.Zero:
return S.ComplexInfinity
else:
return Rational(self.p*other.q, self.q*other.p)
elif isinstance(other, Float):
return self*(1/other)
else:
return Number.__div__(self, other)
__truediv__ = __div__
@_sympifyit('other', NotImplemented)
def __mod__(self, other):
if isinstance(other, Rational):
n = (self.p*other.q) // (other.p*self.q)
return Rational(self.p*other.q - n*other.p*self.q, self.q*other.q)
if isinstance(other, Float):
# calculate mod with Rationals, *then* round the answer
return Float(self.__mod__(Rational(other)),
prec_to_dps(other._prec))
return Number.__mod__(self, other)
@_sympifyit('other', NotImplemented)
def __rmod__(self, other):
if isinstance(other, Rational):
return Rational.__mod__(other, self)
return Number.__rmod__(self, other)
def _eval_power(self, expt):
if isinstance(expt, Number):
if isinstance(expt, Float):
return self._eval_evalf(expt._prec)**expt
if expt.is_negative:
# (3/4)**-2 -> (4/3)**2
ne = -expt
if (ne is S.One):
return Rational(self.q, self.p)
if self.is_negative:
if expt.q != 1:
return -(S.NegativeOne)**((expt.p % expt.q) /
S(expt.q))*Rational(self.q, -self.p)**ne
else:
return S.NegativeOne**ne*Rational(self.q, -self.p)**ne
else:
return Rational(self.q, self.p)**ne
if expt is S.Infinity: # -oo already caught by test for negative
if self.p > self.q:
# (3/2)**oo -> oo
return S.Infinity
if self.p < -self.q:
# (-3/2)**oo -> oo + I*oo
return S.Infinity + S.Infinity*S.ImaginaryUnit
return S.Zero
if isinstance(expt, Integer):
# (4/3)**2 -> 4**2 / 3**2
return Rational(self.p**expt.p, self.q**expt.p)
if isinstance(expt, Rational):
if self.p != 1:
# (4/3)**(5/6) -> 4**(5/6)*3**(-5/6)
return Integer(self.p)**expt*Integer(self.q)**(-expt)
# as the above caught negative self.p, now self is positive
return Integer(self.q)**Rational(
expt.p*(expt.q - 1), expt.q) / \
Integer(self.q)**Integer(expt.p)
if self.is_negative and expt.is_even:
return (-self)**expt
return
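# Illustrative examples (editor's addition) of the branches above:
# >>> Rational(3, 4)**-2               # negative exponent: invert, then square
# 16/9
# >>> Rational(1, 4)**Rational(1, 2)   # p == 1 branch
# 1/2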
def _as_mpf_val(self, prec):
return mlib.from_rational(self.p, self.q, prec, rnd)
def _mpmath_(self, prec, rnd):
return mpmath.make_mpf(mlib.from_rational(self.p, self.q, prec, rnd))
def __abs__(self):
return Rational(abs(self.p), self.q)
def __int__(self):
p, q = self.p, self.q
if p < 0:
return -(-p//q)
return p//q
__long__ = __int__
def __eq__(self, other):
try:
other = _sympify(other)
except SympifyError:
return False # sympy != other --> not ==
if isinstance(other, NumberSymbol):
if other.is_irrational:
return False
return other.__eq__(self)
if isinstance(other, Number):
if isinstance(other, Rational):
# a Rational is always in reduced form so will never be 2/4
# so we can just check equivalence of args
return self.p == other.p and self.q == other.q
if isinstance(other, Float):
return mlib.mpf_eq(self._as_mpf_val(other._prec), other._mpf_)
return False
def __ne__(self, other):
return not self.__eq__(other)
def __gt__(self, other):
try:
other = _sympify(other)
except SympifyError:
raise TypeError("Invalid comparison %s > %s" % (self, other))
if isinstance(other, NumberSymbol):
return other.__le__(self)
expr = self
if isinstance(other, Number):
if isinstance(other, Rational):
return _sympify(bool(self.p*other.q > self.q*other.p))
if isinstance(other, Float):
return _sympify(bool(mlib.mpf_gt(
self._as_mpf_val(other._prec), other._mpf_)))
elif other.is_number and other.is_real:
expr, other = Integer(self.p), self.q*other
return Expr.__gt__(expr, other)
def __ge__(self, other):
try:
other = _sympify(other)
except SympifyError:
raise TypeError("Invalid comparison %s >= %s" % (self, other))
if isinstance(other, NumberSymbol):
return other.__lt__(self)
expr = self
if isinstance(other, Number):
if isinstance(other, Rational):
return _sympify(bool(self.p*other.q >= self.q*other.p))
if isinstance(other, Float):
return _sympify(bool(mlib.mpf_ge(
self._as_mpf_val(other._prec), other._mpf_)))
elif other.is_number and other.is_real:
expr, other = Integer(self.p), self.q*other
return Expr.__ge__(expr, other)
def __lt__(self, other):
try:
other = _sympify(other)
except SympifyError:
raise TypeError("Invalid comparison %s < %s" % (self, other))
if isinstance(other, NumberSymbol):
return other.__ge__(self)
expr = self
if isinstance(other, Number):
if isinstance(other, Rational):
return _sympify(bool(self.p*other.q < self.q*other.p))
if isinstance(other, Float):
return _sympify(bool(mlib.mpf_lt(
self._as_mpf_val(other._prec), other._mpf_)))
elif other.is_number and other.is_real:
expr, other = Integer(self.p), self.q*other
return Expr.__lt__(expr, other)
def __le__(self, other):
try:
other = _sympify(other)
except SympifyError:
raise TypeError("Invalid comparison %s <= %s" % (self, other))
expr = self
if isinstance(other, NumberSymbol):
return other.__gt__(self)
elif isinstance(other, Number):
if isinstance(other, Rational):
return _sympify(bool(self.p*other.q <= self.q*other.p))
if isinstance(other, Float):
return _sympify(bool(mlib.mpf_le(
self._as_mpf_val(other._prec), other._mpf_)))
elif other.is_number and other.is_real:
expr, other = Integer(self.p), self.q*other
return Expr.__le__(expr, other)
def __hash__(self):
return super(Rational, self).__hash__()
def factors(self, limit=None, use_trial=True, use_rho=False,
use_pm1=False, verbose=False, visual=False):
"""A wrapper to factorint which return factors of self that are
smaller than limit (or cheap to compute). Special methods of
factoring are disabled by default so that only trial division is used.
"""
from sympy.ntheory import factorint
f = factorint(self.p, limit=limit, use_trial=use_trial,
use_rho=use_rho, use_pm1=use_pm1,
verbose=verbose).copy()
f = defaultdict(int, f)
for p, e in factorint(self.q, limit=limit,
use_trial=use_trial,
use_rho=use_rho,
use_pm1=use_pm1,
verbose=verbose).items():
f[p] += -e
if len(f) > 1 and 1 in f:
del f[1]
if not f:
f = {1: 1}
if not visual:
return dict(f)
else:
if -1 in f:
f.pop(-1)
args = [S.NegativeOne]
else:
args = []
args.extend([Pow(*i, evaluate=False)
for i in sorted(f.items())])
return Mul(*args, evaluate=False)
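# Illustrative example (editor's addition): denominator primes are reported
# with negative exponents, e.g.
# >>> Rational(8, 9).factors()
# {2: 3, 3: -2}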
@_sympifyit('other', NotImplemented)
def gcd(self, other):
if isinstance(other, Rational):
if other is S.Zero:
return other
return Rational(
Integer(igcd(self.p, other.p)),
Integer(ilcm(self.q, other.q)))
return Number.gcd(self, other)
@_sympifyit('other', NotImplemented)
def lcm(self, other):
if isinstance(other, Rational):
return Rational(
self.p*other.p//igcd(self.p, other.p),
igcd(self.q, other.q))
return Number.lcm(self, other)
def as_numer_denom(self):
return Integer(self.p), Integer(self.q)
def _sage_(self):
import sage.all as sage
return sage.Integer(self.p)/sage.Integer(self.q)
def as_content_primitive(self, radical=False):
"""Return the tuple (R, self/R) where R is the positive Rational
extracted from self.
Examples
========
>>> from sympy import S
>>> (S(-3)/2).as_content_primitive()
(3/2, -1)
See docstring of Expr.as_content_primitive for more examples.
"""
if self:
if self.is_positive:
return self, S.One
return -self, S.NegativeOne
return S.One, self
# int -> Integer
_intcache = {}
# TODO move this tracing facility to sympy/core/trace.py ?
def _intcache_printinfo():
ints = sorted(_intcache.keys())
nhit = _intcache_hits
nmiss = _intcache_misses
if nhit == 0 and nmiss == 0:
print()
print('Integer cache statistic was not collected')
return
miss_ratio = float(nmiss) / (nhit + nmiss)
print()
print('Integer cache statistic')
print('-----------------------')
print()
print('#items: %i' % len(ints))
print()
print(' #hit #miss #total')
print()
print('%5i %5i (%7.5f %%) %5i' % (
nhit, nmiss, miss_ratio*100, nhit + nmiss)
)
print()
print(ints)
_intcache_hits = 0
_intcache_misses = 0
def int_trace(f):
import os
if os.getenv('SYMPY_TRACE_INT', 'no').lower() != 'yes':
return f
def Integer_tracer(cls, i):
global _intcache_hits, _intcache_misses
try:
_intcache_hits += 1
return _intcache[i]
except KeyError:
_intcache_hits -= 1
_intcache_misses += 1
return f(cls, i)
# also we want to hook our _intcache_printinfo into sys.atexit
import atexit
atexit.register(_intcache_printinfo)
return Integer_tracer
class Integer(Rational):
q = 1
is_integer = True
is_number = True
is_Integer = True
__slots__ = ['p']
def _as_mpf_val(self, prec):
return mlib.from_int(self.p, prec)
def _mpmath_(self, prec, rnd):
return mpmath.make_mpf(self._as_mpf_val(prec))
# TODO caching with decorator, but not to degrade performance
@int_trace
def __new__(cls, i):
if isinstance(i, string_types):
i = i.replace(' ', '')
# whereas we cannot, in general, make a Rational from an
# arbitrary expression, we can make an Integer unambiguously
# (except when a non-integer expression happens to round to
# an integer). So we proceed by taking int() of the input and
# let the int routines determine whether the expression can
# be made into an int or whether an error should be raised.
try:
ival = int(i)
except TypeError:
raise TypeError(
'Integer can only work with integer expressions.')
try:
return _intcache[ival]
except KeyError:
# We only work with well-behaved integer types. This converts, for
# example, numpy.int32 instances.
obj = Expr.__new__(cls)
obj.p = ival
_intcache[ival] = obj
return obj
def __getnewargs__(self):
return (self.p,)
# Arithmetic operations are here for efficiency
def __int__(self):
return self.p
__long__ = __int__
def __neg__(self):
return Integer(-self.p)
def __abs__(self):
if self.p >= 0:
return self
else:
return Integer(-self.p)
def __divmod__(self, other):
from .containers import Tuple
if isinstance(other, Integer):
return Tuple(*(divmod(self.p, other.p)))
else:
return Number.__divmod__(self, other)
def __rdivmod__(self, other):
from .containers import Tuple
if isinstance(other, integer_types):
return Tuple(*(divmod(other, self.p)))
else:
try:
other = Number(other)
except TypeError:
msg = "unsupported operand type(s) for divmod(): '%s' and '%s'"
oname = type(other).__name__
sname = type(self).__name__
raise TypeError(msg % (oname, sname))
return Number.__divmod__(other, self)
# TODO make it decorator + bytecodehacks?
def __add__(self, other):
if isinstance(other, integer_types):
return Integer(self.p + other)
elif isinstance(other, Integer):
return Integer(self.p + other.p)
return Rational.__add__(self, other)
def __radd__(self, other):
if isinstance(other, integer_types):
return Integer(other + self.p)
return Rational.__add__(self, other)
def __sub__(self, other):
if isinstance(other, integer_types):
return Integer(self.p - other)
elif isinstance(other, Integer):
return Integer(self.p - other.p)
return Rational.__sub__(self, other)
def __rsub__(self, other):
if isinstance(other, integer_types):
return Integer(other - self.p)
return Rational.__rsub__(self, other)
def __mul__(self, other):
if isinstance(other, integer_types):
return Integer(self.p*other)
elif isinstance(other, Integer):
return Integer(self.p*other.p)
return Rational.__mul__(self, other)
def __rmul__(self, other):
if isinstance(other, integer_types):
return Integer(other*self.p)
return Rational.__mul__(self, other)
def __mod__(self, other):
if isinstance(other, integer_types):
return Integer(self.p % other)
elif isinstance(other, Integer):
return Integer(self.p % other.p)
return Rational.__mod__(self, other)
def __rmod__(self, other):
if isinstance(other, integer_types):
return Integer(other % self.p)
elif isinstance(other, Integer):
return Integer(other.p % self.p)
return Rational.__rmod__(self, other)
def __eq__(self, other):
if isinstance(other, integer_types):
return (self.p == other)
elif isinstance(other, Integer):
return (self.p == other.p)
return Rational.__eq__(self, other)
def __ne__(self, other):
return not self.__eq__(other)
def __gt__(self, other):
try:
other = _sympify(other)
except SympifyError:
raise TypeError("Invalid comparison %s > %s" % (self, other))
if isinstance(other, Integer):
return _sympify(self.p > other.p)
return Rational.__gt__(self, other)
def __lt__(self, other):
try:
other = _sympify(other)
except SympifyError:
raise TypeError("Invalid comparison %s < %s" % (self, other))
if isinstance(other, Integer):
return _sympify(self.p < other.p)
return Rational.__lt__(self, other)
def __ge__(self, other):
try:
other = _sympify(other)
except SympifyError:
raise TypeError("Invalid comparison %s >= %s" % (self, other))
if isinstance(other, Integer):
return _sympify(self.p >= other.p)
return Rational.__ge__(self, other)
def __le__(self, other):
try:
other = _sympify(other)
except SympifyError:
raise TypeError("Invalid comparison %s <= %s" % (self, other))
if isinstance(other, Integer):
return _sympify(self.p <= other.p)
return Rational.__le__(self, other)
def __hash__(self):
return hash(self.p)
def __index__(self):
return self.p
########################################
def _eval_is_odd(self):
return bool(self.p % 2)
def _eval_power(self, expt):
"""
Tries to do some simplifications on self**expt
Returns None if no further simplifications can be done
When exponent is a fraction (so we have for example a square root),
we try to find a simpler representation by factoring the argument
up to factors of 2**15, e.g.
- sqrt(4) becomes 2
- sqrt(-4) becomes 2*I
- (2**(3+7)*3**(6+7))**Rational(1,7) becomes 6*18**(3/7)
Further simplification would require a special call to factorint on
the argument which is not done here for sake of speed.
"""
from sympy import perfect_power
if expt is S.Infinity:
if self.p > S.One:
return S.Infinity
# cases -1, 0, 1 are done in their respective classes
return S.Infinity + S.ImaginaryUnit*S.Infinity
if expt is S.NegativeInfinity:
return Rational(1, self)**S.Infinity
if not isinstance(expt, Number):
# simplify when expt is even
# (-2)**k --> 2**k
if self.is_negative and expt.is_even:
return (-self)**expt
if not isinstance(expt, Rational):
return
if expt is S.Half and self.is_negative:
# we extract I for this special case since everyone is doing so
return S.ImaginaryUnit*Pow(-self, expt)
if expt.is_negative:
# invert base and change sign on exponent
ne = -expt
if self.is_negative:
if expt.q != 1:
return -(S.NegativeOne)**((expt.p % expt.q) /
S(expt.q))*Rational(1, -self)**ne
else:
return (S.NegativeOne)**ne*Rational(1, -self)**ne
else:
return Rational(1, self.p)**ne
# see if base is a perfect root, sqrt(4) --> 2
x, xexact = integer_nthroot(abs(self.p), expt.q)
if xexact:
# if it's a perfect root we've finished
result = Integer(x**abs(expt.p))
if self.is_negative:
result *= S.NegativeOne**expt
return result
# The following is an algorithm where we collect perfect roots
# from the factors of base.
# if it's not an nth root, it still might be a perfect power
b_pos = int(abs(self.p))
p = perfect_power(b_pos)
if p is not False:
dict = {p[0]: p[1]}
else:
dict = Integer(self).factors(limit=2**15)
# now process the dict of factors
if self.is_negative:
dict[-1] = 1
out_int = 1 # integer part
out_rad = 1 # extracted radicals
sqr_int = 1
sqr_gcd = 0
sqr_dict = {}
for prime, exponent in dict.items():
exponent *= expt.p
# remove multiples of expt.q: (2**12)**(1/10) -> 2*(2**2)**(1/10)
div_e, div_m = divmod(exponent, expt.q)
if div_e > 0:
out_int *= prime**div_e
if div_m > 0:
# see if the reduced exponent shares a gcd with e.q
# (2**2)**(1/10) -> 2**(1/5)
g = igcd(div_m, expt.q)
if g != 1:
out_rad *= Pow(prime, Rational(div_m//g, expt.q//g))
else:
sqr_dict[prime] = div_m
# identify gcd of remaining powers
for p, ex in sqr_dict.items():
if sqr_gcd == 0:
sqr_gcd = ex
else:
sqr_gcd = igcd(sqr_gcd, ex)
if sqr_gcd == 1:
break
for k, v in sqr_dict.items():
sqr_int *= k**(v//sqr_gcd)
if sqr_int == self and out_int == 1 and out_rad == 1:
result = None
else:
result = out_int*out_rad*Pow(sqr_int, Rational(sqr_gcd, expt.q))
return result
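# Illustrative trace (editor's addition) of the radical-collection loop above
# for sqrt(12): the factorization {2: 2, 3: 1} yields out_int = 2 (from 2**2),
# sqr_dict = {3: 1} and sqr_int = 3, so the returned result is 2*sqrt(3).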
def _eval_is_prime(self):
from sympy.ntheory import isprime
return isprime(self)
def _eval_is_composite(self):
if self > 1:
return fuzzy_not(self.is_prime)
else:
return False
def as_numer_denom(self):
return self, S.One
def __floordiv__(self, other):
return Integer(self.p // Integer(other).p)
def __rfloordiv__(self, other):
return Integer(Integer(other).p // self.p)
# Add sympify converters
for i_type in integer_types:
converter[i_type] = Integer
class AlgebraicNumber(Expr):
"""Class for representing algebraic numbers in SymPy. """
__slots__ = ['rep', 'root', 'alias', 'minpoly']
is_AlgebraicNumber = True
is_algebraic = True
is_number = True
def __new__(cls, expr, coeffs=None, alias=None, **args):
"""Construct a new algebraic number. """
from sympy import Poly
from sympy.polys.polyclasses import ANP, DMP
from sympy.polys.numberfields import minimal_polynomial
from sympy.core.symbol import Symbol
expr = sympify(expr)
if isinstance(expr, (tuple, Tuple)):
minpoly, root = expr
if not minpoly.is_Poly:
minpoly = Poly(minpoly)
elif expr.is_AlgebraicNumber:
minpoly, root = expr.minpoly, expr.root
else:
minpoly, root = minimal_polynomial(
expr, args.get('gen'), polys=True), expr
dom = minpoly.get_domain()
if coeffs is not None:
if not isinstance(coeffs, ANP):
rep = DMP.from_sympy_list(sympify(coeffs), 0, dom)
scoeffs = Tuple(*coeffs)
else:
rep = DMP.from_list(coeffs.to_list(), 0, dom)
scoeffs = Tuple(*coeffs.to_list())
if rep.degree() >= minpoly.degree():
rep = rep.rem(minpoly.rep)
else:
rep = DMP.from_list([1, 0], 0, dom)
scoeffs = Tuple(1, 0)
if root.is_negative:
rep = -rep
scoeffs = Tuple(-1, 0)
sargs = (root, scoeffs)
if alias is not None:
if not isinstance(alias, Symbol):
alias = Symbol(alias)
sargs = sargs + (alias,)
obj = Expr.__new__(cls, *sargs)
obj.rep = rep
obj.root = root
obj.alias = alias
obj.minpoly = minpoly
return obj
def __hash__(self):
return super(AlgebraicNumber, self).__hash__()
def _eval_evalf(self, prec):
return self.as_expr()._evalf(prec)
@property
def is_aliased(self):
"""Returns ``True`` if ``alias`` was set. """
return self.alias is not None
def as_poly(self, x=None):
"""Create a Poly instance from ``self``. """
from sympy import Dummy, Poly, PurePoly
if x is not None:
return Poly.new(self.rep, x)
else:
if self.alias is not None:
return Poly.new(self.rep, self.alias)
else:
return PurePoly.new(self.rep, Dummy('x'))
def as_expr(self, x=None):
"""Create a Basic expression from ``self``. """
return self.as_poly(x or self.root).as_expr().expand()
def coeffs(self):
"""Returns all SymPy coefficients of an algebraic number. """
return [ self.rep.dom.to_sympy(c) for c in self.rep.all_coeffs() ]
def native_coeffs(self):
"""Returns all native coefficients of an algebraic number. """
return self.rep.all_coeffs()
def to_algebraic_integer(self):
"""Convert ``self`` to an algebraic integer. """
from sympy import Poly
f = self.minpoly
if f.LC() == 1:
return self
coeff = f.LC()**(f.degree() - 1)
poly = f.compose(Poly(f.gen/f.LC()))
minpoly = poly*coeff
root = f.LC()*self.root
return AlgebraicNumber((minpoly, root), self.coeffs())
def _eval_simplify(self, ratio, measure):
from sympy.polys import RootOf, minpoly
for r in [r for r in self.minpoly.all_roots() if r.func != RootOf]:
if minpoly(self.root - r).is_Symbol:
# use the matching root if it's simpler
if measure(r) < ratio*measure(self.root):
return AlgebraicNumber(r)
return self
class RationalConstant(Rational):
"""
Abstract base class for rationals with specific behaviors
Derived classes must define class attributes p and q and should probably all
be singletons.
"""
__slots__ = []
def __new__(cls):
return AtomicExpr.__new__(cls)
class IntegerConstant(Integer):
__slots__ = []
def __new__(cls):
return AtomicExpr.__new__(cls)
class Zero(with_metaclass(Singleton, IntegerConstant)):
"""The number zero.
Zero is a singleton, and can be accessed by ``S.Zero``
Examples
========
>>> from sympy import S, Integer, zoo
>>> Integer(0) is S.Zero
True
>>> 1/S.Zero
zoo
References
==========
.. [1] http://en.wikipedia.org/wiki/Zero
"""
p = 0
q = 1
is_positive = False
is_negative = False
is_zero = True
is_number = True
__slots__ = []
@staticmethod
def __abs__():
return S.Zero
@staticmethod
def __neg__():
return S.Zero
def _eval_power(self, expt):
if expt.is_positive:
return self
if expt.is_negative:
return S.ComplexInfinity
if expt.is_real is False:
return S.NaN
# infinities are already handled with pos and neg
# tests above; now throw away leading numbers on Mul
# exponent
coeff, terms = expt.as_coeff_Mul()
if coeff.is_negative:
return S.ComplexInfinity**terms
if coeff is not S.One: # there is a Number to discard
return self**terms
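# Illustrative note (editor's addition): the numeric coefficient of the
# exponent is stripped before re-dispatching, so for a hypothetical symbol x,
# S.Zero**(-2*x) rewrites to zoo**x and S.Zero**(2*x) to 0**x.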
def _eval_order(self, *symbols):
# Order(0,x) -> 0
return self
def __nonzero__(self):
return False
__bool__ = __nonzero__
class One(with_metaclass(Singleton, IntegerConstant)):
"""The number one.
One is a singleton, and can be accessed by ``S.One``.
Examples
========
>>> from sympy import S, Integer
>>> Integer(1) is S.One
True
References
==========
.. [1] http://en.wikipedia.org/wiki/1_%28number%29
"""
is_number = True
p = 1
q = 1
__slots__ = []
@staticmethod
def __abs__():
return S.One
@staticmethod
def __neg__():
return S.NegativeOne
def _eval_power(self, expt):
return self
def _eval_order(self, *symbols):
return
@staticmethod
def factors(limit=None, use_trial=True, use_rho=False, use_pm1=False,
verbose=False, visual=False):
if visual:
return S.One
return {1: 1}
class NegativeOne(with_metaclass(Singleton, IntegerConstant)):
"""The number negative one.
NegativeOne is a singleton, and can be accessed by ``S.NegativeOne``.
Examples
========
>>> from sympy import S, Integer
>>> Integer(-1) is S.NegativeOne
True
See Also
========
One
References
==========
.. [1] http://en.wikipedia.org/wiki/%E2%88%921_%28number%29
"""
is_number = True
p = -1
q = 1
__slots__ = []
@staticmethod
def __abs__():
return S.One
@staticmethod
def __neg__():
return S.One
def _eval_power(self, expt):
if expt.is_odd:
return S.NegativeOne
if expt.is_even:
return S.One
if isinstance(expt, Number):
if isinstance(expt, Float):
return Float(-1.0)**expt
if expt is S.NaN:
return S.NaN
if expt is S.Infinity or expt is S.NegativeInfinity:
return S.NaN
if expt is S.Half:
return S.ImaginaryUnit
if isinstance(expt, Rational):
if expt.q == 2:
return S.ImaginaryUnit**Integer(expt.p)
i, r = divmod(expt.p, expt.q)
if i:
return self**i*self**Rational(r, expt.q)
return
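# Illustrative examples (editor's addition): the divmod step above peels off
# whole powers of -1, e.g.
# >>> S.NegativeOne**Rational(7, 3)
# (-1)**(1/3)
# >>> S.NegativeOne**Rational(5, 2)
# I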
class Half(with_metaclass(Singleton, RationalConstant)):
"""The rational number 1/2.
Half is a singleton, and can be accessed by ``S.Half``.
Examples
========
>>> from sympy import S, Rational
>>> Rational(1, 2) is S.Half
True
References
==========
.. [1] http://en.wikipedia.org/wiki/One_half
"""
is_number = True
p = 1
q = 2
__slots__ = []
@staticmethod
def __abs__():
return S.Half
class Infinity(with_metaclass(Singleton, Number)):
r"""Positive infinite quantity.
In real analysis the symbol `\infty` denotes an unbounded
limit: `x\to\infty` means that `x` grows without bound.
Infinity is often used not only to define a limit but as a value
in the affinely extended real number system. Points labeled `+\infty`
and `-\infty` can be added to the topological space of the real numbers,
producing the two-point compactification of the real numbers. Adding
algebraic properties to this gives us the extended real numbers.
Infinity is a singleton, and can be accessed by ``S.Infinity``,
or can be imported as ``oo``.
Examples
========
>>> from sympy import oo, exp, limit, Symbol
>>> 1 + oo
oo
>>> 42/oo
0
>>> x = Symbol('x')
>>> limit(exp(x), x, oo)
oo
See Also
========
NegativeInfinity, NaN
References
==========
.. [1] http://en.wikipedia.org/wiki/Infinity
"""
is_commutative = True
is_positive = True
is_infinite = True
is_number = True
is_prime = False
__slots__ = []
def __new__(cls):
return AtomicExpr.__new__(cls)
def _latex(self, printer):
return r"\infty"
@_sympifyit('other', NotImplemented)
def __add__(self, other):
if isinstance(other, Number):
if other is S.NegativeInfinity or other is S.NaN:
return S.NaN
elif other.is_Float:
if other == Float('-inf'):
return S.NaN
else:
return Float('inf')
else:
return S.Infinity
return NotImplemented
__radd__ = __add__
@_sympifyit('other', NotImplemented)
def __sub__(self, other):
if isinstance(other, Number):
if other is S.Infinity or other is S.NaN:
return S.NaN
elif other.is_Float:
if other == Float('inf'):
return S.NaN
else:
return Float('inf')
else:
return S.Infinity
return NotImplemented
@_sympifyit('other', NotImplemented)
def __mul__(self, other):
if isinstance(other, Number):
if other is S.Zero or other is S.NaN:
return S.NaN
elif other.is_Float:
if other == 0:
return S.NaN
if other > 0:
return Float('inf')
else:
return Float('-inf')
else:
if other > 0:
return S.Infinity
else:
return S.NegativeInfinity
return NotImplemented
__rmul__ = __mul__
@_sympifyit('other', NotImplemented)
def __div__(self, other):
if isinstance(other, Number):
if other is S.Infinity or \
other is S.NegativeInfinity or \
other is S.NaN:
return S.NaN
elif other.is_Float:
if other == Float('-inf') or \
other == Float('inf'):
return S.NaN
elif other.is_nonnegative:
return Float('inf')
else:
return Float('-inf')
else:
if other >= 0:
return S.Infinity
else:
return S.NegativeInfinity
return NotImplemented
__truediv__ = __div__
def __abs__(self):
return S.Infinity
def __neg__(self):
return S.NegativeInfinity
def _eval_power(self, expt):
"""
``expt`` is a symbolic object but not equal to 0 or 1.
================ ======= ==============================
Expression       Result  Notes
================ ======= ==============================
``oo ** nan``    ``nan``
``oo ** -p``     ``0``   ``p`` is a positive number or ``oo``
================ ======= ==============================
See Also
========
Pow
NaN
NegativeInfinity
"""
if expt.is_positive:
return S.Infinity
if expt.is_negative:
return S.Zero
if expt is S.NaN:
return S.NaN
if expt is S.ComplexInfinity:
return S.NaN
if expt.is_number:
return self**expt.evalf()
def _as_mpf_val(self, prec):
return mlib.finf
def _sage_(self):
import sage.all as sage
return sage.oo
def __hash__(self):
return super(Infinity, self).__hash__()
def __eq__(self, other):
return other is S.Infinity
def __ne__(self, other):
return other is not S.Infinity
def __lt__(self, other):
try:
other = _sympify(other)
except SympifyError:
raise TypeError("Invalid comparison %s < %s" % (self, other))
if other.is_real:
return S.false
return Expr.__lt__(self, other)
def __le__(self, other):
try:
other = _sympify(other)
except SympifyError:
raise TypeError("Invalid comparison %s <= %s" % (self, other))
if other.is_real:
if other.is_finite or other is S.NegativeInfinity:
return S.false
elif other.is_nonpositive:
return S.false
elif other.is_infinite and other.is_positive:
return S.true
return Expr.__le__(self, other)
def __gt__(self, other):
try:
other = _sympify(other)
except SympifyError:
raise TypeError("Invalid comparison %s > %s" % (self, other))
if other.is_real:
if other.is_finite or other is S.NegativeInfinity:
return S.true
elif other.is_nonpositive:
return S.true
elif other.is_infinite and other.is_positive:
return S.false
return Expr.__gt__(self, other)
def __ge__(self, other):
try:
other = _sympify(other)
except SympifyError:
raise TypeError("Invalid comparison %s >= %s" % (self, other))
if other.is_real:
return S.true
return Expr.__ge__(self, other)
def __mod__(self, other):
return S.NaN
__rmod__ = __mod__
oo = S.Infinity
class NegativeInfinity(with_metaclass(Singleton, Number)):
"""Negative infinite quantity.
NegativeInfinity is a singleton, and can be accessed
by ``S.NegativeInfinity``.
See Also
========
Infinity
"""
is_commutative = True
is_negative = True
is_infinite = True
is_number = True
__slots__ = []
def __new__(cls):
return AtomicExpr.__new__(cls)
def _latex(self, printer):
return r"-\infty"
@_sympifyit('other', NotImplemented)
def __add__(self, other):
if isinstance(other, Number):
if other is S.Infinity or other is S.NaN:
return S.NaN
elif other.is_Float:
if other == Float('inf'):
return Float('nan')
else:
return Float('-inf')
else:
return S.NegativeInfinity
return NotImplemented
__radd__ = __add__
@_sympifyit('other', NotImplemented)
def __sub__(self, other):
if isinstance(other, Number):
if other is S.NegativeInfinity or other is S.NaN:
return S.NaN
elif other.is_Float:
if other == Float('-inf'):
return Float('nan')
else:
return Float('-inf')
else:
return S.NegativeInfinity
return NotImplemented
@_sympifyit('other', NotImplemented)
def __mul__(self, other):
if isinstance(other, Number):
if other is S.Zero or other is S.NaN:
return S.NaN
elif other.is_Float:
if other is S.NaN or other.is_zero:
return S.NaN
elif other.is_positive:
return Float('-inf')
else:
return Float('inf')
else:
if other.is_positive:
return S.NegativeInfinity
else:
return S.Infinity
return NotImplemented
__rmul__ = __mul__
@_sympifyit('other', NotImplemented)
def __div__(self, other):
if isinstance(other, Number):
if other is S.Infinity or \
other is S.NegativeInfinity or \
other is S.NaN:
return S.NaN
elif other.is_Float:
if other == Float('-inf') or \
other == Float('inf') or \
other is S.NaN:
return S.NaN
elif other.is_nonnegative:
return Float('-inf')
else:
return Float('inf')
else:
if other >= 0:
return S.NegativeInfinity
else:
return S.Infinity
return NotImplemented
__truediv__ = __div__
def __abs__(self):
return S.Infinity
def __neg__(self):
return S.Infinity
def _eval_power(self, expt):
"""
``expt`` is a symbolic object but not equal to 0 or 1.
================ ======= ==============================
Expression       Result  Notes
================ ======= ==============================
``(-oo) ** nan`` ``nan``
``(-oo) ** oo``  ``nan``
``(-oo) ** -oo`` ``nan``
``(-oo) ** e``   ``oo``  ``e`` is positive even integer
``(-oo) ** o``   ``-oo`` ``o`` is positive odd integer
================ ======= ==============================
See Also
========
Infinity
Pow
NaN
"""
if isinstance(expt, Number):
if expt is S.NaN or \
expt is S.Infinity or \
expt is S.NegativeInfinity:
return S.NaN
if isinstance(expt, Integer) and expt.is_positive:
if expt.is_odd:
return S.NegativeInfinity
else:
return S.Infinity
return S.NegativeOne**expt*S.Infinity**expt
def _as_mpf_val(self, prec):
return mlib.fninf
def _sage_(self):
import sage.all as sage
return -(sage.oo)
def __hash__(self):
return super(NegativeInfinity, self).__hash__()
def __eq__(self, other):
return other is S.NegativeInfinity
def __ne__(self, other):
return other is not S.NegativeInfinity
def __lt__(self, other):
try:
other = _sympify(other)
except SympifyError:
raise TypeError("Invalid comparison %s < %s" % (self, other))
if other.is_real:
if other.is_finite or other is S.Infinity:
return S.true
elif other.is_nonnegative:
return S.true
elif other.is_infinite and other.is_negative:
return S.false
return Expr.__lt__(self, other)
def __le__(self, other):
try:
other = _sympify(other)
except SympifyError:
raise TypeError("Invalid comparison %s <= %s" % (self, other))
if other.is_real:
return S.true
return Expr.__le__(self, other)
def __gt__(self, other):
try:
other = _sympify(other)
except SympifyError:
raise TypeError("Invalid comparison %s > %s" % (self, other))
if other.is_real:
return S.false
return Expr.__gt__(self, other)
def __ge__(self, other):
try:
other = _sympify(other)
except SympifyError:
raise TypeError("Invalid comparison %s >= %s" % (self, other))
if other.is_real:
if other.is_finite or other is S.Infinity:
return S.false
elif other.is_nonnegative:
return S.false
elif other.is_infinite and other.is_negative:
return S.true
return Expr.__ge__(self, other)
def __mod__(self, other):
return S.NaN
__rmod__ = __mod__
class NaN(with_metaclass(Singleton, Number)):
"""
Not a Number.
This serves as a placeholder for numeric values that are indeterminate.
Most operations on NaN produce another NaN. Most indeterminate forms,
such as ``0/0`` or ``oo - oo``, produce NaN. Two exceptions are ``0**0``
and ``oo**0``, which both produce ``1`` (this is consistent with Python's
float).
NaN is loosely related to floating point nan, which is defined in the
IEEE 754 floating point standard, and corresponds to the Python
``float('nan')``. Differences are noted below.
NaN is mathematically not equal to anything else, even NaN itself. This
explains the initially counter-intuitive results with ``Eq`` and ``==`` in
the examples below.
NaN is not comparable, so inequalities raise a TypeError. This is in
contrast with floating point nan, where all inequalities are false.
NaN is a singleton, and can be accessed by ``S.NaN``, or can be imported
as ``nan``.
Examples
========
>>> from sympy import nan, S, oo, Eq
>>> nan is S.NaN
True
>>> oo - oo
nan
>>> nan + 1
nan
>>> Eq(nan, nan) # mathematical equality
False
>>> nan == nan # structural equality
True
References
==========
.. [1] http://en.wikipedia.org/wiki/NaN
"""
is_commutative = True
is_real = None
is_rational = None
is_algebraic = None
is_transcendental = None
is_integer = None
is_comparable = False
is_finite = None
is_zero = None
is_prime = None
is_positive = None
is_negative = None
is_number = True
__slots__ = []
def __new__(cls):
return AtomicExpr.__new__(cls)
def _latex(self, printer):
return r"\mathrm{NaN}"
@_sympifyit('other', NotImplemented)
def __add__(self, other):
return self
@_sympifyit('other', NotImplemented)
def __sub__(self, other):
return self
@_sympifyit('other', NotImplemented)
def __mul__(self, other):
return self
@_sympifyit('other', NotImplemented)
def __div__(self, other):
return self
__truediv__ = __div__
def _as_mpf_val(self, prec):
return _mpf_nan
def _sage_(self):
import sage.all as sage
return sage.NaN
def __hash__(self):
return super(NaN, self).__hash__()
def __eq__(self, other):
# NaN is structurally equal to another NaN
return other is S.NaN
def __ne__(self, other):
return other is not S.NaN
def _eval_Eq(self, other):
# NaN is not mathematically equal to anything, even NaN
return S.false
# Expr will _sympify and raise TypeError
__gt__ = Expr.__gt__
__ge__ = Expr.__ge__
__lt__ = Expr.__lt__
__le__ = Expr.__le__
nan = S.NaN
class ComplexInfinity(with_metaclass(Singleton, AtomicExpr)):
r"""Complex infinity.
In complex analysis the symbol `\tilde\infty`, called "complex
infinity", represents a quantity with infinite magnitude, but
undetermined complex phase.
ComplexInfinity is a singleton, and can be accessed by
``S.ComplexInfinity``, or can be imported as ``zoo``.
Examples
========
>>> from sympy import zoo, oo
>>> zoo + 42
zoo
>>> 42/zoo
0
>>> zoo + zoo
nan
>>> zoo*zoo
zoo
See Also
========
Infinity
"""
is_commutative = True
is_infinite = True
is_number = True
is_prime = False
__slots__ = []
def __new__(cls):
return AtomicExpr.__new__(cls)
def _latex(self, printer):
return r"\tilde{\infty}"
@staticmethod
def __abs__():
return S.Infinity
@staticmethod
def __neg__():
return S.ComplexInfinity
def _eval_power(self, expt):
if expt is S.ComplexInfinity:
return S.NaN
if isinstance(expt, Number):
if expt is S.Zero:
return S.NaN
else:
if expt.is_positive:
return S.ComplexInfinity
else:
return S.Zero
def _sage_(self):
import sage.all as sage
return sage.UnsignedInfinityRing.gen()
zoo = S.ComplexInfinity
class NumberSymbol(AtomicExpr):
is_commutative = True
is_finite = True
is_number = True
__slots__ = []
is_NumberSymbol = True
def __new__(cls):
return AtomicExpr.__new__(cls)
def approximation(self, number_cls):
""" Return an interval with number_cls endpoints
that contains the value of NumberSymbol.
If not implemented, then return None.
"""
def _eval_evalf(self, prec):
return Float._new(self._as_mpf_val(prec), prec)
def __eq__(self, other):
try:
other = _sympify(other)
except SympifyError:
return False # sympy != other --> not ==
if self is other:
return True
if isinstance(other, Number) and self.is_irrational:
return False
return False # NumberSymbol != non-(Number|self)
def __ne__(self, other):
return not self.__eq__(other)
def __lt__(self, other):
try:
other = _sympify(other)
except SympifyError:
raise TypeError("Invalid comparison %s < %s" % (self, other))
if self is other:
return S.false
if isinstance(other, Number):
approx = self.approximation_interval(other.__class__)
if approx is not None:
l, u = approx
if other < l:
return S.false
if other > u:
return S.true
return _sympify(self.evalf() < other)
if other.is_real and other.is_number:
other = other.evalf()
return _sympify(self.evalf() < other)
return Expr.__lt__(self, other)
def __le__(self, other):
try:
other = _sympify(other)
except SympifyError:
raise TypeError("Invalid comparison %s <= %s" % (self, other))
if self is other:
return S.true
if other.is_real and other.is_number:
other = other.evalf()
if isinstance(other, Number):
return _sympify(self.evalf() <= other)
return Expr.__le__(self, other)
def __gt__(self, other):
try:
other = _sympify(other)
except SympifyError:
raise TypeError("Invalid comparison %s > %s" % (self, other))
r = _sympify((-self) < (-other))
if r in (S.true, S.false):
return r
else:
return Expr.__gt__(self, other)
def __ge__(self, other):
try:
other = _sympify(other)
except SympifyError:
raise TypeError("Invalid comparison %s >= %s" % (self, other))
r = _sympify((-self) <= (-other))
if r in (S.true, S.false):
return r
else:
return Expr.__ge__(self, other)
def __int__(self):
# subclass with appropriate return value
raise NotImplementedError
def __long__(self):
return self.__int__()
def __hash__(self):
return super(NumberSymbol, self).__hash__()
class Exp1(with_metaclass(Singleton, NumberSymbol)):
r"""The `e` constant.
The transcendental number `e = 2.718281828\dots` is the base of the
natural logarithm and of the exponential function, `e = \exp(1)`.
Sometimes called Euler's number or Napier's constant.
Exp1 is a singleton, and can be accessed by ``S.Exp1``,
or can be imported as ``E``.
Examples
========
>>> from sympy import exp, log, E
>>> E is exp(1)
True
>>> log(E)
1
References
==========
.. [1] http://en.wikipedia.org/wiki/E_%28mathematical_constant%29
"""
is_real = True
is_positive = True
is_negative = False # XXX Forces is_negative/is_nonnegative
is_irrational = True
is_number = True
is_algebraic = False
is_transcendental = True
__slots__ = []
def _latex(self, printer):
return r"e"
@staticmethod
def __abs__():
return S.Exp1
def __int__(self):
return 2
def _as_mpf_val(self, prec):
return mpf_e(prec)
def approximation_interval(self, number_cls):
if issubclass(number_cls, Integer):
return (Integer(2), Integer(3))
elif issubclass(number_cls, Rational):
pass
def _eval_power(self, expt):
from sympy import exp
return exp(expt)
def _eval_rewrite_as_sin(self):
from sympy import sin
I = S.ImaginaryUnit
return sin(I + S.Pi/2) - I*sin(I)
def _eval_rewrite_as_cos(self):
from sympy import cos
I = S.ImaginaryUnit
return cos(I) + I*cos(I + S.Pi/2)
def _sage_(self):
import sage.all as sage
return sage.e
E = S.Exp1
class Pi(with_metaclass(Singleton, NumberSymbol)):
r"""The `\pi` constant.
The transcendental number `\pi = 3.141592654\dots` represents the ratio
of a circle's circumference to its diameter, the area of the unit circle,
the half-period of trigonometric functions, and many other things
in mathematics.
Pi is a singleton, and can be accessed by ``S.Pi``, or can
be imported as ``pi``.
Examples
========
>>> from sympy import S, pi, oo, sin, exp, integrate, Symbol
>>> S.Pi
pi
>>> pi > 3
True
>>> pi.is_irrational
True
>>> x = Symbol('x')
>>> sin(x + 2*pi)
sin(x)
>>> integrate(exp(-x**2), (x, -oo, oo))
sqrt(pi)
References
==========
.. [1] http://en.wikipedia.org/wiki/Pi
"""
is_real = True
is_positive = True
is_negative = False
is_irrational = True
is_number = True
is_algebraic = False
is_transcendental = True
__slots__ = []
def _latex(self, printer):
return r"\pi"
@staticmethod
def __abs__():
return S.Pi
def __int__(self):
return 3
def _as_mpf_val(self, prec):
return mpf_pi(prec)
def approximation_interval(self, number_cls):
if issubclass(number_cls, Integer):
return (Integer(3), Integer(4))
elif issubclass(number_cls, Rational):
return (Rational(223, 71), Rational(22, 7))
def _sage_(self):
import sage.all as sage
return sage.pi
pi = S.Pi
class GoldenRatio(with_metaclass(Singleton, NumberSymbol)):
r"""The golden ratio, `\phi`.
`\phi = \frac{1 + \sqrt{5}}{2}` is an algebraic number. Two quantities
are in the golden ratio if their ratio is the same as the ratio of
their sum to the larger of the two quantities, i.e. their maximum.
GoldenRatio is a singleton, and can be accessed by ``S.GoldenRatio``.
Examples
========
>>> from sympy import S
>>> S.GoldenRatio > 1
True
>>> S.GoldenRatio.expand(func=True)
1/2 + sqrt(5)/2
>>> S.GoldenRatio.is_irrational
True
References
==========
.. [1] http://en.wikipedia.org/wiki/Golden_ratio
"""
is_real = True
is_positive = True
is_negative = False
is_irrational = True
is_number = True
is_algebraic = True
is_transcendental = False
__slots__ = []
def _latex(self, printer):
return r"\phi"
def __int__(self):
return 1
def _as_mpf_val(self, prec):
# XXX track down why this has to be increased
rv = mlib.from_man_exp(phi_fixed(prec + 10), -prec - 10)
return mpf_norm(rv, prec)
def _eval_expand_func(self, **hints):
from sympy import sqrt
return S.Half + S.Half*sqrt(5)
def approximation_interval(self, number_cls):
if issubclass(number_cls, Integer):
return (S.One, Rational(2))
elif issubclass(number_cls, Rational):
pass
def _sage_(self):
import sage.all as sage
return sage.golden_ratio
class EulerGamma(with_metaclass(Singleton, NumberSymbol)):
r"""The Euler-Mascheroni constant.
`\gamma = 0.5772157\dots` (also called Euler's constant) is a mathematical
constant recurring in analysis and number theory. It is defined as the
limiting difference between the harmonic series and the
natural logarithm:
.. math:: \gamma = \lim\limits_{n\to\infty}
   \left(\sum\limits_{k=1}^n\frac{1}{k} - \ln n\right)
EulerGamma is a singleton, and can be accessed by ``S.EulerGamma``.
Examples
========
>>> from sympy import S
>>> S.EulerGamma.is_irrational
>>> S.EulerGamma > 0
True
>>> S.EulerGamma > 1
False
References
==========
.. [1] http://en.wikipedia.org/wiki/Euler%E2%80%93Mascheroni_constant
"""
is_real = True
is_positive = True
is_negative = False
is_irrational = None
is_number = True
__slots__ = []
def _latex(self, printer):
return r"\gamma"
def __int__(self):
return 0
def _as_mpf_val(self, prec):
# XXX track down why this has to be increased
v = mlib.libhyper.euler_fixed(prec + 10)
rv = mlib.from_man_exp(v, -prec - 10)
return mpf_norm(rv, prec)
def approximation_interval(self, number_cls):
if issubclass(number_cls, Integer):
return (S.Zero, S.One)
elif issubclass(number_cls, Rational):
return (S.Half, Rational(3, 5))
def _sage_(self):
import sage.all as sage
return sage.euler_gamma
class Catalan(with_metaclass(Singleton, NumberSymbol)):
r"""Catalan's constant.
`K = 0.91596559\dots` is given by the infinite series
.. math:: K = \sum_{k=0}^{\infty} \frac{(-1)^k}{(2k+1)^2}
Catalan is a singleton, and can be accessed by ``S.Catalan``.
Examples
========
>>> from sympy import S
>>> S.Catalan.is_irrational
>>> S.Catalan > 0
True
>>> S.Catalan > 1
False
References
==========
.. [1] http://en.wikipedia.org/wiki/Catalan%27s_constant
"""
is_real = True
is_positive = True
is_negative = False
is_irrational = None
is_number = True
__slots__ = []
def __int__(self):
return 0
def _as_mpf_val(self, prec):
# XXX track down why this has to be increased
v = mlib.catalan_fixed(prec + 10)
rv = mlib.from_man_exp(v, -prec - 10)
return mpf_norm(rv, prec)
def approximation_interval(self, number_cls):
if issubclass(number_cls, Integer):
return (S.Zero, S.One)
elif issubclass(number_cls, Rational):
return (Rational(9, 10), S.One)
def _sage_(self):
import sage.all as sage
return sage.catalan
class ImaginaryUnit(with_metaclass(Singleton, AtomicExpr)):
r"""The imaginary unit, `i = \sqrt{-1}`.
I is a singleton, and can be accessed by ``S.I``, or can be
imported as ``I``.
Examples
========
>>> from sympy import I, sqrt
>>> sqrt(-1)
I
>>> I*I
-1
>>> 1/I
-I
References
==========
.. [1] http://en.wikipedia.org/wiki/Imaginary_unit
"""
is_commutative = True
is_imaginary = True
is_finite = True
is_number = True
is_algebraic = True
is_transcendental = False
__slots__ = []
def _latex(self, printer):
return r"i"
@staticmethod
def __abs__():
return S.One
def _eval_evalf(self, prec):
return self
def _eval_conjugate(self):
return -S.ImaginaryUnit
def _eval_power(self, expt):
"""
b is I = sqrt(-1)
e is a symbolic object but not equal to 0 or 1
I**r -> (-1)**(r/2) -> exp(r/2*Pi*I) -> cos(Pi*r/2) + I*sin(Pi*r/2), r is decimal
I**0 mod 4 -> 1
I**1 mod 4 -> I
I**2 mod 4 -> -1
I**3 mod 4 -> -I
"""
if isinstance(expt, Number):
if isinstance(expt, Integer):
expt = expt.p % 4
if expt == 0:
return S.One
if expt == 1:
return S.ImaginaryUnit
if expt == 2:
return -S.One
return -S.ImaginaryUnit
return (S.NegativeOne)**(expt*S.Half)
return
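# Illustrative examples (editor's addition): integer exponents reduce modulo
# 4 and other numeric exponents fall through to a power of -1, e.g.
# >>> S.ImaginaryUnit**10
# -1
# >>> S.ImaginaryUnit**Rational(3, 2)
# (-1)**(3/4)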
def as_base_exp(self):
return S.NegativeOne, S.Half
def _sage_(self):
import sage.all as sage
return sage.I
I = S.ImaginaryUnit
def sympify_fractions(f):
return Rational(f.numerator, f.denominator)
converter[fractions.Fraction] = sympify_fractions
try:
if HAS_GMPY == 2:
import gmpy2 as gmpy
elif HAS_GMPY == 1:
import gmpy
else:
raise ImportError
def sympify_mpz(x):
return Integer(long(x))
def sympify_mpq(x):
return Rational(long(x.numerator), long(x.denominator))
converter[type(gmpy.mpz(1))] = sympify_mpz
converter[type(gmpy.mpq(1, 2))] = sympify_mpq
except ImportError:
pass
def sympify_mpmath(x):
return Expr._from_mpmath(x, x.context.prec)
converter[mpnumeric] = sympify_mpmath
def sympify_complex(a):
real, imag = list(map(sympify, (a.real, a.imag)))
return real + S.ImaginaryUnit*imag
converter[complex] = sympify_complex
_intcache[0] = S.Zero
_intcache[1] = S.One
_intcache[-1] = S.NegativeOne
from .power import Pow, integer_nthroot
from .mul import Mul
Mul.identity = One()
from .add import Add
Add.identity = Zero()
|
OsirisSPS/osiris-sps
|
refs/heads/master
|
client/share/plugins/AF9A4C281070FDB0F34CF417CDB168AB38C8A388/lib/plat-mac/lib-scriptpackages/Terminal/__init__.py
|
73
|
"""
Package generated from /Applications/Utilities/Terminal.app
"""
from warnings import warnpy3k
warnpy3k("In 3.x, the Terminal module is removed.", stacklevel=2)
import aetools
Error = aetools.Error
import Standard_Suite
import Text_Suite
import Terminal_Suite
_code_to_module = {
'????' : Standard_Suite,
'????' : Text_Suite,
'trmx' : Terminal_Suite,
}
_code_to_fullname = {
'????' : ('Terminal.Standard_Suite', 'Standard_Suite'),
'????' : ('Terminal.Text_Suite', 'Text_Suite'),
'trmx' : ('Terminal.Terminal_Suite', 'Terminal_Suite'),
}
from Standard_Suite import *
from Text_Suite import *
from Terminal_Suite import *
def getbaseclasses(v):
if not getattr(v, '_propdict', None):
v._propdict = {}
v._elemdict = {}
for superclassname in getattr(v, '_superclassnames', []):
superclass = eval(superclassname)
getbaseclasses(superclass)
v._propdict.update(getattr(superclass, '_propdict', {}))
v._elemdict.update(getattr(superclass, '_elemdict', {}))
v._propdict.update(getattr(v, '_privpropdict', {}))
v._elemdict.update(getattr(v, '_privelemdict', {}))
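# Illustrative sketch (editor's addition; C and B are hypothetical names):
# for a class C with _superclassnames = ['B'], the recursion above amounts to
#   C._propdict = dict(B._propdict); C._propdict.update(C._privpropdict)
#   C._elemdict = dict(B._elemdict); C._elemdict.update(C._privelemdict)
# i.e. inherited property/element tables are flattened into each class.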
import StdSuites
#
# Set property and element dictionaries now that all classes have been defined
#
getbaseclasses(color)
getbaseclasses(window)
getbaseclasses(application)
getbaseclasses(item)
getbaseclasses(document)
getbaseclasses(window)
getbaseclasses(application)
getbaseclasses(character)
getbaseclasses(attachment)
getbaseclasses(paragraph)
getbaseclasses(word)
getbaseclasses(attribute_run)
getbaseclasses(text)
#
# Indices of types declared in this module
#
_classdeclarations = {
'colr' : color,
'cwin' : window,
'capp' : application,
'cobj' : item,
'docu' : document,
'cwin' : window,
'capp' : application,
'cha ' : character,
'atts' : attachment,
'cpar' : paragraph,
'cwor' : word,
'catr' : attribute_run,
'ctxt' : text,
}
class Terminal(Standard_Suite_Events,
Text_Suite_Events,
Terminal_Suite_Events,
aetools.TalkTo):
_signature = 'trmx'
_moduleName = 'Terminal'
_elemdict = application._elemdict
_propdict = application._propdict
|
gavioto/folly
|
refs/heads/master
|
folly/build/generate_format_tables.py
|
61
|
#!/usr/bin/env python
#
# Generate Format tables
import os
from optparse import OptionParser
OUTPUT_FILE = "FormatTables.cpp"
def generate_table(f, type_name, name, map):
f.write("extern const {0} {1}[] = {{".format(type_name, name))
for i in range(0, 256):
if i % 2 == 0:
f.write("\n ")
f.write("{0}::{1}, ".format(type_name, map.get(chr(i), "INVALID")))
f.write("\n};\n\n")
def generate_conv_table(f, name, values):
values = list(values)
line = ''
for i, v in enumerate(values):
if i == 0:
f.write("extern const char {0}[{1}][{2}] = {{\n".format(
name, len(values), len(v)))
row = "{{{0}}}, ".format(", ".join("'{0}'".format(x) for x in v))
if len(line) + len(row) > 79:
f.write(line + "\n")
line = ''
line += row
if line:
f.write(line + "\n")
f.write("};\n\n")
def octal_values():
return (tuple("{0:03o}".format(x)) for x in range(512))
def hex_values(upper):
fmt = "{0:02X}" if upper else "{0:02x}"
return (tuple(fmt.format(x)) for x in range(256))
def binary_values():
return (tuple("{0:08b}".format(x)) for x in range(256))
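# Illustrative note (editor's addition): each helper yields one digit tuple
# per value, e.g. hex_values(False) yields ('0', '0'), ('0', '1'), ...,
# ('f', 'f'); generate_conv_table() then renders the last of those as the
# row: {'f', 'f'},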
def generate(f):
f.write("#include <folly/FormatArg.h>\n"
"\n"
"namespace folly {\n"
"namespace detail {\n"
"\n")
generate_table(
f, "FormatArg::Align", "formatAlignTable",
{"<": "LEFT", ">": "RIGHT", "=": "PAD_AFTER_SIGN", "^": "CENTER"})
generate_table(
f, "FormatArg::Sign", "formatSignTable",
{"+": "PLUS_OR_MINUS", "-": "MINUS", " ": "SPACE_OR_MINUS"})
generate_conv_table(f, "formatOctal", octal_values())
generate_conv_table(f, "formatHexLower", hex_values(False))
generate_conv_table(f, "formatHexUpper", hex_values(True))
generate_conv_table(f, "formatBinary", binary_values())
f.write("} // namespace detail\n"
"} // namespace folly\n")
def main():
parser = OptionParser()
parser.add_option("--install_dir", dest="install_dir", default=".",
help="write output to DIR", metavar="DIR")
parser.add_option("--fbcode_dir")
(options, args) = parser.parse_args()
f = open(os.path.join(options.install_dir, OUTPUT_FILE), "w")
generate(f)
f.close()
if __name__ == "__main__":
main()
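# Usage sketch (invocation inferred from the OptionParser flags above; the
# output directory is illustrative):
#   python generate_format_tables.py --install_dir=build/folly
# writes build/folly/FormatTables.cpp containing the align/sign/octal/hex/binary tables.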
|
dednal/chromium.src
|
refs/heads/nw12
|
tools/git/git-diff-ide.py
|
197
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Invokes git diff [args...] and inserts file:line in front of each line of diff
output where possible.
This is useful from an IDE that allows you to double-click lines that begin
with file:line to open and jump to that point in the file.
Synopsis:
%prog [git diff args...]
Examples:
%prog
%prog HEAD
"""
import subprocess
import sys
def GitShell(args, ignore_return=False):
"""A shell invocation suitable for communicating with git. Returns
output as list of lines, raises exception on error.
"""
job = subprocess.Popen(args,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
(out, err) = job.communicate()
if job.returncode != 0 and not ignore_return:
print out
raise Exception("Error %d running command %s" % (
job.returncode, args))
return out.split('\n')
def PrintGitDiff(extra_args):
"""Outputs git diff extra_args with file:line inserted into relevant lines."""
current_file = ''
line_num = 0
lines = GitShell('git diff %s' % ' '.join(extra_args))
for line in lines:
# Pass-through lines:
# diff --git a/file.c b/file.c
# index 0e38c2d..8cd69ae 100644
# --- a/file.c
if (line.startswith('diff ') or
line.startswith('index ') or
line.startswith('--- ')):
print line
continue
# Get the filename from the +++ line:
# +++ b/file.c
if line.startswith('+++ '):
# Filename might be /dev/null or a/file or b/file.
# Skip the first two characters unless it starts with /.
current_file = line[4:] if line[4] == '/' else line[6:]
print line
continue
# Update line number from the @@ lines:
# @@ -41,9 +41,9 @@ def MyFunc():
# ^^
if line.startswith('@@ '):
_, old_nr, new_nr, _ = line.split(' ', 3)
line_num = int(new_nr.split(',')[0])
print line
continue
print current_file + ':' + repr(line_num) + ':' + line
# Increment line number for lines that start with ' ' or '+':
# @@ -41,4 +41,4 @@ def MyFunc():
# file.c:41: // existing code
# file.c:42: // existing code
# file.c:43:-// deleted code
# file.c:43:-// deleted code
# file.c:43:+// inserted code
# file.c:44:+// inserted code
if line.startswith(' ') or line.startswith('+'):
line_num += 1
def main():
PrintGitDiff(sys.argv[1:])
if __name__ == '__main__':
main()
|
tuc-osg/micropython
|
refs/heads/master
|
esp8266/modules/ds18x20.py
|
33
|
# DS18x20 temperature sensor driver for MicroPython.
# MIT license; Copyright (c) 2016 Damien P. George
from micropython import const
_CONVERT = const(0x44)
_RD_SCRATCH = const(0xbe)
_WR_SCRATCH = const(0x4e)
class DS18X20:
def __init__(self, onewire):
self.ow = onewire
self.buf = bytearray(9)
def scan(self):
return [rom for rom in self.ow.scan() if rom[0] == 0x10 or rom[0] == 0x28]
def convert_temp(self):
self.ow.reset(True)
self.ow.writebyte(self.ow.SKIP_ROM)
self.ow.writebyte(_CONVERT)
def read_scratch(self, rom):
self.ow.reset(True)
self.ow.select_rom(rom)
self.ow.writebyte(_RD_SCRATCH)
self.ow.readinto(self.buf)
if self.ow.crc8(self.buf):
raise Exception('CRC error')
return self.buf
def write_scratch(self, rom, buf):
self.ow.reset(True)
self.ow.select_rom(rom)
self.ow.writebyte(_WR_SCRATCH)
self.ow.write(buf)
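# Temperature decoding notes (from the DS18S20/DS18B20 datasheets):
# - family code 0x10 (DS18S20): 9-bit reading at 0.5 degC/LSB, refined below
#   using COUNT_REMAIN (buf[6]) and COUNT_PER_C (buf[7]).
# - other families (e.g. 0x28, DS18B20): 16-bit two's-complement reading at
#   1/16 degC per LSB.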
def read_temp(self, rom):
buf = self.read_scratch(rom)
if rom[0] == 0x10:
if buf[1]:
t = buf[0] >> 1 | 0x80
t = -((~t + 1) & 0xff)
else:
t = buf[0] >> 1
return t - 0.25 + (buf[7] - buf[6]) / buf[7]
else:
t = buf[1] << 8 | buf[0]
if t & 0x8000: # sign bit set
t = -((t ^ 0xffff) + 1)
return t / 16
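# Usage sketch (a minimal example assuming a MicroPython board with a OneWire
# bus wired to GPIO 12; the pin number is illustrative):
#   import time, machine, onewire, ds18x20
#   ds = ds18x20.DS18X20(onewire.OneWire(machine.Pin(12)))
#   roms = ds.scan()
#   ds.convert_temp()
#   time.sleep_ms(750)  # a 12-bit conversion can take up to 750 ms
#   for rom in roms:
#       print(ds.read_temp(rom))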
|
fangxingli/hue
|
refs/heads/master
|
apps/beeswax/src/beeswax/urls.py
|
1
|
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.conf.urls import patterns, url
urlpatterns = patterns('beeswax.views',
url(r'^$', 'index', name='index'),
url(r'^execute/?$', 'execute_query', name='execute_query'),
url(r'^item_query/?$', 'item_query', name='item_query'),
url(r'^execute/design/(?P<design_id>\d+)$', 'execute_query', name='execute_design'),
url(r'^execute/query/(?P<query_history_id>\d+)$', 'execute_query', name='watch_query_history'),
url(r'^results/(?P<id>\d+)/(?P<first_row>\d+)$', 'view_results', name='view_results'),
url(r'^download/(?P<id>\d+)/(?P<format>\w+)$', 'download', name='download'),
url(r'^my_queries$', 'my_queries', name='my_queries'),
url(r'^list_designs$', 'list_designs', name='list_designs'),
url(r'^list_trashed_designs$', 'list_trashed_designs', name='list_trashed_designs'),
url(r'^delete_designs$', 'delete_design', name='delete_design'),
url(r'^restore_designs$', 'restore_design', name='restore_design'),
url(r'^clone_design/(?P<design_id>\d+)$', 'clone_design', name='clone_design'),
url(r'^query_history$', 'list_query_history', name='list_query_history'),
url(r'^configuration/?$', 'configuration', name='configuration'),
url(r'^install_examples$', 'install_examples', name='install_examples'),
url(r'^query_cb/done/(?P<server_id>\S+)$', 'query_done_cb', name='query_done_cb'),
)
urlpatterns += patterns(
'beeswax.create_database',
url(r'^create/database$', 'create_database', name='create_database'),
)
urlpatterns += patterns(
'beeswax.create_table',
url(r'^create/create_table/(?P<database>\w+)$', 'create_table', name='create_table'),
url(r'^create/import_wizard/(?P<database>\w+)$', 'import_wizard', name='import_wizard'),
url(r'^create/auto_load/(?P<database>\w+)$', 'load_after_create', name='load_after_create'),
)
urlpatterns += patterns(
'beeswax.api',
url(r'^api/session/?$', 'get_session', name='api_get_session'),
url(r'^api/session/(?P<session_id>\d+)/?$', 'get_session', name='api_get_session'),
url(r'^api/session/(?P<session_id>\d+)/close/?$', 'close_session', name='api_close_session'),
url(r'^api/settings/?$', 'get_settings', name='get_settings'),
url(r'^api/functions/?$', 'get_functions', name='get_functions'),
# Deprecated by Notebook API
url(r'^api/autocomplete/?$', 'autocomplete', name='api_autocomplete_databases'),
url(r'^api/autocomplete/(?P<database>\w+)/?$', 'autocomplete', name='api_autocomplete_tables'),
url(r'^api/autocomplete/(?P<database>\w+)/(?P<table>\w+)/?$', 'autocomplete', name='api_autocomplete_columns'),
url(r'^api/autocomplete/(?P<database>\w+)/(?P<table>\w+)/(?P<column>\w+)/?$', 'autocomplete', name='api_autocomplete_column'),
url(r'^api/autocomplete/(?P<database>\w+)/(?P<table>\w+)/(?P<column>\w+)/(?P<nested>.+)/?$', 'autocomplete', name='api_autocomplete_nested'),
url(r'^api/design/(?P<design_id>\d+)?$', 'save_query_design', name='api_save_design'),
url(r'^api/design/(?P<design_id>\d+)/get$', 'fetch_saved_design', name='api_fetch_saved_design'),
url(r'^api/query/(?P<query_history_id>\d+)/get$', 'fetch_query_history', name='api_fetch_query_history'),
url(r'^api/query/parameters$', 'parameters', name='api_parameters'),
url(r'^api/query/execute/(?P<design_id>\d+)?$', 'execute', name='api_execute'),
url(r'^api/query/(?P<query_history_id>\d+)/cancel/?$', 'cancel_query', name='api_cancel_query'),
url(r'^api/query/(?P<query_history_id>\d+)/close/?$', 'close_operation', name='api_close_operation'),
url(r'^api/query/(?P<query_history_id>\d+)/results/save/hive/table/?$', 'save_results_hive_table', name='api_save_results_hive_table'),
url(r'^api/query/(?P<query_history_id>\d+)/results/save/hdfs/file/?$', 'save_results_hdfs_file', name='api_save_results_hdfs_file'),
url(r'^api/query/(?P<query_history_id>\d+)/results/save/hdfs/directory/?$', 'save_results_hdfs_directory', name='api_save_results_hdfs_directory'),
url(r'^api/watch/json/(?P<id>\d+)/?$', 'watch_query_refresh_json', name='api_watch_query_refresh_json'),
url(r'^api/query/clear_history/?$', 'clear_history', name='clear_history'),
url(r'^api/table/(?P<database>\w+)/(?P<table>\w+)/?$', 'describe_table', name='describe_table'),
url(r'^api/table/(?P<database>\w+)/(?P<table>\w+)/indexes/?$', 'get_indexes', name='get_indexes'),
url(r'^api/table/(?P<database>\w+)/(?P<table>\w+)/sample/?$', 'get_sample_data', name='get_sample_data'),
url(r'^api/table/(?P<database>\w+)/(?P<table>\w+)/(?P<column>\w+)/sample/?$', 'get_sample_data', name='get_sample_data_column'),
url(r'^api/table/(?P<database>\w+)/(?P<table>\w+)/stats/(?P<column>\w+)?$', 'get_table_stats', name='get_table_stats'),
url(r'^api/table/(?P<database>\w+)/(?P<table>\w+)/terms/(?P<column>\w+)/(?P<prefix>\w+)?$', 'get_top_terms', name='get_top_terms'),
url(r'^api/analyze/(?P<database>\w+)/(?P<table>\w+)/(?P<columns>\w+)?$', 'analyze_table', name='analyze_table'),
)
|
ctasims/Dive-Into-Python-3
|
refs/heads/master
|
examples/plural4.py
|
1
|
'''Pluralize English nouns (stage 4)
Command line usage:
$ python plural4.py noun
nouns
'''
import re
def build_match_and_apply_functions(pattern, search, replace):
def matches_rule(word):
return re.search(pattern, word)
def apply_rule(word):
return re.sub(search, replace, word)
return (matches_rule, apply_rule)
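# plural4-rules.txt is expected to hold three whitespace-separated columns per
# line: a match pattern, a search pattern, and a replacement. An illustrative
# (assumed) line: "[sxz]$  $  es" -- nouns ending in s, x or z get "es" appended.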
rules = []
with open('plural4-rules.txt', encoding='utf-8') as pattern_file:
for line in pattern_file:
pattern, search, replace = line.split(None, 3)
rules.append(build_match_and_apply_functions(
pattern, search, replace))
def plural(noun):
for matches_rule, apply_rule in rules:
if matches_rule(noun):
return apply_rule(noun)
if __name__ == '__main__':
import sys
if sys.argv[1:]:
print(plural(sys.argv[1]))
else:
print(__doc__)
# Copyright (c) 2009, Mark Pilgrim, All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS'
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
|
Peddle/hue
|
refs/heads/master
|
desktop/core/ext-py/lxml-3.3.6/src/lxml/tests/test_etree.py
|
11
|
# -*- coding: utf-8 -*-
"""
Tests specific to the extended etree API
Tests that apply to the general ElementTree API should go into
test_elementtree
"""
import os.path
import unittest
import copy
import sys
import re
import gc
import operator
import tempfile
import zlib
import gzip
this_dir = os.path.dirname(__file__)
if this_dir not in sys.path:
sys.path.insert(0, this_dir) # needed for Py3
from common_imports import etree, StringIO, BytesIO, HelperTestCase
from common_imports import fileInTestDir, fileUrlInTestDir, read_file, path2url
from common_imports import SillyFileLike, LargeFileLikeUnicode, doctest, make_doctest
from common_imports import canonicalize, sorted, _str, _bytes
print("")
print("TESTED VERSION: %s" % etree.__version__)
print(" Python: " + repr(sys.version_info))
print(" lxml.etree: " + repr(etree.LXML_VERSION))
print(" libxml used: " + repr(etree.LIBXML_VERSION))
print(" libxml compiled: " + repr(etree.LIBXML_COMPILED_VERSION))
print(" libxslt used: " + repr(etree.LIBXSLT_VERSION))
print(" libxslt compiled: " + repr(etree.LIBXSLT_COMPILED_VERSION))
print("")
try:
_unicode = unicode
except NameError:
# Python 3
_unicode = str
class ETreeOnlyTestCase(HelperTestCase):
"""Tests only for etree, not ElementTree"""
etree = etree
def test_version(self):
self.assertTrue(isinstance(etree.__version__, _unicode))
self.assertTrue(isinstance(etree.LXML_VERSION, tuple))
self.assertEqual(len(etree.LXML_VERSION), 4)
self.assertTrue(isinstance(etree.LXML_VERSION[0], int))
self.assertTrue(isinstance(etree.LXML_VERSION[1], int))
self.assertTrue(isinstance(etree.LXML_VERSION[2], int))
self.assertTrue(isinstance(etree.LXML_VERSION[3], int))
self.assertTrue(etree.__version__.startswith(
str(etree.LXML_VERSION[0])))
def test_c_api(self):
if hasattr(self.etree, '__pyx_capi__'):
# newer Pyrex compatible C-API
self.assertTrue(isinstance(self.etree.__pyx_capi__, dict))
self.assertTrue(len(self.etree.__pyx_capi__) > 0)
else:
# older C-API mechanism
self.assertTrue(hasattr(self.etree, '_import_c_api'))
def test_element_names(self):
Element = self.etree.Element
el = Element('name')
self.assertEqual(el.tag, 'name')
el = Element('{}name')
self.assertEqual(el.tag, 'name')
def test_element_name_empty(self):
Element = self.etree.Element
el = Element('name')
self.assertRaises(ValueError, Element, '{}')
self.assertRaises(ValueError, setattr, el, 'tag', '{}')
self.assertRaises(ValueError, Element, '{test}')
self.assertRaises(ValueError, setattr, el, 'tag', '{test}')
def test_element_name_colon(self):
Element = self.etree.Element
self.assertRaises(ValueError, Element, 'p:name')
self.assertRaises(ValueError, Element, '{test}p:name')
el = Element('name')
self.assertRaises(ValueError, setattr, el, 'tag', 'p:name')
def test_element_name_quote(self):
Element = self.etree.Element
self.assertRaises(ValueError, Element, "p'name")
self.assertRaises(ValueError, Element, 'p"name')
self.assertRaises(ValueError, Element, "{test}p'name")
self.assertRaises(ValueError, Element, '{test}p"name')
el = Element('name')
self.assertRaises(ValueError, setattr, el, 'tag', "p'name")
self.assertRaises(ValueError, setattr, el, 'tag', 'p"name')
def test_element_name_space(self):
Element = self.etree.Element
self.assertRaises(ValueError, Element, ' name ')
self.assertRaises(ValueError, Element, 'na me')
self.assertRaises(ValueError, Element, '{test} name')
el = Element('name')
self.assertRaises(ValueError, setattr, el, 'tag', ' name ')
def test_subelement_name_empty(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
el = Element('name')
self.assertRaises(ValueError, SubElement, el, '{}')
self.assertRaises(ValueError, SubElement, el, '{test}')
def test_subelement_name_colon(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
el = Element('name')
self.assertRaises(ValueError, SubElement, el, 'p:name')
self.assertRaises(ValueError, SubElement, el, '{test}p:name')
def test_subelement_name_quote(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
el = Element('name')
self.assertRaises(ValueError, SubElement, el, "p'name")
self.assertRaises(ValueError, SubElement, el, "{test}p'name")
self.assertRaises(ValueError, SubElement, el, 'p"name')
self.assertRaises(ValueError, SubElement, el, '{test}p"name')
def test_subelement_name_space(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
el = Element('name')
self.assertRaises(ValueError, SubElement, el, ' name ')
self.assertRaises(ValueError, SubElement, el, 'na me')
self.assertRaises(ValueError, SubElement, el, '{test} name')
def test_subelement_attribute_invalid(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
el = Element('name')
self.assertRaises(ValueError, SubElement, el, 'name', {'a b c' : 'abc'})
self.assertRaises(ValueError, SubElement, el, 'name', {'a' : 'a\0\n'})
self.assertEqual(0, len(el))
def test_qname_empty(self):
QName = self.etree.QName
self.assertRaises(ValueError, QName, '')
self.assertRaises(ValueError, QName, 'test', '')
def test_qname_colon(self):
QName = self.etree.QName
self.assertRaises(ValueError, QName, 'p:name')
self.assertRaises(ValueError, QName, 'test', 'p:name')
def test_qname_space(self):
QName = self.etree.QName
self.assertRaises(ValueError, QName, ' name ')
self.assertRaises(ValueError, QName, 'na me')
self.assertRaises(ValueError, QName, 'test', ' name')
def test_qname_namespace_localname(self):
# ET doesn't have namespace/localname properties on QNames
QName = self.etree.QName
namespace, localname = 'http://myns', 'a'
qname = QName(namespace, localname)
self.assertEqual(namespace, qname.namespace)
self.assertEqual(localname, qname.localname)
def test_qname_element(self):
# ET doesn't have namespace/localname properties on QNames
QName = self.etree.QName
qname1 = QName('http://myns', 'a')
a = self.etree.Element(qname1, nsmap={'p' : 'http://myns'})
qname2 = QName(a)
self.assertEqual(a.tag, qname1.text)
self.assertEqual(qname1.text, qname2.text)
self.assertEqual(qname1, qname2)
def test_qname_text_resolve(self):
# ET doesn't resolve QNames as text values
etree = self.etree
qname = etree.QName('http://myns', 'a')
a = etree.Element(qname, nsmap={'p' : 'http://myns'})
a.text = qname
self.assertEqual("p:a", a.text)
def test_nsmap_prefix_invalid(self):
etree = self.etree
self.assertRaises(ValueError,
etree.Element, "root", nsmap={'"' : 'testns'})
self.assertRaises(ValueError,
etree.Element, "root", nsmap={'&' : 'testns'})
self.assertRaises(ValueError,
etree.Element, "root", nsmap={'a:b' : 'testns'})
def test_attribute_has_key(self):
# ET in Py 3.x has no "attrib.has_key()" method
XML = self.etree.XML
root = XML(_bytes('<foo bar="Bar" xmlns:ns="http://ns.codespeak.net/test" ns:baz="Baz" />'))
self.assertEqual(
True, root.attrib.has_key('bar'))
self.assertEqual(
False, root.attrib.has_key('baz'))
self.assertEqual(
False, root.attrib.has_key('hah'))
self.assertEqual(
True,
root.attrib.has_key('{http://ns.codespeak.net/test}baz'))
def test_attribute_set(self):
Element = self.etree.Element
root = Element("root")
root.set("attr", "TEST")
self.assertEqual("TEST", root.get("attr"))
def test_attrib_and_keywords(self):
Element = self.etree.Element
root = Element("root")
root.set("attr", "TEST")
self.assertEqual("TEST", root.attrib["attr"])
root2 = Element("root2", root.attrib, attr2='TOAST')
self.assertEqual("TEST", root2.attrib["attr"])
self.assertEqual("TOAST", root2.attrib["attr2"])
self.assertEqual(None, root.attrib.get("attr2"))
def test_attrib_order(self):
Element = self.etree.Element
keys = ["attr%d" % i for i in range(10)]
values = ["TEST-%d" % i for i in range(10)]
items = list(zip(keys, values))
root = Element("root")
for key, value in items:
root.set(key, value)
self.assertEqual(keys, root.attrib.keys())
self.assertEqual(values, root.attrib.values())
root2 = Element("root2", root.attrib,
attr_99='TOAST-1', attr_98='TOAST-2')
self.assertEqual(['attr_98', 'attr_99'] + keys,
root2.attrib.keys())
self.assertEqual(['TOAST-2', 'TOAST-1'] + values,
root2.attrib.values())
self.assertEqual(keys, root.attrib.keys())
self.assertEqual(values, root.attrib.values())
def test_attribute_set_invalid(self):
# ElementTree accepts arbitrary attribute values
# lxml.etree allows only strings
Element = self.etree.Element
root = Element("root")
self.assertRaises(TypeError, root.set, "newattr", 5)
self.assertRaises(TypeError, root.set, "newattr", None)
def test_strip_attributes(self):
XML = self.etree.XML
xml = _bytes('<test a="5" b="10" c="20"><x a="4" b="2"/></test>')
root = XML(xml)
self.etree.strip_attributes(root, 'a')
self.assertEqual(_bytes('<test b="10" c="20"><x b="2"></x></test>'),
self._writeElement(root))
root = XML(xml)
self.etree.strip_attributes(root, 'b', 'c')
self.assertEqual(_bytes('<test a="5"><x a="4"></x></test>'),
self._writeElement(root))
def test_strip_attributes_ns(self):
XML = self.etree.XML
xml = _bytes('<test xmlns:n="http://test/ns" a="6" b="10" c="20" n:a="5"><x a="4" n:b="2"/></test>')
root = XML(xml)
self.etree.strip_attributes(root, 'a')
self.assertEqual(
_bytes('<test xmlns:n="http://test/ns" b="10" c="20" n:a="5"><x n:b="2"></x></test>'),
self._writeElement(root))
root = XML(xml)
self.etree.strip_attributes(root, '{http://test/ns}a', 'c')
self.assertEqual(
_bytes('<test xmlns:n="http://test/ns" a="6" b="10"><x a="4" n:b="2"></x></test>'),
self._writeElement(root))
root = XML(xml)
self.etree.strip_attributes(root, '{http://test/ns}*')
self.assertEqual(
_bytes('<test xmlns:n="http://test/ns" a="6" b="10" c="20"><x a="4"></x></test>'),
self._writeElement(root))
def test_strip_elements(self):
XML = self.etree.XML
xml = _bytes('<test><a><b><c/></b></a><x><a><b/><c/></a></x></test>')
root = XML(xml)
self.etree.strip_elements(root, 'a')
self.assertEqual(_bytes('<test><x></x></test>'),
self._writeElement(root))
root = XML(xml)
self.etree.strip_elements(root, 'b', 'c', 'X', 'Y', 'Z')
self.assertEqual(_bytes('<test><a></a><x><a></a></x></test>'),
self._writeElement(root))
root = XML(xml)
self.etree.strip_elements(root, 'c')
self.assertEqual(_bytes('<test><a><b></b></a><x><a><b></b></a></x></test>'),
self._writeElement(root))
def test_strip_elements_ns(self):
XML = self.etree.XML
xml = _bytes('<test>TEST<n:a xmlns:n="urn:a">A<b>B<c xmlns="urn:c"/>C</b>BT</n:a>AT<x>X<a>A<b xmlns="urn:a"/>BT<c xmlns="urn:x"/>CT</a>AT</x>XT</test>')
root = XML(xml)
self.etree.strip_elements(root, 'a')
self.assertEqual(_bytes('<test>TEST<n:a xmlns:n="urn:a">A<b>B<c xmlns="urn:c"></c>C</b>BT</n:a>AT<x>X</x>XT</test>'),
self._writeElement(root))
root = XML(xml)
self.etree.strip_elements(root, '{urn:a}b', 'c')
self.assertEqual(_bytes('<test>TEST<n:a xmlns:n="urn:a">A<b>B<c xmlns="urn:c"></c>C</b>BT</n:a>AT<x>X<a>A<c xmlns="urn:x"></c>CT</a>AT</x>XT</test>'),
self._writeElement(root))
root = XML(xml)
self.etree.strip_elements(root, '{urn:a}*', 'c')
self.assertEqual(_bytes('<test>TEST<x>X<a>A<c xmlns="urn:x"></c>CT</a>AT</x>XT</test>'),
self._writeElement(root))
root = XML(xml)
self.etree.strip_elements(root, '{urn:a}*', 'c', with_tail=False)
self.assertEqual(_bytes('<test>TESTAT<x>X<a>ABT<c xmlns="urn:x"></c>CT</a>AT</x>XT</test>'),
self._writeElement(root))
def test_strip_tags(self):
XML = self.etree.XML
xml = _bytes('<test>TEST<a>A<b>B<c/>CT</b>BT</a>AT<x>X<a>A<b/>BT<c/>CT</a>AT</x>XT</test>')
root = XML(xml)
self.etree.strip_tags(root, 'a')
self.assertEqual(_bytes('<test>TESTA<b>B<c></c>CT</b>BTAT<x>XA<b></b>BT<c></c>CTAT</x>XT</test>'),
self._writeElement(root))
root = XML(xml)
self.etree.strip_tags(root, 'b', 'c', 'X', 'Y', 'Z')
self.assertEqual(_bytes('<test>TEST<a>ABCTBT</a>AT<x>X<a>ABTCT</a>AT</x>XT</test>'),
self._writeElement(root))
root = XML(xml)
self.etree.strip_tags(root, 'c')
self.assertEqual(_bytes('<test>TEST<a>A<b>BCT</b>BT</a>AT<x>X<a>A<b></b>BTCT</a>AT</x>XT</test>'),
self._writeElement(root))
def test_strip_tags_pi_comment(self):
XML = self.etree.XML
PI = self.etree.ProcessingInstruction
Comment = self.etree.Comment
xml = _bytes('<!--comment1-->\n<?PI1?>\n<test>TEST<!--comment2-->XT<?PI2?></test>\n<!--comment3-->\n<?PI1?>')
root = XML(xml)
self.etree.strip_tags(root, PI)
self.assertEqual(_bytes('<!--comment1-->\n<?PI1?>\n<test>TEST<!--comment2-->XT</test>\n<!--comment3-->\n<?PI1?>'),
self._writeElement(root))
root = XML(xml)
self.etree.strip_tags(root, Comment)
self.assertEqual(_bytes('<!--comment1-->\n<?PI1?>\n<test>TESTXT<?PI2?></test>\n<!--comment3-->\n<?PI1?>'),
self._writeElement(root))
root = XML(xml)
self.etree.strip_tags(root, PI, Comment)
self.assertEqual(_bytes('<!--comment1-->\n<?PI1?>\n<test>TESTXT</test>\n<!--comment3-->\n<?PI1?>'),
self._writeElement(root))
root = XML(xml)
self.etree.strip_tags(root, Comment, PI)
self.assertEqual(_bytes('<!--comment1-->\n<?PI1?>\n<test>TESTXT</test>\n<!--comment3-->\n<?PI1?>'),
self._writeElement(root))
def test_strip_tags_pi_comment_all(self):
XML = self.etree.XML
ElementTree = self.etree.ElementTree
PI = self.etree.ProcessingInstruction
Comment = self.etree.Comment
xml = _bytes('<!--comment1-->\n<?PI1?>\n<test>TEST<!--comment2-->XT<?PI2?></test>\n<!--comment3-->\n<?PI1?>')
root = XML(xml)
self.etree.strip_tags(ElementTree(root), PI)
self.assertEqual(_bytes('<!--comment1-->\n<test>TEST<!--comment2-->XT</test>\n<!--comment3-->'),
self._writeElement(root))
root = XML(xml)
self.etree.strip_tags(ElementTree(root), Comment)
self.assertEqual(_bytes('<?PI1?>\n<test>TESTXT<?PI2?></test>\n<?PI1?>'),
self._writeElement(root))
root = XML(xml)
self.etree.strip_tags(ElementTree(root), PI, Comment)
self.assertEqual(_bytes('<test>TESTXT</test>'),
self._writeElement(root))
root = XML(xml)
self.etree.strip_tags(ElementTree(root), Comment, PI)
self.assertEqual(_bytes('<test>TESTXT</test>'),
self._writeElement(root))
def test_strip_tags_doc_style(self):
XML = self.etree.XML
xml = _bytes('''
<div>
<div>
I like <strong>sheep</strong>.
<br/>
I like lots of <strong>sheep</strong>.
<br/>
Click <a href="http://www.sheep.com">here</a>
for <a href="http://www.sheep.com">those</a> sheep.
<br/>
</div>
</div>
'''.strip())
root = XML(xml)
self.etree.strip_tags(root, 'a')
self.assertEqual(re.sub(_bytes('</?a[^>]*>'), _bytes(''), xml).replace(_bytes('<br/>'), _bytes('<br></br>')),
self._writeElement(root))
root = XML(xml)
self.etree.strip_tags(root, 'a', 'br')
self.assertEqual(re.sub(_bytes('</?a[^>]*>'), _bytes(''),
re.sub(_bytes('<br[^>]*>'), _bytes(''), xml)),
self._writeElement(root))
def test_strip_tags_ns(self):
XML = self.etree.XML
xml = _bytes('<test>TEST<n:a xmlns:n="urn:a">A<b>B<c xmlns="urn:c"/>CT</b>BT</n:a>AT<x>X<a>A<b xmlns="urn:a"/>BT<c xmlns="urn:x"/>CT</a>AT</x>XT</test>')
root = XML(xml)
self.etree.strip_tags(root, 'a')
self.assertEqual(_bytes('<test>TEST<n:a xmlns:n="urn:a">A<b>B<c xmlns="urn:c"></c>CT</b>BT</n:a>AT<x>XA<b xmlns="urn:a"></b>BT<c xmlns="urn:x"></c>CTAT</x>XT</test>'),
self._writeElement(root))
root = XML(xml)
self.etree.strip_tags(root, '{urn:a}b', 'c')
self.assertEqual(_bytes('<test>TEST<n:a xmlns:n="urn:a">A<b>B<c xmlns="urn:c"></c>CT</b>BT</n:a>AT<x>X<a>ABT<c xmlns="urn:x"></c>CT</a>AT</x>XT</test>'),
self._writeElement(root))
root = XML(xml)
self.etree.strip_tags(root, '{urn:a}*', 'c')
self.assertEqual(_bytes('<test>TESTA<b>B<c xmlns="urn:c"></c>CT</b>BTAT<x>X<a>ABT<c xmlns="urn:x"></c>CT</a>AT</x>XT</test>'),
self._writeElement(root))
def test_strip_tags_and_remove(self):
# previously crashed
HTML = self.etree.HTML
root = HTML(_bytes('<div><h1>title</h1> <b>foo</b> <p>boo</p></div>'))[0][0]
self.assertEqual(_bytes('<div><h1>title</h1> <b>foo</b> <p>boo</p></div>'),
self.etree.tostring(root))
self.etree.strip_tags(root, 'b')
self.assertEqual(_bytes('<div><h1>title</h1> foo <p>boo</p></div>'),
self.etree.tostring(root))
root.remove(root[0])
self.assertEqual(_bytes('<div><p>boo</p></div>'),
self.etree.tostring(root))
def test_pi(self):
# lxml.etree separates target and text
Element = self.etree.Element
SubElement = self.etree.SubElement
ProcessingInstruction = self.etree.ProcessingInstruction
a = Element('a')
a.append(ProcessingInstruction('foo', 'some more text'))
self.assertEqual(a[0].target, 'foo')
self.assertEqual(a[0].text, 'some more text')
def test_pi_parse(self):
XML = self.etree.XML
root = XML(_bytes("<test><?mypi my test ?></test>"))
self.assertEqual(root[0].target, "mypi")
self.assertEqual(root[0].text, "my test ")
def test_pi_pseudo_attributes_get(self):
XML = self.etree.XML
root = XML(_bytes("<test><?mypi my='1' test=\" abc \" quotes=\"' '\" only names ?></test>"))
self.assertEqual(root[0].target, "mypi")
self.assertEqual(root[0].get('my'), "1")
self.assertEqual(root[0].get('test'), " abc ")
self.assertEqual(root[0].get('quotes'), "' '")
self.assertEqual(root[0].get('only'), None)
self.assertEqual(root[0].get('names'), None)
self.assertEqual(root[0].get('nope'), None)
def test_pi_pseudo_attributes_attrib(self):
XML = self.etree.XML
root = XML(_bytes("<test><?mypi my='1' test=\" abc \" quotes=\"' '\" only names ?></test>"))
self.assertEqual(root[0].target, "mypi")
self.assertEqual(root[0].attrib['my'], "1")
self.assertEqual(root[0].attrib['test'], " abc ")
self.assertEqual(root[0].attrib['quotes'], "' '")
self.assertRaises(KeyError, root[0].attrib.__getitem__, 'only')
self.assertRaises(KeyError, root[0].attrib.__getitem__, 'names')
self.assertRaises(KeyError, root[0].attrib.__getitem__, 'nope')
def test_deepcopy_pi(self):
# previously caused a crash
ProcessingInstruction = self.etree.ProcessingInstruction
a = ProcessingInstruction("PI", "ONE")
b = copy.deepcopy(a)
b.text = "ANOTHER"
self.assertEqual('ONE', a.text)
self.assertEqual('ANOTHER', b.text)
def test_deepcopy_elementtree_pi(self):
XML = self.etree.XML
tostring = self.etree.tostring
root = XML(_bytes("<?mypi my test ?><test/><!--comment -->"))
tree1 = self.etree.ElementTree(root)
self.assertEqual(_bytes("<?mypi my test ?><test/><!--comment -->"),
tostring(tree1))
tree2 = copy.deepcopy(tree1)
self.assertEqual(_bytes("<?mypi my test ?><test/><!--comment -->"),
tostring(tree2))
root2 = copy.deepcopy(tree1.getroot())
self.assertEqual(_bytes("<test/>"),
tostring(root2))
def test_deepcopy_elementtree_dtd(self):
XML = self.etree.XML
tostring = self.etree.tostring
xml = _bytes('<!DOCTYPE test [\n<!ENTITY entity "tasty">\n]>\n<test/>')
root = XML(xml)
tree1 = self.etree.ElementTree(root)
self.assertEqual(xml, tostring(tree1))
tree2 = copy.deepcopy(tree1)
self.assertEqual(xml, tostring(tree2))
root2 = copy.deepcopy(tree1.getroot())
self.assertEqual(_bytes("<test/>"),
tostring(root2))
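# NOTE: the method below redefines test_attribute_set from earlier in this
# class; Python keeps only this later definition, so only this variant runs.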
def test_attribute_set(self):
# ElementTree accepts arbitrary attribute values
# lxml.etree allows only strings
Element = self.etree.Element
root = Element("root")
root.set("attr", "TEST")
self.assertEqual("TEST", root.get("attr"))
self.assertRaises(TypeError, root.set, "newattr", 5)
def test_parse_remove_comments(self):
fromstring = self.etree.fromstring
tostring = self.etree.tostring
XMLParser = self.etree.XMLParser
xml = _bytes('<a><!--A--><b><!-- B --><c/></b><!--C--></a>')
parser = XMLParser(remove_comments=True)
root = fromstring(xml, parser)
self.assertEqual(
_bytes('<a><b><c/></b></a>'),
tostring(root))
def test_parse_remove_pis(self):
parse = self.etree.parse
tostring = self.etree.tostring
XMLParser = self.etree.XMLParser
xml = _bytes('<?test?><a><?A?><b><?B?><c/></b><?C?></a><?tail?>')
f = BytesIO(xml)
tree = parse(f)
self.assertEqual(
xml,
tostring(tree))
parser = XMLParser(remove_pis=True)
tree = parse(f, parser)
self.assertEqual(
_bytes('<a><b><c/></b></a>'),
tostring(tree))
def test_parse_parser_type_error(self):
# ET raises IOError only
parse = self.etree.parse
self.assertRaises(TypeError, parse, 'notthere.xml', object())
def test_iterparse_tree_comments(self):
# ET removes comments
iterparse = self.etree.iterparse
tostring = self.etree.tostring
f = BytesIO('<a><!--A--><b><!-- B --><c/></b><!--C--></a>')
events = list(iterparse(f))
root = events[-1][1]
self.assertEqual(3, len(events))
self.assertEqual(
_bytes('<a><!--A--><b><!-- B --><c/></b><!--C--></a>'),
tostring(root))
def test_iterparse_comments(self):
# ET removes comments
iterparse = self.etree.iterparse
tostring = self.etree.tostring
def name(event, el):
if event == 'comment':
return el.text
else:
return el.tag
f = BytesIO('<a><!--A--><b><!-- B --><c/></b><!--C--></a>')
events = list(iterparse(f, events=('end', 'comment')))
root = events[-1][1]
self.assertEqual(6, len(events))
self.assertEqual(['A', ' B ', 'c', 'b', 'C', 'a'],
[ name(*item) for item in events ])
self.assertEqual(
_bytes('<a><!--A--><b><!-- B --><c/></b><!--C--></a>'),
tostring(root))
def test_iterparse_pis(self):
# ET removes pis
iterparse = self.etree.iterparse
tostring = self.etree.tostring
ElementTree = self.etree.ElementTree
def name(event, el):
if event == 'pi':
return (el.target, el.text)
else:
return el.tag
f = BytesIO('<?pia a?><a><?pib b?><b><?pic c?><c/></b><?pid d?></a><?pie e?>')
events = list(iterparse(f, events=('end', 'pi')))
root = events[-2][1]
self.assertEqual(8, len(events))
self.assertEqual([('pia','a'), ('pib','b'), ('pic','c'), 'c', 'b',
('pid','d'), 'a', ('pie','e')],
[ name(*item) for item in events ])
self.assertEqual(
_bytes('<?pia a?><a><?pib b?><b><?pic c?><c/></b><?pid d?></a><?pie e?>'),
tostring(ElementTree(root)))
def test_iterparse_remove_comments(self):
iterparse = self.etree.iterparse
tostring = self.etree.tostring
f = BytesIO('<a><!--A--><b><!-- B --><c/></b><!--C--></a>')
events = list(iterparse(f, remove_comments=True,
events=('end', 'comment')))
root = events[-1][1]
self.assertEqual(3, len(events))
self.assertEqual(['c', 'b', 'a'],
[ el.tag for (event, el) in events ])
self.assertEqual(
_bytes('<a><b><c/></b></a>'),
tostring(root))
def test_iterparse_broken(self):
iterparse = self.etree.iterparse
f = BytesIO('<a><b><c/></a>')
# ET raises ExpatError, lxml raises XMLSyntaxError
self.assertRaises(self.etree.XMLSyntaxError, list, iterparse(f))
def test_iterparse_broken_recover(self):
iterparse = self.etree.iterparse
f = BytesIO('<a><b><c/></a>')
it = iterparse(f, events=('start', 'end'), recover=True)
events = [(ev, el.tag) for ev, el in it]
root = it.root
self.assertTrue(root is not None)
self.assertEqual(1, events.count(('start', 'a')))
self.assertEqual(1, events.count(('end', 'a')))
self.assertEqual(1, events.count(('start', 'b')))
self.assertEqual(1, events.count(('end', 'b')))
self.assertEqual(1, events.count(('start', 'c')))
self.assertEqual(1, events.count(('end', 'c')))
def test_iterparse_broken_multi_recover(self):
iterparse = self.etree.iterparse
f = BytesIO('<a><b><c/></d><b><c/></a></b>')
it = iterparse(f, events=('start', 'end'), recover=True)
events = [(ev, el.tag) for ev, el in it]
root = it.root
self.assertTrue(root is not None)
self.assertEqual(1, events.count(('start', 'a')))
self.assertEqual(1, events.count(('end', 'a')))
self.assertEqual(2, events.count(('start', 'b')))
self.assertEqual(2, events.count(('end', 'b')))
self.assertEqual(2, events.count(('start', 'c')))
self.assertEqual(2, events.count(('end', 'c')))
def test_iterparse_strip(self):
iterparse = self.etree.iterparse
f = BytesIO("""
<a> \n \n <b> b test </b> \n
\n\t <c> \n </c> </a> \n """)
iterator = iterparse(f, remove_blank_text=True)
text = [ (element.text, element.tail)
for event, element in iterator ]
self.assertEqual(
[(" b test ", None), (" \n ", None), (None, None)],
text)
def test_iterparse_tag(self):
iterparse = self.etree.iterparse
f = BytesIO('<a><b><d/></b><c/></a>')
iterator = iterparse(f, tag="b", events=('start', 'end'))
events = list(iterator)
root = iterator.root
self.assertEqual(
[('start', root[0]), ('end', root[0])],
events)
def test_iterparse_tag_all(self):
iterparse = self.etree.iterparse
f = BytesIO('<a><b><d/></b><c/></a>')
iterator = iterparse(f, tag="*", events=('start', 'end'))
events = list(iterator)
self.assertEqual(
8,
len(events))
def test_iterparse_tag_ns(self):
iterparse = self.etree.iterparse
f = BytesIO('<a xmlns="urn:test:1"><b><d/></b><c/></a>')
iterator = iterparse(f, tag="{urn:test:1}b", events=('start', 'end'))
events = list(iterator)
root = iterator.root
self.assertEqual(
[('start', root[0]), ('end', root[0])],
events)
def test_iterparse_tag_ns_empty(self):
iterparse = self.etree.iterparse
f = BytesIO('<a><b><d/></b><c/></a>')
iterator = iterparse(f, tag="{}b", events=('start', 'end'))
events = list(iterator)
root = iterator.root
self.assertEqual(
[('start', root[0]), ('end', root[0])],
events)
f = BytesIO('<a xmlns="urn:test:1"><b><d/></b><c/></a>')
iterator = iterparse(f, tag="{}b", events=('start', 'end'))
events = list(iterator)
root = iterator.root
self.assertEqual([], events)
def test_iterparse_tag_ns_all(self):
iterparse = self.etree.iterparse
f = BytesIO('<a xmlns="urn:test:1"><b><d/></b><c/></a>')
iterator = iterparse(f, tag="{urn:test:1}*", events=('start', 'end'))
events = list(iterator)
self.assertEqual(8, len(events))
def test_iterparse_tag_ns_empty_all(self):
iterparse = self.etree.iterparse
f = BytesIO('<a xmlns="urn:test:1"><b><d/></b><c/></a>')
iterator = iterparse(f, tag="{}*", events=('start', 'end'))
events = list(iterator)
self.assertEqual([], events)
f = BytesIO('<a><b><d/></b><c/></a>')
iterator = iterparse(f, tag="{}*", events=('start', 'end'))
events = list(iterator)
self.assertEqual(8, len(events))
def test_iterparse_encoding_error(self):
text = _str('Søk på nettet')
wrong_declaration = "<?xml version='1.0' encoding='UTF-8'?>"
xml_latin1 = (_str('%s<a>%s</a>') % (wrong_declaration, text)
).encode('iso-8859-1')
self.assertRaises(self.etree.ParseError,
list, self.etree.iterparse(BytesIO(xml_latin1)))
def test_iterparse_encoding_8bit_override(self):
text = _str('Søk på nettet', encoding="UTF-8")
wrong_declaration = "<?xml version='1.0' encoding='UTF-8'?>"
xml_latin1 = (_str('%s<a>%s</a>') % (wrong_declaration, text)
).encode('iso-8859-1')
iterator = self.etree.iterparse(BytesIO(xml_latin1),
encoding="iso-8859-1")
self.assertEqual(1, len(list(iterator)))
a = iterator.root
self.assertEqual(a.text, text)
def test_iterparse_keep_cdata(self):
tostring = self.etree.tostring
f = BytesIO('<root><![CDATA[test]]></root>')
context = self.etree.iterparse(f, strip_cdata=False)
content = [ el.text for event,el in context ]
self.assertEqual(['test'], content)
self.assertEqual(_bytes('<root><![CDATA[test]]></root>'),
tostring(context.root))
def test_parser_encoding_unknown(self):
self.assertRaises(
LookupError, self.etree.XMLParser, encoding="hopefully unknown")
def test_parser_encoding(self):
self.etree.XMLParser(encoding="ascii")
self.etree.XMLParser(encoding="utf-8")
self.etree.XMLParser(encoding="iso-8859-1")
def test_feed_parser_recover(self):
parser = self.etree.XMLParser(recover=True)
parser.feed('<?xml version=')
parser.feed('"1.0"?><ro')
parser.feed('ot><')
parser.feed('a test="works"')
parser.feed('><othertag/></root') # <a> not closed!
parser.feed('>')
root = parser.close()
self.assertEqual(root.tag, "root")
self.assertEqual(len(root), 1)
self.assertEqual(root[0].tag, "a")
self.assertEqual(root[0].get("test"), "works")
self.assertEqual(len(root[0]), 1)
self.assertEqual(root[0][0].tag, "othertag")
# FIXME: would be nice to get some errors logged ...
#self.assertTrue(len(parser.error_log) > 0, "error log is empty")
def test_elementtree_parser_target_type_error(self):
assertEqual = self.assertEqual
assertFalse = self.assertFalse
events = []
class Target(object):
def start(self, tag, attrib):
events.append("start")
assertFalse(attrib)
assertEqual("TAG", tag)
def end(self, tag):
events.append("end")
assertEqual("TAG", tag)
def close(self):
return "DONE" # no Element!
parser = self.etree.XMLParser(target=Target())
tree = self.etree.ElementTree()
self.assertRaises(TypeError,
tree.parse, BytesIO("<TAG/>"), parser=parser)
self.assertEqual(["start", "end"], events)
def test_parser_target_feed_exception(self):
# ET doesn't call .close() on errors
events = []
class Target(object):
def start(self, tag, attrib):
events.append("start-" + tag)
def end(self, tag):
events.append("end-" + tag)
if tag == 'a':
raise ValueError("dead and gone")
def data(self, data):
events.append("data-" + data)
def close(self):
events.append("close")
return "DONE"
parser = self.etree.XMLParser(target=Target())
try:
parser.feed(_bytes('<root>A<a>ca</a>B</root>'))
done = parser.close()
self.fail("error expected, but parsing succeeded")
except ValueError:
done = 'value error received as expected'
self.assertEqual(["start-root", "data-A", "start-a",
"data-ca", "end-a", "close"],
events)
def test_parser_target_fromstring_exception(self):
# ET doesn't call .close() on errors
events = []
class Target(object):
def start(self, tag, attrib):
events.append("start-" + tag)
def end(self, tag):
events.append("end-" + tag)
if tag == 'a':
raise ValueError("dead and gone")
def data(self, data):
events.append("data-" + data)
def close(self):
events.append("close")
return "DONE"
parser = self.etree.XMLParser(target=Target())
try:
done = self.etree.fromstring(_bytes('<root>A<a>ca</a>B</root>'),
parser=parser)
self.fail("error expected, but parsing succeeded")
except ValueError:
done = 'value error received as expected'
self.assertEqual(["start-root", "data-A", "start-a",
"data-ca", "end-a", "close"],
events)
def test_parser_target_comment(self):
events = []
class Target(object):
def start(self, tag, attrib):
events.append("start-" + tag)
def end(self, tag):
events.append("end-" + tag)
def data(self, data):
events.append("data-" + data)
def comment(self, text):
events.append("comment-" + text)
def close(self):
return "DONE"
parser = self.etree.XMLParser(target=Target())
parser.feed(_bytes('<!--a--><root>A<!--b--><sub/><!--c-->B</root><!--d-->'))
done = parser.close()
self.assertEqual("DONE", done)
self.assertEqual(["comment-a", "start-root", "data-A", "comment-b",
"start-sub", "end-sub", "comment-c", "data-B",
"end-root", "comment-d"],
events)
def test_parser_target_pi(self):
events = []
class Target(object):
def start(self, tag, attrib):
events.append("start-" + tag)
def end(self, tag):
events.append("end-" + tag)
def data(self, data):
events.append("data-" + data)
def pi(self, target, data):
events.append("pi-" + target + "-" + data)
def close(self):
return "DONE"
parser = self.etree.XMLParser(target=Target())
parser.feed(_bytes('<?test a?><root>A<?test b?>B</root><?test c?>'))
done = parser.close()
self.assertEqual("DONE", done)
self.assertEqual(["pi-test-a", "start-root", "data-A", "pi-test-b",
"data-B", "end-root", "pi-test-c"],
events)
def test_parser_target_cdata(self):
events = []
class Target(object):
def start(self, tag, attrib):
events.append("start-" + tag)
def end(self, tag):
events.append("end-" + tag)
def data(self, data):
events.append("data-" + data)
def close(self):
return "DONE"
parser = self.etree.XMLParser(target=Target(),
strip_cdata=False)
parser.feed(_bytes('<root>A<a><![CDATA[ca]]></a>B</root>'))
done = parser.close()
self.assertEqual("DONE", done)
self.assertEqual(["start-root", "data-A", "start-a",
"data-ca", "end-a", "data-B", "end-root"],
events)
def test_parser_target_recover(self):
events = []
class Target(object):
def start(self, tag, attrib):
events.append("start-" + tag)
def end(self, tag):
events.append("end-" + tag)
def data(self, data):
events.append("data-" + data)
def close(self):
events.append("close")
return "DONE"
parser = self.etree.XMLParser(target=Target(),
recover=True)
parser.feed(_bytes('<root>A<a>ca</a>B</not-root>'))
done = parser.close()
self.assertEqual("DONE", done)
self.assertEqual(["start-root", "data-A", "start-a",
"data-ca", "end-a", "data-B",
"end-root", "close"],
events)
def test_iterwalk_tag(self):
iterwalk = self.etree.iterwalk
root = self.etree.XML(_bytes('<a><b><d/></b><c/></a>'))
iterator = iterwalk(root, tag="b", events=('start', 'end'))
events = list(iterator)
self.assertEqual(
[('start', root[0]), ('end', root[0])],
events)
def test_iterwalk_tag_all(self):
iterwalk = self.etree.iterwalk
root = self.etree.XML(_bytes('<a><b><d/></b><c/></a>'))
iterator = iterwalk(root, tag="*", events=('start', 'end'))
events = list(iterator)
self.assertEqual(
8,
len(events))
def test_iterwalk(self):
iterwalk = self.etree.iterwalk
root = self.etree.XML(_bytes('<a><b></b><c/></a>'))
events = list(iterwalk(root))
self.assertEqual(
[('end', root[0]), ('end', root[1]), ('end', root)],
events)
def test_iterwalk_start(self):
iterwalk = self.etree.iterwalk
root = self.etree.XML(_bytes('<a><b></b><c/></a>'))
iterator = iterwalk(root, events=('start',))
events = list(iterator)
self.assertEqual(
[('start', root), ('start', root[0]), ('start', root[1])],
events)
def test_iterwalk_start_end(self):
iterwalk = self.etree.iterwalk
root = self.etree.XML(_bytes('<a><b></b><c/></a>'))
iterator = iterwalk(root, events=('start','end'))
events = list(iterator)
self.assertEqual(
[('start', root), ('start', root[0]), ('end', root[0]),
('start', root[1]), ('end', root[1]), ('end', root)],
events)
def test_iterwalk_clear(self):
iterwalk = self.etree.iterwalk
root = self.etree.XML(_bytes('<a><b></b><c/></a>'))
iterator = iterwalk(root)
for event, elem in iterator:
elem.clear()
self.assertEqual(0,
len(root))
def test_iterwalk_attrib_ns(self):
iterwalk = self.etree.iterwalk
root = self.etree.XML(_bytes('<a xmlns="ns1"><b><c xmlns="ns2"/></b></a>'))
attr_name = '{testns}bla'
events = []
iterator = iterwalk(root, events=('start','end','start-ns','end-ns'))
for event, elem in iterator:
events.append(event)
if event == 'start':
if elem.tag != '{ns1}a':
elem.set(attr_name, 'value')
self.assertEqual(
['start-ns', 'start', 'start', 'start-ns', 'start',
'end', 'end-ns', 'end', 'end', 'end-ns'],
events)
self.assertEqual(
None,
root.get(attr_name))
self.assertEqual(
'value',
root[0].get(attr_name))
def test_iterwalk_getiterator(self):
iterwalk = self.etree.iterwalk
root = self.etree.XML(_bytes('<a><b><d/></b><c/></a>'))
counts = []
for event, elem in iterwalk(root):
counts.append(len(list(elem.getiterator())))
self.assertEqual(
[1,2,1,4],
counts)
def test_resolve_string_dtd(self):
parse = self.etree.parse
parser = self.etree.XMLParser(dtd_validation=True)
assertEqual = self.assertEqual
test_url = _str("__nosuch.dtd")
class MyResolver(self.etree.Resolver):
def resolve(self, url, id, context):
assertEqual(url, test_url)
return self.resolve_string(
_str('''<!ENTITY myentity "%s">
<!ELEMENT doc ANY>''') % url, context)
parser.resolvers.add(MyResolver())
xml = _str('<!DOCTYPE doc SYSTEM "%s"><doc>&myentity;</doc>') % test_url
tree = parse(StringIO(xml), parser)
root = tree.getroot()
self.assertEqual(root.text, test_url)
def test_resolve_bytes_dtd(self):
parse = self.etree.parse
parser = self.etree.XMLParser(dtd_validation=True)
assertEqual = self.assertEqual
test_url = _str("__nosuch.dtd")
class MyResolver(self.etree.Resolver):
def resolve(self, url, id, context):
assertEqual(url, test_url)
return self.resolve_string(
(_str('''<!ENTITY myentity "%s">
<!ELEMENT doc ANY>''') % url).encode('utf-8'),
context)
parser.resolvers.add(MyResolver())
xml = _str('<!DOCTYPE doc SYSTEM "%s"><doc>&myentity;</doc>') % test_url
tree = parse(StringIO(xml), parser)
root = tree.getroot()
self.assertEqual(root.text, test_url)
def test_resolve_filelike_dtd(self):
parse = self.etree.parse
parser = self.etree.XMLParser(dtd_validation=True)
assertEqual = self.assertEqual
test_url = _str("__nosuch.dtd")
class MyResolver(self.etree.Resolver):
def resolve(self, url, id, context):
assertEqual(url, test_url)
return self.resolve_file(
SillyFileLike(
_str('''<!ENTITY myentity "%s">
<!ELEMENT doc ANY>''') % url), context)
parser.resolvers.add(MyResolver())
xml = _str('<!DOCTYPE doc SYSTEM "%s"><doc>&myentity;</doc>') % test_url
tree = parse(StringIO(xml), parser)
root = tree.getroot()
self.assertEqual(root.text, test_url)
def test_resolve_filename_dtd(self):
parse = self.etree.parse
parser = self.etree.XMLParser(attribute_defaults=True)
assertEqual = self.assertEqual
test_url = _str("__nosuch.dtd")
class MyResolver(self.etree.Resolver):
def resolve(self, url, id, context):
assertEqual(url, test_url)
return self.resolve_filename(
fileInTestDir('test.dtd'), context)
parser.resolvers.add(MyResolver())
xml = _str('<!DOCTYPE a SYSTEM "%s"><a><b/></a>') % test_url
tree = parse(StringIO(xml), parser)
root = tree.getroot()
self.assertEqual(
root.attrib, {'default': 'valueA'})
self.assertEqual(
root[0].attrib, {'default': 'valueB'})
def test_resolve_filename_dtd_relative(self):
parse = self.etree.parse
parser = self.etree.XMLParser(attribute_defaults=True)
assertEqual = self.assertEqual
test_url = _str("__nosuch.dtd")
class MyResolver(self.etree.Resolver):
def resolve(self, url, id, context):
assertEqual(url, fileUrlInTestDir(test_url))
return self.resolve_filename(
fileUrlInTestDir('test.dtd'), context)
parser.resolvers.add(MyResolver())
xml = _str('<!DOCTYPE a SYSTEM "%s"><a><b/></a>') % test_url
tree = parse(StringIO(xml), parser,
base_url=fileUrlInTestDir('__test.xml'))
root = tree.getroot()
self.assertEqual(
root.attrib, {'default': 'valueA'})
self.assertEqual(
root[0].attrib, {'default': 'valueB'})
def test_resolve_file_dtd(self):
parse = self.etree.parse
parser = self.etree.XMLParser(attribute_defaults=True)
assertEqual = self.assertEqual
test_url = _str("__nosuch.dtd")
class MyResolver(self.etree.Resolver):
def resolve(self, url, id, context):
assertEqual(url, test_url)
return self.resolve_file(
open(fileInTestDir('test.dtd'), 'rb'), context)
parser.resolvers.add(MyResolver())
xml = _str('<!DOCTYPE a SYSTEM "%s"><a><b/></a>') % test_url
tree = parse(StringIO(xml), parser)
root = tree.getroot()
self.assertEqual(
root.attrib, {'default': 'valueA'})
self.assertEqual(
root[0].attrib, {'default': 'valueB'})
def test_resolve_empty(self):
parse = self.etree.parse
parser = self.etree.XMLParser(load_dtd=True)
assertEqual = self.assertEqual
test_url = _str("__nosuch.dtd")
class check(object):
resolved = False
class MyResolver(self.etree.Resolver):
def resolve(self, url, id, context):
assertEqual(url, test_url)
check.resolved = True
return self.resolve_empty(context)
parser.resolvers.add(MyResolver())
xml = _str('<!DOCTYPE doc SYSTEM "%s"><doc>&myentity;</doc>') % test_url
self.assertRaises(etree.XMLSyntaxError, parse, StringIO(xml), parser)
self.assertTrue(check.resolved)
def test_resolve_error(self):
parse = self.etree.parse
parser = self.etree.XMLParser(dtd_validation=True)
class _LocalException(Exception):
pass
class MyResolver(self.etree.Resolver):
def resolve(self, url, id, context):
raise _LocalException
parser.resolvers.add(MyResolver())
xml = '<!DOCTYPE doc SYSTEM "test"><doc>&myentity;</doc>'
self.assertRaises(_LocalException, parse, BytesIO(xml), parser)
if etree.LIBXML_VERSION > (2,6,20):
def test_entity_parse(self):
parse = self.etree.parse
tostring = self.etree.tostring
parser = self.etree.XMLParser(resolve_entities=False)
Entity = self.etree.Entity
xml = _bytes('<!DOCTYPE doc SYSTEM "test"><doc>&myentity;</doc>')
tree = parse(BytesIO(xml), parser)
root = tree.getroot()
self.assertEqual(root[0].tag, Entity)
self.assertEqual(root[0].text, "&myentity;")
self.assertEqual(root[0].tail, None)
self.assertEqual(root[0].name, "myentity")
self.assertEqual(_bytes('<doc>&myentity;</doc>'),
tostring(root))
def test_entity_restructure(self):
xml = _bytes('''<!DOCTYPE root [ <!ENTITY nbsp " "> ]>
<root>
<child1/>
<child2/>
<child3> </child3>
</root>''')
parser = self.etree.XMLParser(resolve_entities=False)
root = etree.fromstring(xml, parser)
self.assertEqual([ el.tag for el in root ],
['child1', 'child2', 'child3'])
root[0] = root[-1]
self.assertEqual([ el.tag for el in root ],
['child3', 'child2'])
self.assertEqual(root[0][0].text, ' ')
self.assertEqual(root[0][0].name, 'nbsp')
def test_entity_append(self):
Entity = self.etree.Entity
Element = self.etree.Element
tostring = self.etree.tostring
root = Element("root")
root.append( Entity("test") )
self.assertEqual(root[0].tag, Entity)
self.assertEqual(root[0].text, "&test;")
self.assertEqual(root[0].tail, None)
self.assertEqual(root[0].name, "test")
self.assertEqual(_bytes('<root>&test;</root>'),
tostring(root))
def test_entity_values(self):
Entity = self.etree.Entity
self.assertEqual(Entity("test").text, '&test;')
self.assertEqual(Entity("#17683").text, '䔓')
self.assertEqual(Entity("#x1768").text, 'ᝨ')
self.assertEqual(Entity("#x98AF").text, '颯')
def test_entity_error(self):
Entity = self.etree.Entity
self.assertRaises(ValueError, Entity, 'a b c')
self.assertRaises(ValueError, Entity, 'a,b')
self.assertRaises(ValueError, Entity, 'a\0b')
self.assertRaises(ValueError, Entity, '#abc')
self.assertRaises(ValueError, Entity, '#xxyz')
def test_cdata(self):
CDATA = self.etree.CDATA
Element = self.etree.Element
tostring = self.etree.tostring
root = Element("root")
root.text = CDATA('test')
self.assertEqual('test',
root.text)
self.assertEqual(_bytes('<root><![CDATA[test]]></root>'),
tostring(root))
def test_cdata_type(self):
CDATA = self.etree.CDATA
Element = self.etree.Element
root = Element("root")
root.text = CDATA("test")
self.assertEqual('test', root.text)
root.text = CDATA(_str("test"))
self.assertEqual('test', root.text)
self.assertRaises(TypeError, CDATA, 1)
def test_cdata_errors(self):
CDATA = self.etree.CDATA
Element = self.etree.Element
root = Element("root")
cdata = CDATA('test')
self.assertRaises(TypeError,
setattr, root, 'tail', cdata)
self.assertRaises(TypeError,
root.set, 'attr', cdata)
self.assertRaises(TypeError,
operator.setitem, root.attrib, 'attr', cdata)
def test_cdata_parser(self):
tostring = self.etree.tostring
parser = self.etree.XMLParser(strip_cdata=False)
root = self.etree.XML(_bytes('<root><![CDATA[test]]></root>'), parser)
self.assertEqual('test', root.text)
self.assertEqual(_bytes('<root><![CDATA[test]]></root>'),
tostring(root))
def test_cdata_xpath(self):
tostring = self.etree.tostring
parser = self.etree.XMLParser(strip_cdata=False)
root = self.etree.XML(_bytes('<root><![CDATA[test]]></root>'), parser)
self.assertEqual(_bytes('<root><![CDATA[test]]></root>'),
tostring(root))
self.assertEqual(['test'], root.xpath('//text()'))
# TypeError in etree, AssertionError in ElementTree;
def test_setitem_assert(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
self.assertRaises(TypeError,
a.__setitem__, 0, 'foo')
def test_append_error(self):
Element = self.etree.Element
root = Element('root')
# raises AssertionError in ElementTree
self.assertRaises(TypeError, root.append, None)
self.assertRaises(TypeError, root.extend, [None])
self.assertRaises(TypeError, root.extend, [Element('one'), None])
self.assertEqual('one', root[0].tag)
def test_append_recursive_error(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
root = Element('root')
self.assertRaises(ValueError, root.append, root)
child = SubElement(root, 'child')
self.assertRaises(ValueError, child.append, root)
child2 = SubElement(child, 'child2')
self.assertRaises(ValueError, child2.append, root)
self.assertRaises(ValueError, child2.append, child)
self.assertEqual('child2', root[0][0].tag)
def test_addnext(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
root = Element('root')
SubElement(root, 'a')
SubElement(root, 'b')
self.assertEqual(['a', 'b'],
[c.tag for c in root])
root[1].addnext(root[0])
self.assertEqual(['b', 'a'],
[c.tag for c in root])
def test_addprevious(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
root = Element('root')
SubElement(root, 'a')
SubElement(root, 'b')
self.assertEqual(['a', 'b'],
[c.tag for c in root])
root[0].addprevious(root[1])
self.assertEqual(['b', 'a'],
[c.tag for c in root])
def test_addnext_cycle(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
root = Element('root')
a = SubElement(root, 'a')
b = SubElement(a, 'b')
# appending parent as sibling is forbidden
self.assertRaises(ValueError, b.addnext, a)
self.assertEqual(['a'], [c.tag for c in root])
self.assertEqual(['b'], [c.tag for c in a])
def test_addprevious_cycle(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
root = Element('root')
a = SubElement(root, 'a')
b = SubElement(a, 'b')
# appending parent as sibling is forbidden
self.assertRaises(ValueError, b.addprevious, a)
self.assertEqual(['a'], [c.tag for c in root])
self.assertEqual(['b'], [c.tag for c in a])
def test_addnext_cycle_long(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
root = Element('root')
a = SubElement(root, 'a')
b = SubElement(a, 'b')
c = SubElement(b, 'c')
# appending parent as sibling is forbidden
self.assertRaises(ValueError, c.addnext, a)
def test_addprevious_cycle_long(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
root = Element('root')
a = SubElement(root, 'a')
b = SubElement(a, 'b')
c = SubElement(b, 'c')
# appending parent as sibling is forbidden
self.assertRaises(ValueError, c.addprevious, a)
def test_addprevious_noops(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
root = Element('root')
a = SubElement(root, 'a')
b = SubElement(root, 'b')
a.addprevious(a)
self.assertEqual('a', root[0].tag)
self.assertEqual('b', root[1].tag)
b.addprevious(b)
self.assertEqual('a', root[0].tag)
self.assertEqual('b', root[1].tag)
b.addprevious(a)
self.assertEqual('a', root[0].tag)
self.assertEqual('b', root[1].tag)
def test_addnext_noops(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
root = Element('root')
a = SubElement(root, 'a')
b = SubElement(root, 'b')
a.addnext(a)
self.assertEqual('a', root[0].tag)
self.assertEqual('b', root[1].tag)
b.addnext(b)
self.assertEqual('a', root[0].tag)
self.assertEqual('b', root[1].tag)
a.addnext(b)
self.assertEqual('a', root[0].tag)
self.assertEqual('b', root[1].tag)
def test_addnext_root(self):
Element = self.etree.Element
a = Element('a')
b = Element('b')
self.assertRaises(TypeError, a.addnext, b)
def test_addprevious_pi(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
PI = self.etree.PI
root = Element('root')
SubElement(root, 'a')
pi = PI('TARGET', 'TEXT')
pi.tail = "TAIL"
self.assertEqual(_bytes('<root><a></a></root>'),
self._writeElement(root))
root[0].addprevious(pi)
self.assertEqual(_bytes('<root><?TARGET TEXT?>TAIL<a></a></root>'),
self._writeElement(root))
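    # PIs added before/after the root element become document-level siblings; their tail text is not serialised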
def test_addprevious_root_pi(self):
Element = self.etree.Element
PI = self.etree.PI
root = Element('root')
pi = PI('TARGET', 'TEXT')
pi.tail = "TAIL"
self.assertEqual(_bytes('<root></root>'),
self._writeElement(root))
root.addprevious(pi)
self.assertEqual(_bytes('<?TARGET TEXT?>\n<root></root>'),
self._writeElement(root))
def test_addnext_pi(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
PI = self.etree.PI
root = Element('root')
SubElement(root, 'a')
pi = PI('TARGET', 'TEXT')
pi.tail = "TAIL"
self.assertEqual(_bytes('<root><a></a></root>'),
self._writeElement(root))
root[0].addnext(pi)
self.assertEqual(_bytes('<root><a></a><?TARGET TEXT?>TAIL</root>'),
self._writeElement(root))
def test_addnext_root_pi(self):
Element = self.etree.Element
PI = self.etree.PI
root = Element('root')
pi = PI('TARGET', 'TEXT')
pi.tail = "TAIL"
self.assertEqual(_bytes('<root></root>'),
self._writeElement(root))
root.addnext(pi)
self.assertEqual(_bytes('<root></root>\n<?TARGET TEXT?>'),
self._writeElement(root))
def test_addnext_comment(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
Comment = self.etree.Comment
root = Element('root')
SubElement(root, 'a')
comment = Comment('TEXT ')
comment.tail = "TAIL"
self.assertEqual(_bytes('<root><a></a></root>'),
self._writeElement(root))
root[0].addnext(comment)
self.assertEqual(_bytes('<root><a></a><!--TEXT -->TAIL</root>'),
self._writeElement(root))
def test_addnext_root_comment(self):
Element = self.etree.Element
Comment = self.etree.Comment
root = Element('root')
comment = Comment('TEXT ')
comment.tail = "TAIL"
self.assertEqual(_bytes('<root></root>'),
self._writeElement(root))
root.addnext(comment)
self.assertEqual(_bytes('<root></root>\n<!--TEXT -->'),
self._writeElement(root))
def test_addprevious_comment(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
Comment = self.etree.Comment
root = Element('root')
SubElement(root, 'a')
comment = Comment('TEXT ')
comment.tail = "TAIL"
self.assertEqual(_bytes('<root><a></a></root>'),
self._writeElement(root))
root[0].addprevious(comment)
self.assertEqual(_bytes('<root><!--TEXT -->TAIL<a></a></root>'),
self._writeElement(root))
def test_addprevious_root_comment(self):
Element = self.etree.Element
Comment = self.etree.Comment
root = Element('root')
comment = Comment('TEXT ')
comment.tail = "TAIL"
self.assertEqual(_bytes('<root></root>'),
self._writeElement(root))
root.addprevious(comment)
self.assertEqual(_bytes('<!--TEXT -->\n<root></root>'),
self._writeElement(root))
    # ET's Elements have items() and keys(), but not values()
def test_attribute_values(self):
XML = self.etree.XML
root = XML(_bytes('<doc alpha="Alpha" beta="Beta" gamma="Gamma"/>'))
values = root.values()
values.sort()
self.assertEqual(['Alpha', 'Beta', 'Gamma'], values)
# gives error in ElementTree
def test_comment_empty(self):
Element = self.etree.Element
Comment = self.etree.Comment
a = Element('a')
a.append(Comment())
self.assertEqual(
_bytes('<a><!----></a>'),
self._writeElement(a))
# ElementTree ignores comments
def test_comment_parse_empty(self):
ElementTree = self.etree.ElementTree
tostring = self.etree.tostring
xml = _bytes('<a><b/><!----><c/></a>')
f = BytesIO(xml)
doc = ElementTree(file=f)
a = doc.getroot()
self.assertEqual(
'',
a[1].text)
self.assertEqual(
xml,
tostring(a))
# ElementTree ignores comments
def test_comment_no_proxy_yet(self):
ElementTree = self.etree.ElementTree
f = BytesIO('<a><b></b><!-- hoi --><c></c></a>')
doc = ElementTree(file=f)
a = doc.getroot()
self.assertEqual(
' hoi ',
a[1].text)
# does not raise an exception in ElementTree
def test_comment_immutable(self):
Element = self.etree.Element
Comment = self.etree.Comment
c = Comment()
el = Element('myel')
self.assertRaises(TypeError, c.append, el)
self.assertRaises(TypeError, c.insert, 0, el)
self.assertRaises(TypeError, c.set, "myattr", "test")
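    # the attrib mapping of a Comment is empty and rejects all modifications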
def test_comment_immutable_attrib(self):
c = self.etree.Comment()
self.assertEqual(0, len(c.attrib))
self.assertFalse(c.attrib.__contains__('nope'))
self.assertFalse('nope' in c.attrib)
self.assertFalse('nope' in c.attrib.keys())
self.assertFalse('nope' in c.attrib.values())
self.assertFalse(('nope', 'huhu') in c.attrib.items())
self.assertEqual([], list(c.attrib))
self.assertEqual([], list(c.attrib.keys()))
self.assertEqual([], list(c.attrib.items()))
self.assertEqual([], list(c.attrib.values()))
self.assertEqual([], list(c.attrib.iterkeys()))
self.assertEqual([], list(c.attrib.iteritems()))
self.assertEqual([], list(c.attrib.itervalues()))
self.assertEqual('HUHU', c.attrib.pop('nope', 'HUHU'))
self.assertRaises(KeyError, c.attrib.pop, 'nope')
self.assertRaises(KeyError, c.attrib.__getitem__, 'only')
self.assertRaises(KeyError, c.attrib.__getitem__, 'names')
self.assertRaises(KeyError, c.attrib.__getitem__, 'nope')
self.assertRaises(KeyError, c.attrib.__setitem__, 'nope', 'yep')
self.assertRaises(KeyError, c.attrib.__delitem__, 'nope')
# test passing 'None' to dump()
def test_dump_none(self):
self.assertRaises(TypeError, self.etree.dump, None)
def test_prefix(self):
ElementTree = self.etree.ElementTree
f = BytesIO('<a xmlns:foo="http://www.infrae.com/ns/1"><foo:b/></a>')
doc = ElementTree(file=f)
a = doc.getroot()
self.assertEqual(
None,
a.prefix)
self.assertEqual(
'foo',
a[0].prefix)
def test_prefix_default_ns(self):
ElementTree = self.etree.ElementTree
f = BytesIO('<a xmlns="http://www.infrae.com/ns/1"><b/></a>')
doc = ElementTree(file=f)
a = doc.getroot()
self.assertEqual(
None,
a.prefix)
self.assertEqual(
None,
a[0].prefix)
def test_getparent(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
c = SubElement(a, 'c')
d = SubElement(b, 'd')
self.assertEqual(
None,
a.getparent())
self.assertEqual(
a,
b.getparent())
self.assertEqual(
b.getparent(),
c.getparent())
self.assertEqual(
b,
d.getparent())
def test_iterchildren(self):
XML = self.etree.XML
root = XML(_bytes('<doc><one/><two>Two</two>Hm<three/></doc>'))
result = []
for el in root.iterchildren():
result.append(el.tag)
self.assertEqual(['one', 'two', 'three'], result)
def test_iterchildren_reversed(self):
XML = self.etree.XML
root = XML(_bytes('<doc><one/><two>Two</two>Hm<three/></doc>'))
result = []
for el in root.iterchildren(reversed=True):
result.append(el.tag)
self.assertEqual(['three', 'two', 'one'], result)
def test_iterchildren_tag(self):
XML = self.etree.XML
root = XML(_bytes('<doc><one/><two>Two</two>Hm<two>Bla</two></doc>'))
result = []
for el in root.iterchildren(tag='two'):
result.append(el.text)
self.assertEqual(['Two', 'Bla'], result)
def test_iterchildren_tag_posarg(self):
XML = self.etree.XML
root = XML(_bytes('<doc><one/><two>Two</two>Hm<two>Bla</two></doc>'))
result = []
for el in root.iterchildren('two'):
result.append(el.text)
self.assertEqual(['Two', 'Bla'], result)
def test_iterchildren_tag_reversed(self):
XML = self.etree.XML
root = XML(_bytes('<doc><one/><two>Two</two>Hm<two>Bla</two></doc>'))
result = []
for el in root.iterchildren(reversed=True, tag='two'):
result.append(el.text)
self.assertEqual(['Bla', 'Two'], result)
def test_iterchildren_tag_multiple(self):
XML = self.etree.XML
root = XML(_bytes('<doc><one/><two>Two</two>Hm<two>Bla</two><three/></doc>'))
result = []
for el in root.iterchildren(tag=['two', 'three']):
result.append(el.text)
self.assertEqual(['Two', 'Bla', None], result)
def test_iterchildren_tag_multiple_posarg(self):
XML = self.etree.XML
root = XML(_bytes('<doc><one/><two>Two</two>Hm<two>Bla</two><three/></doc>'))
result = []
for el in root.iterchildren('two', 'three'):
result.append(el.text)
self.assertEqual(['Two', 'Bla', None], result)
def test_iterchildren_tag_multiple_reversed(self):
XML = self.etree.XML
root = XML(_bytes('<doc><one/><two>Two</two>Hm<two>Bla</two><three/></doc>'))
result = []
for el in root.iterchildren(reversed=True, tag=['two', 'three']):
result.append(el.text)
self.assertEqual([None, 'Bla', 'Two'], result)
def test_iterancestors(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
c = SubElement(a, 'c')
d = SubElement(b, 'd')
self.assertEqual(
[],
list(a.iterancestors()))
self.assertEqual(
[a],
list(b.iterancestors()))
self.assertEqual(
[a],
list(c.iterancestors()))
self.assertEqual(
[b, a],
list(d.iterancestors()))
def test_iterancestors_tag(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
c = SubElement(a, 'c')
d = SubElement(b, 'd')
self.assertEqual(
[a],
list(d.iterancestors('a')))
self.assertEqual(
[a],
list(d.iterancestors(tag='a')))
self.assertEqual(
[b, a],
list(d.iterancestors('*')))
self.assertEqual(
[b, a],
list(d.iterancestors(tag='*')))
def test_iterancestors_tag_multiple(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
c = SubElement(a, 'c')
d = SubElement(b, 'd')
self.assertEqual(
[b, a],
list(d.iterancestors(tag=('a', 'b'))))
self.assertEqual(
[b, a],
list(d.iterancestors('a', 'b')))
self.assertEqual(
[],
list(d.iterancestors(tag=('w', 'x', 'y', 'z'))))
self.assertEqual(
[],
list(d.iterancestors('w', 'x', 'y', 'z')))
self.assertEqual(
[],
list(d.iterancestors(tag=('d', 'x'))))
self.assertEqual(
[],
list(d.iterancestors('d', 'x')))
self.assertEqual(
[b, a],
list(d.iterancestors(tag=('b', '*'))))
self.assertEqual(
[b, a],
list(d.iterancestors('b', '*')))
self.assertEqual(
[b],
list(d.iterancestors(tag=('b', 'c'))))
self.assertEqual(
[b],
list(d.iterancestors('b', 'c')))
def test_iterdescendants(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
c = SubElement(a, 'c')
d = SubElement(b, 'd')
e = SubElement(c, 'e')
self.assertEqual(
[b, d, c, e],
list(a.iterdescendants()))
self.assertEqual(
[],
list(d.iterdescendants()))
def test_iterdescendants_tag(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
c = SubElement(a, 'c')
d = SubElement(b, 'd')
e = SubElement(c, 'e')
self.assertEqual(
[],
list(a.iterdescendants('a')))
self.assertEqual(
[],
list(a.iterdescendants(tag='a')))
a2 = SubElement(e, 'a')
self.assertEqual(
[a2],
list(a.iterdescendants('a')))
self.assertEqual(
[a2],
list(c.iterdescendants('a')))
self.assertEqual(
[a2],
list(c.iterdescendants(tag='a')))
def test_iterdescendants_tag_multiple(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
c = SubElement(a, 'c')
d = SubElement(b, 'd')
e = SubElement(c, 'e')
self.assertEqual(
[b, e],
list(a.iterdescendants(tag=('a', 'b', 'e'))))
self.assertEqual(
[b, e],
list(a.iterdescendants('a', 'b', 'e')))
a2 = SubElement(e, 'a')
self.assertEqual(
[b, a2],
list(a.iterdescendants(tag=('a', 'b'))))
self.assertEqual(
[b, a2],
list(a.iterdescendants('a', 'b')))
self.assertEqual(
[],
list(c.iterdescendants(tag=('x', 'y', 'z'))))
self.assertEqual(
[],
list(c.iterdescendants('x', 'y', 'z')))
self.assertEqual(
[b, d, c, e, a2],
list(a.iterdescendants(tag=('x', 'y', 'z', '*'))))
self.assertEqual(
[b, d, c, e, a2],
list(a.iterdescendants('x', 'y', 'z', '*')))
def test_getroottree(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
c = SubElement(a, 'c')
d = SubElement(b, 'd')
self.assertEqual(
a,
a.getroottree().getroot())
self.assertEqual(
a,
b.getroottree().getroot())
self.assertEqual(
a,
d.getroottree().getroot())
def test_getnext(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
c = SubElement(a, 'c')
self.assertEqual(
None,
a.getnext())
self.assertEqual(
c,
b.getnext())
self.assertEqual(
None,
c.getnext())
def test_getprevious(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
c = SubElement(a, 'c')
d = SubElement(b, 'd')
self.assertEqual(
None,
a.getprevious())
self.assertEqual(
b,
c.getprevious())
self.assertEqual(
None,
b.getprevious())
def test_itersiblings(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
c = SubElement(a, 'c')
d = SubElement(b, 'd')
self.assertEqual(
[],
list(a.itersiblings()))
self.assertEqual(
[c],
list(b.itersiblings()))
self.assertEqual(
[],
list(c.itersiblings()))
self.assertEqual(
[b],
list(c.itersiblings(preceding=True)))
self.assertEqual(
[],
list(b.itersiblings(preceding=True)))
def test_itersiblings_tag(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
c = SubElement(a, 'c')
d = SubElement(b, 'd')
self.assertEqual(
[],
list(a.itersiblings(tag='XXX')))
self.assertEqual(
[c],
list(b.itersiblings(tag='c')))
self.assertEqual(
[c],
list(b.itersiblings(tag='*')))
self.assertEqual(
[b],
list(c.itersiblings(preceding=True, tag='b')))
self.assertEqual(
[],
list(c.itersiblings(preceding=True, tag='c')))
def test_itersiblings_tag_multiple(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
c = SubElement(a, 'c')
d = SubElement(b, 'd')
e = SubElement(a, 'e')
self.assertEqual(
[],
list(a.itersiblings(tag=('XXX', 'YYY'))))
self.assertEqual(
[c, e],
list(b.itersiblings(tag=('c', 'd', 'e'))))
self.assertEqual(
[b],
list(c.itersiblings(preceding=True, tag=('b', 'b', 'c', 'd'))))
self.assertEqual(
[c, b],
list(e.itersiblings(preceding=True, tag=('c', '*'))))
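    # parseid() also returns a dict mapping DTD-declared ID values (and xml:id) to their elements;
    # plain 'id' attributes are not collected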
def test_parseid(self):
parseid = self.etree.parseid
XML = self.etree.XML
xml_text = _bytes('''
<!DOCTYPE document [
<!ELEMENT document (h1,p)*>
<!ELEMENT h1 (#PCDATA)>
<!ATTLIST h1 myid ID #REQUIRED>
<!ELEMENT p (#PCDATA)>
<!ATTLIST p someid ID #REQUIRED>
]>
<document>
<h1 myid="chapter1">...</h1>
<p id="note1" class="note">...</p>
<p>Regular paragraph.</p>
<p xml:id="xmlid">XML:ID paragraph.</p>
<p someid="warn1" class="warning">...</p>
</document>
''')
tree, dic = parseid(BytesIO(xml_text))
root = tree.getroot()
root2 = XML(xml_text)
self.assertEqual(self._writeElement(root),
self._writeElement(root2))
expected = {
"chapter1" : root[0],
"xmlid" : root[3],
"warn1" : root[4]
}
self.assertTrue("chapter1" in dic)
self.assertTrue("warn1" in dic)
self.assertTrue("xmlid" in dic)
self._checkIDDict(dic, expected)
def test_XMLDTDID(self):
XMLDTDID = self.etree.XMLDTDID
XML = self.etree.XML
xml_text = _bytes('''
<!DOCTYPE document [
<!ELEMENT document (h1,p)*>
<!ELEMENT h1 (#PCDATA)>
<!ATTLIST h1 myid ID #REQUIRED>
<!ELEMENT p (#PCDATA)>
<!ATTLIST p someid ID #REQUIRED>
]>
<document>
<h1 myid="chapter1">...</h1>
<p id="note1" class="note">...</p>
<p>Regular paragraph.</p>
<p xml:id="xmlid">XML:ID paragraph.</p>
<p someid="warn1" class="warning">...</p>
</document>
''')
root, dic = XMLDTDID(xml_text)
root2 = XML(xml_text)
self.assertEqual(self._writeElement(root),
self._writeElement(root2))
expected = {
"chapter1" : root[0],
"xmlid" : root[3],
"warn1" : root[4]
}
self.assertTrue("chapter1" in dic)
self.assertTrue("warn1" in dic)
self.assertTrue("xmlid" in dic)
self._checkIDDict(dic, expected)
def test_XMLDTDID_empty(self):
XMLDTDID = self.etree.XMLDTDID
XML = self.etree.XML
xml_text = _bytes('''
<document>
<h1 myid="chapter1">...</h1>
<p id="note1" class="note">...</p>
<p>Regular paragraph.</p>
<p someid="warn1" class="warning">...</p>
</document>
''')
root, dic = XMLDTDID(xml_text)
root2 = XML(xml_text)
self.assertEqual(self._writeElement(root),
self._writeElement(root2))
expected = {}
self._checkIDDict(dic, expected)
def _checkIDDict(self, dic, expected):
self.assertEqual(len(dic),
len(expected))
self.assertEqual(sorted(dic.items()),
sorted(expected.items()))
if sys.version_info < (3,):
self.assertEqual(sorted(dic.iteritems()),
sorted(expected.iteritems()))
self.assertEqual(sorted(dic.keys()),
sorted(expected.keys()))
if sys.version_info < (3,):
self.assertEqual(sorted(dic.iterkeys()),
sorted(expected.iterkeys()))
if sys.version_info < (3,):
self.assertEqual(sorted(dic.values()),
sorted(expected.values()))
self.assertEqual(sorted(dic.itervalues()),
sorted(expected.itervalues()))
def test_namespaces(self):
etree = self.etree
r = {'foo': 'http://ns.infrae.com/foo'}
e = etree.Element('{http://ns.infrae.com/foo}bar', nsmap=r)
self.assertEqual(
'foo',
e.prefix)
self.assertEqual(
_bytes('<foo:bar xmlns:foo="http://ns.infrae.com/foo"></foo:bar>'),
self._writeElement(e))
def test_namespaces_default(self):
etree = self.etree
r = {None: 'http://ns.infrae.com/foo'}
e = etree.Element('{http://ns.infrae.com/foo}bar', nsmap=r)
self.assertEqual(
None,
e.prefix)
self.assertEqual(
'{http://ns.infrae.com/foo}bar',
e.tag)
self.assertEqual(
_bytes('<bar xmlns="http://ns.infrae.com/foo"></bar>'),
self._writeElement(e))
def test_namespaces_default_and_attr(self):
etree = self.etree
r = {None: 'http://ns.infrae.com/foo',
'hoi': 'http://ns.infrae.com/hoi'}
e = etree.Element('{http://ns.infrae.com/foo}bar', nsmap=r)
e.set('{http://ns.infrae.com/hoi}test', 'value')
self.assertEqual(
_bytes('<bar xmlns="http://ns.infrae.com/foo" xmlns:hoi="http://ns.infrae.com/hoi" hoi:test="value"></bar>'),
self._writeElement(e))
def test_attribute_keeps_namespace_prefix_on_merge(self):
etree = self.etree
root = etree.Element('{http://test/ns}root',
nsmap={None: 'http://test/ns'})
sub = etree.Element('{http://test/ns}sub',
nsmap={'test': 'http://test/ns'})
sub.attrib['{http://test/ns}attr'] = 'value'
self.assertEqual(sub.attrib['{http://test/ns}attr'], 'value')
self.assertEqual(
_bytes('<test:sub xmlns:test="http://test/ns" test:attr="value"/>'),
etree.tostring(sub))
root.append(sub)
self.assertEqual(
_bytes('<root xmlns="http://test/ns">'
'<sub xmlns:test="http://test/ns" test:attr="value"/>'
'</root>'),
etree.tostring(root))
def test_attribute_keeps_namespace_prefix_on_merge_with_nons(self):
etree = self.etree
root = etree.Element('root')
sub = etree.Element('{http://test/ns}sub',
nsmap={'test': 'http://test/ns'})
sub.attrib['{http://test/ns}attr'] = 'value'
self.assertEqual(sub.attrib['{http://test/ns}attr'], 'value')
self.assertEqual(
_bytes('<test:sub xmlns:test="http://test/ns" test:attr="value"/>'),
etree.tostring(sub))
root.append(sub)
self.assertEqual(
_bytes('<root>'
'<test:sub xmlns:test="http://test/ns" test:attr="value"/>'
'</root>'),
etree.tostring(root))
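    # attributes never use the default namespace, so a prefixed declaration (here ns0) is generated for namespaced attributes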
def test_attribute_gets_namespace_prefix_on_merge_with_nons(self):
etree = self.etree
root = etree.Element('root')
sub = etree.Element('{http://test/ns}sub',
nsmap={None: 'http://test/ns'})
sub.attrib['{http://test/ns}attr'] = 'value'
self.assertEqual(sub.attrib['{http://test/ns}attr'], 'value')
self.assertEqual(
_bytes('<sub xmlns="http://test/ns" '
'xmlns:ns0="http://test/ns" ns0:attr="value"/>'),
etree.tostring(sub))
root.append(sub)
self.assertEqual(
_bytes('<root>'
'<sub xmlns="http://test/ns"'
' xmlns:ns0="http://test/ns" ns0:attr="value"/>'
'</root>'),
etree.tostring(root))
def test_attribute_gets_namespace_prefix_on_merge(self):
etree = self.etree
root = etree.Element('{http://test/ns}root',
nsmap={'test': 'http://test/ns',
None: 'http://test/ns'})
sub = etree.Element('{http://test/ns}sub',
nsmap={None: 'http://test/ns'})
sub.attrib['{http://test/ns}attr'] = 'value'
self.assertEqual(sub.attrib['{http://test/ns}attr'], 'value')
self.assertEqual(
_bytes('<sub xmlns="http://test/ns" '
'xmlns:ns0="http://test/ns" ns0:attr="value"/>'),
etree.tostring(sub))
root.append(sub)
self.assertEqual(
_bytes('<test:root xmlns:test="http://test/ns" xmlns="http://test/ns">'
'<test:sub test:attr="value"/>'
'</test:root>'),
etree.tostring(root))
def test_namespaces_elementtree(self):
etree = self.etree
r = {None: 'http://ns.infrae.com/foo',
'hoi': 'http://ns.infrae.com/hoi'}
e = etree.Element('{http://ns.infrae.com/foo}z', nsmap=r)
tree = etree.ElementTree(element=e)
etree.SubElement(e, '{http://ns.infrae.com/hoi}x')
self.assertEqual(
_bytes('<z xmlns="http://ns.infrae.com/foo" xmlns:hoi="http://ns.infrae.com/hoi"><hoi:x></hoi:x></z>'),
self._writeElement(e))
def test_namespaces_default_copy_element(self):
etree = self.etree
r = {None: 'http://ns.infrae.com/foo'}
e1 = etree.Element('{http://ns.infrae.com/foo}bar', nsmap=r)
e2 = etree.Element('{http://ns.infrae.com/foo}bar', nsmap=r)
e1.append(e2)
self.assertEqual(
None,
e1.prefix)
self.assertEqual(
None,
e1[0].prefix)
self.assertEqual(
'{http://ns.infrae.com/foo}bar',
e1.tag)
self.assertEqual(
'{http://ns.infrae.com/foo}bar',
e1[0].tag)
def test_namespaces_copy_element(self):
etree = self.etree
r = {None: 'http://ns.infrae.com/BAR'}
e1 = etree.Element('{http://ns.infrae.com/BAR}bar', nsmap=r)
e2 = etree.Element('{http://ns.infrae.com/foo}bar', nsmap=r)
e1.append(e2)
self.assertEqual(
None,
e1.prefix)
self.assertNotEqual(
None,
e2.prefix)
self.assertEqual(
'{http://ns.infrae.com/BAR}bar',
e1.tag)
self.assertEqual(
'{http://ns.infrae.com/foo}bar',
e2.tag)
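    # moving an element to another document must keep its namespace usable after the source document is freed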
def test_namespaces_reuse_after_move(self):
ns_href = "http://a.b.c"
one = self.etree.fromstring(
_bytes('<foo><bar xmlns:ns="%s"><ns:baz/></bar></foo>' % ns_href))
baz = one[0][0]
two = self.etree.fromstring(
_bytes('<root xmlns:ns="%s"/>' % ns_href))
two.append(baz)
del one # make sure the source document is deallocated
self.assertEqual('{%s}baz' % ns_href, baz.tag)
self.assertEqual(
_bytes('<root xmlns:ns="%s"><ns:baz/></root>' % ns_href),
self.etree.tostring(two))
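    # cleanup_namespaces() removes namespace declarations that no element or attribute in the subtree actually uses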
def test_namespace_cleanup(self):
xml = _bytes('<foo xmlns="F" xmlns:x="x"><bar xmlns:ns="NS" xmlns:b="b" xmlns="B"><ns:baz/></bar></foo>')
root = self.etree.fromstring(xml)
self.assertEqual(xml,
self.etree.tostring(root))
self.etree.cleanup_namespaces(root)
self.assertEqual(
_bytes('<foo xmlns="F"><bar xmlns:ns="NS" xmlns="B"><ns:baz/></bar></foo>'),
self.etree.tostring(root))
def test_element_nsmap(self):
etree = self.etree
r = {None: 'http://ns.infrae.com/foo',
'hoi': 'http://ns.infrae.com/hoi'}
e = etree.Element('{http://ns.infrae.com/foo}bar', nsmap=r)
self.assertEqual(
r,
e.nsmap)
def test_subelement_nsmap(self):
etree = self.etree
re = {None: 'http://ns.infrae.com/foo',
'hoi': 'http://ns.infrae.com/hoi'}
e = etree.Element('{http://ns.infrae.com/foo}bar', nsmap=re)
rs = {None: 'http://ns.infrae.com/honk',
'top': 'http://ns.infrae.com/top'}
s = etree.SubElement(e, '{http://ns.infrae.com/honk}bar', nsmap=rs)
r = re.copy()
r.update(rs)
self.assertEqual(re, e.nsmap)
self.assertEqual(r, s.nsmap)
def test_html_prefix_nsmap(self):
etree = self.etree
el = etree.HTML('<hha:page-description>aa</hha:page-description>').find('.//page-description')
self.assertEqual({'hha': None}, el.nsmap)
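    # getiterator() accepts multiple tag filters and yields matches in document order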
def test_getiterator_filter_multiple(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
c = SubElement(a, 'c')
d = SubElement(b, 'd')
e = SubElement(c, 'e')
f = SubElement(c, 'f')
self.assertEqual(
[a, b],
list(a.getiterator('a', 'b')))
self.assertEqual(
[],
list(a.getiterator('x', 'y')))
self.assertEqual(
[a, f],
list(a.getiterator('f', 'a')))
self.assertEqual(
[c, e, f],
list(c.getiterator('c', '*', 'a')))
self.assertEqual(
[],
list(a.getiterator( (), () )))
def test_getiterator_filter_multiple_tuple(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
c = SubElement(a, 'c')
d = SubElement(b, 'd')
e = SubElement(c, 'e')
f = SubElement(c, 'f')
self.assertEqual(
[a, b],
list(a.getiterator( ('a', 'b') )))
self.assertEqual(
[],
list(a.getiterator( ('x', 'y') )))
self.assertEqual(
[a, f],
list(a.getiterator( ('f', 'a') )))
self.assertEqual(
[c, e, f],
list(c.getiterator( ('c', '*', 'a') )))
self.assertEqual(
[],
list(a.getiterator( () )))
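    # '{ns}tag' filters match qualified names; '{}tag' and '{}*' match only elements without a namespace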
def test_getiterator_filter_namespace(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('{a}a')
b = SubElement(a, '{a}b')
c = SubElement(a, '{a}c')
d = SubElement(b, '{b}d')
e = SubElement(c, '{a}e')
f = SubElement(c, '{b}f')
g = SubElement(c, 'g')
self.assertEqual(
[a],
list(a.getiterator('{a}a')))
self.assertEqual(
[],
list(a.getiterator('{b}a')))
self.assertEqual(
[],
list(a.getiterator('a')))
self.assertEqual(
[a,b,d,c,e,f,g],
list(a.getiterator('*')))
self.assertEqual(
[f],
list(c.getiterator('{b}*')))
self.assertEqual(
[d, f],
list(a.getiterator('{b}*')))
self.assertEqual(
[g],
list(a.getiterator('g')))
self.assertEqual(
[g],
list(a.getiterator('{}g')))
self.assertEqual(
[g],
list(a.getiterator('{}*')))
def test_getiterator_filter_local_name(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('{a}a')
b = SubElement(a, '{nsA}b')
c = SubElement(b, '{nsB}b')
d = SubElement(a, 'b')
e = SubElement(a, '{nsA}e')
f = SubElement(e, '{nsB}e')
g = SubElement(e, 'e')
self.assertEqual(
[b, c, d],
list(a.getiterator('{*}b')))
self.assertEqual(
[e, f, g],
list(a.getiterator('{*}e')))
self.assertEqual(
[a, b, c, d, e, f, g],
list(a.getiterator('{*}*')))
def test_getiterator_filter_entities(self):
Element = self.etree.Element
Entity = self.etree.Entity
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
entity_b = Entity("TEST-b")
b.append(entity_b)
self.assertEqual(
[entity_b],
list(a.getiterator(Entity)))
entity_a = Entity("TEST-a")
a.append(entity_a)
self.assertEqual(
[entity_b, entity_a],
list(a.getiterator(Entity)))
self.assertEqual(
[entity_b],
list(b.getiterator(Entity)))
def test_getiterator_filter_element(self):
Element = self.etree.Element
Comment = self.etree.Comment
PI = self.etree.PI
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
a.append(Comment("test"))
a.append(PI("pi", "content"))
c = SubElement(a, 'c')
self.assertEqual(
[a, b, c],
list(a.getiterator(Element)))
def test_getiterator_filter_all_comment_pi(self):
# ElementTree iterates over everything here
Element = self.etree.Element
Comment = self.etree.Comment
PI = self.etree.PI
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
a.append(Comment("test"))
a.append(PI("pi", "content"))
c = SubElement(a, 'c')
self.assertEqual(
[a, b, c],
list(a.getiterator('*')))
def test_elementtree_find_qname(self):
XML = self.etree.XML
ElementTree = self.etree.ElementTree
QName = self.etree.QName
tree = ElementTree(XML(_bytes('<a><b><c/></b><b/><c><b/></c></a>')))
self.assertEqual(tree.find(QName("c")), tree.getroot()[2])
def test_elementtree_findall_qname(self):
XML = self.etree.XML
ElementTree = self.etree.ElementTree
QName = self.etree.QName
tree = ElementTree(XML(_bytes('<a><b><c/></b><b/><c><b/></c></a>')))
self.assertEqual(len(list(tree.findall(QName("c")))), 1)
def test_elementtree_findall_ns_qname(self):
XML = self.etree.XML
ElementTree = self.etree.ElementTree
QName = self.etree.QName
tree = ElementTree(XML(
_bytes('<a xmlns:x="X" xmlns:y="Y"><x:b><c/></x:b><b/><c><x:b/><b/></c><b/></a>')))
self.assertEqual(len(list(tree.findall(QName("b")))), 2)
self.assertEqual(len(list(tree.findall(QName("X", "b")))), 1)
def test_findall_ns(self):
XML = self.etree.XML
root = XML(_bytes('<a xmlns:x="X" xmlns:y="Y"><x:b><c/></x:b><b/><c><x:b/><b/></c><b/></a>'))
self.assertEqual(len(root.findall(".//{X}b")), 2)
self.assertEqual(len(root.findall(".//{X}*")), 2)
self.assertEqual(len(root.findall(".//b")), 3)
def test_findall_different_nsmaps(self):
XML = self.etree.XML
root = XML(_bytes('<a xmlns:x="X" xmlns:y="Y"><x:b><c/></x:b><b/><c><x:b/><b/></c><y:b/></a>'))
nsmap = {'xx': 'X'}
self.assertEqual(len(root.findall(".//xx:b", namespaces=nsmap)), 2)
self.assertEqual(len(root.findall(".//xx:*", namespaces=nsmap)), 2)
self.assertEqual(len(root.findall(".//b", namespaces=nsmap)), 2)
nsmap = {'xx': 'Y'}
self.assertEqual(len(root.findall(".//xx:b", namespaces=nsmap)), 1)
self.assertEqual(len(root.findall(".//xx:*", namespaces=nsmap)), 1)
self.assertEqual(len(root.findall(".//b", namespaces=nsmap)), 2)
def test_findall_syntax_error(self):
XML = self.etree.XML
root = XML(_bytes('<a><b><c/></b><b/><c><b/><b/></c><b/></a>'))
self.assertRaises(SyntaxError, root.findall, '')
self.assertRaises(SyntaxError, root.findall, '//') # absolute path on Element
self.assertRaises(SyntaxError, root.findall, './//')
def test_index(self):
etree = self.etree
e = etree.Element('foo')
for i in range(10):
etree.SubElement(e, 'a%s' % i)
for i in range(10):
self.assertEqual(
i,
e.index(e[i]))
self.assertEqual(
3, e.index(e[3], 3))
self.assertRaises(
ValueError, e.index, e[3], 4)
self.assertRaises(
ValueError, e.index, e[3], 0, 2)
self.assertRaises(
ValueError, e.index, e[8], 0, -3)
self.assertRaises(
ValueError, e.index, e[8], -5, -3)
self.assertEqual(
8, e.index(e[8], 0, -1))
self.assertEqual(
8, e.index(e[8], -12, -1))
self.assertEqual(
0, e.index(e[0], -12, -1))
def test_replace(self):
etree = self.etree
e = etree.Element('foo')
for i in range(10):
el = etree.SubElement(e, 'a%s' % i)
el.text = "text%d" % i
el.tail = "tail%d" % i
child0 = e[0]
child1 = e[1]
child2 = e[2]
e.replace(e[0], e[1])
self.assertEqual(
9, len(e))
self.assertEqual(
child1, e[0])
self.assertEqual(
child1.text, "text1")
self.assertEqual(
child1.tail, "tail1")
self.assertEqual(
child0.tail, "tail0")
self.assertEqual(
child2, e[1])
e.replace(e[-1], e[0])
self.assertEqual(
child1, e[-1])
self.assertEqual(
child1.text, "text1")
self.assertEqual(
child1.tail, "tail1")
self.assertEqual(
child2, e[0])
def test_replace_new(self):
etree = self.etree
e = etree.Element('foo')
for i in range(10):
etree.SubElement(e, 'a%s' % i)
new_element = etree.Element("test")
new_element.text = "TESTTEXT"
new_element.tail = "TESTTAIL"
child1 = e[1]
e.replace(e[0], new_element)
self.assertEqual(
new_element, e[0])
self.assertEqual(
"TESTTEXT",
e[0].text)
self.assertEqual(
"TESTTAIL",
e[0].tail)
self.assertEqual(
child1, e[1])
def test_setslice_all_empty_reversed(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
e = Element('e')
f = Element('f')
g = Element('g')
s = [e, f, g]
a[::-1] = s
self.assertEqual(
[g, f, e],
list(a))
def test_setslice_step(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
c = SubElement(a, 'c')
d = SubElement(a, 'd')
e = SubElement(a, 'e')
x = Element('x')
y = Element('y')
a[1::2] = [x, y]
self.assertEqual(
[b, x, d, y],
list(a))
def test_setslice_step_negative(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
c = SubElement(a, 'c')
d = SubElement(a, 'd')
e = SubElement(a, 'e')
x = Element('x')
y = Element('y')
a[1::-1] = [x, y]
self.assertEqual(
[y, x, d, e],
list(a))
def test_setslice_step_negative2(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
c = SubElement(a, 'c')
d = SubElement(a, 'd')
e = SubElement(a, 'e')
x = Element('x')
y = Element('y')
a[::-2] = [x, y]
self.assertEqual(
[b, y, d, x],
list(a))
def test_setslice_step_overrun(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
try:
slice
except NameError:
print("slice() not found")
return
a = Element('a')
b = SubElement(a, 'b')
c = SubElement(a, 'c')
d = SubElement(a, 'd')
e = SubElement(a, 'e')
x = Element('x')
y = Element('y')
z = Element('z')
self.assertRaises(
ValueError,
operator.setitem, a, slice(1,None,2), [x, y, z])
self.assertEqual(
[b, c, d, e],
list(a))
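    # sourceline reports the line of each element in the parsed input; API-created elements have no sourceline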
def test_sourceline_XML(self):
XML = self.etree.XML
root = XML(_bytes('''<?xml version="1.0"?>
<root><test>
<bla/></test>
</root>
'''))
self.assertEqual(
[2, 2, 4],
[ el.sourceline for el in root.getiterator() ])
def test_large_sourceline_XML(self):
XML = self.etree.XML
root = XML(_bytes(
'<?xml version="1.0"?>\n'
'<root>' + '\n' * 65536 +
'<p>' + '\n' * 65536 + '</p>\n' +
'<br/>\n'
'</root>'))
if self.etree.LIBXML_VERSION >= (2, 9):
expected = [2, 131074, 131076]
else:
expected = [2, 65535, 65535]
self.assertEqual(expected, [el.sourceline for el in root.iter()])
def test_sourceline_parse(self):
parse = self.etree.parse
tree = parse(fileInTestDir('include/test_xinclude.xml'))
self.assertEqual(
[1, 2, 3],
[ el.sourceline for el in tree.getiterator() ])
def test_sourceline_iterparse_end(self):
iterparse = self.etree.iterparse
lines = [ el.sourceline for (event, el) in
iterparse(fileInTestDir('include/test_xinclude.xml')) ]
self.assertEqual(
[2, 3, 1],
lines)
def test_sourceline_iterparse_start(self):
iterparse = self.etree.iterparse
lines = [ el.sourceline for (event, el) in
iterparse(fileInTestDir('include/test_xinclude.xml'),
events=("start",)) ]
self.assertEqual(
[1, 2, 3],
lines)
def test_sourceline_element(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
el = Element("test")
self.assertEqual(None, el.sourceline)
child = SubElement(el, "test")
self.assertEqual(None, el.sourceline)
self.assertEqual(None, child.sourceline)
def test_XML_base_url_docinfo(self):
etree = self.etree
root = etree.XML(_bytes("<root/>"), base_url="http://no/such/url")
docinfo = root.getroottree().docinfo
self.assertEqual(docinfo.URL, "http://no/such/url")
def test_XML_set_base_url_docinfo(self):
etree = self.etree
root = etree.XML(_bytes("<root/>"), base_url="http://no/such/url")
docinfo = root.getroottree().docinfo
self.assertEqual(docinfo.URL, "http://no/such/url")
docinfo.URL = "https://secret/url"
self.assertEqual(docinfo.URL, "https://secret/url")
def test_parse_stringio_base_url(self):
etree = self.etree
tree = etree.parse(BytesIO("<root/>"), base_url="http://no/such/url")
docinfo = tree.docinfo
self.assertEqual(docinfo.URL, "http://no/such/url")
def test_parse_base_url_docinfo(self):
etree = self.etree
tree = etree.parse(fileInTestDir('include/test_xinclude.xml'),
base_url="http://no/such/url")
docinfo = tree.docinfo
self.assertEqual(docinfo.URL, "http://no/such/url")
def test_HTML_base_url_docinfo(self):
etree = self.etree
root = etree.HTML(_bytes("<html/>"), base_url="http://no/such/url")
docinfo = root.getroottree().docinfo
self.assertEqual(docinfo.URL, "http://no/such/url")
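    # docinfo exposes XML declaration and DOCTYPE details (encoding, version, public/system ID, doctype string)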
def test_docinfo_public(self):
etree = self.etree
xml_header = '<?xml version="1.0" encoding="ascii"?>'
pub_id = "-//W3C//DTD XHTML 1.0 Transitional//EN"
sys_id = "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"
doctype_string = '<!DOCTYPE html PUBLIC "%s" "%s">' % (pub_id, sys_id)
xml = _bytes(xml_header + doctype_string + '<html><body></body></html>')
tree = etree.parse(BytesIO(xml))
docinfo = tree.docinfo
self.assertEqual(docinfo.encoding, "ascii")
self.assertEqual(docinfo.xml_version, "1.0")
self.assertEqual(docinfo.public_id, pub_id)
self.assertEqual(docinfo.system_url, sys_id)
self.assertEqual(docinfo.root_name, 'html')
self.assertEqual(docinfo.doctype, doctype_string)
def test_docinfo_system(self):
etree = self.etree
xml_header = '<?xml version="1.0" encoding="UTF-8"?>'
sys_id = "some.dtd"
doctype_string = '<!DOCTYPE html SYSTEM "%s">' % sys_id
xml = _bytes(xml_header + doctype_string + '<html><body></body></html>')
tree = etree.parse(BytesIO(xml))
docinfo = tree.docinfo
self.assertEqual(docinfo.encoding, "UTF-8")
self.assertEqual(docinfo.xml_version, "1.0")
self.assertEqual(docinfo.public_id, None)
self.assertEqual(docinfo.system_url, sys_id)
self.assertEqual(docinfo.root_name, 'html')
self.assertEqual(docinfo.doctype, doctype_string)
def test_docinfo_empty(self):
etree = self.etree
xml = _bytes('<html><body></body></html>')
tree = etree.parse(BytesIO(xml))
docinfo = tree.docinfo
self.assertEqual(docinfo.encoding, "UTF-8")
self.assertEqual(docinfo.xml_version, "1.0")
self.assertEqual(docinfo.public_id, None)
self.assertEqual(docinfo.system_url, None)
self.assertEqual(docinfo.root_name, 'html')
self.assertEqual(docinfo.doctype, '')
def test_docinfo_name_only(self):
etree = self.etree
xml = _bytes('<!DOCTYPE root><root></root>')
tree = etree.parse(BytesIO(xml))
docinfo = tree.docinfo
self.assertEqual(docinfo.encoding, "UTF-8")
self.assertEqual(docinfo.xml_version, "1.0")
self.assertEqual(docinfo.public_id, None)
self.assertEqual(docinfo.system_url, None)
self.assertEqual(docinfo.root_name, 'root')
self.assertEqual(docinfo.doctype, '<!DOCTYPE root>')
def test_doctype_name_only_roundtrip(self):
etree = self.etree
xml = _bytes('<!DOCTYPE root>\n<root/>')
tree = etree.parse(BytesIO(xml))
self.assertEqual(xml, etree.tostring(tree))
def test_doctype_output_override(self):
etree = self.etree
pub_id = "-//W3C//DTD XHTML 1.0 Transitional//EN"
sys_id = "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"
doctype_string = _bytes('<!DOCTYPE html PUBLIC "%s" "%s">' % (pub_id, sys_id))
xml = _bytes('<!DOCTYPE root>\n<root/>')
tree = etree.parse(BytesIO(xml))
self.assertEqual(xml.replace(_bytes('<!DOCTYPE root>'), doctype_string),
etree.tostring(tree, doctype=doctype_string))
def test_xml_base(self):
etree = self.etree
root = etree.XML(_bytes("<root/>"), base_url="http://no/such/url")
self.assertEqual(root.base, "http://no/such/url")
self.assertEqual(
root.get('{http://www.w3.org/XML/1998/namespace}base'), None)
root.base = "https://secret/url"
self.assertEqual(root.base, "https://secret/url")
self.assertEqual(
root.get('{http://www.w3.org/XML/1998/namespace}base'),
"https://secret/url")
def test_xml_base_attribute(self):
etree = self.etree
root = etree.XML(_bytes("<root/>"), base_url="http://no/such/url")
self.assertEqual(root.base, "http://no/such/url")
self.assertEqual(
root.get('{http://www.w3.org/XML/1998/namespace}base'), None)
root.set('{http://www.w3.org/XML/1998/namespace}base',
"https://secret/url")
self.assertEqual(root.base, "https://secret/url")
self.assertEqual(
root.get('{http://www.w3.org/XML/1998/namespace}base'),
"https://secret/url")
def test_html_base(self):
etree = self.etree
root = etree.HTML(_bytes("<html><body></body></html>"),
base_url="http://no/such/url")
self.assertEqual(root.base, "http://no/such/url")
def test_html_base_tag(self):
etree = self.etree
root = etree.HTML(_bytes('<html><head><base href="http://no/such/url"></head></html>'))
self.assertEqual(root.base, "http://no/such/url")
def test_parse_fileobject_unicode(self):
# parse from a file object that returns unicode strings
f = LargeFileLikeUnicode()
tree = self.etree.parse(f)
root = tree.getroot()
self.assertTrue(root.tag.endswith('root'))
def test_dtd_io(self):
# check that DTDs that go in also go back out
xml = _bytes('''\
<!DOCTYPE test SYSTEM "test.dtd" [
<!ENTITY entity "tasty">
<!ELEMENT test (a)>
<!ELEMENT a (#PCDATA)>
]>
<test><a>test-test</a></test>\
''')
tree = self.etree.parse(BytesIO(xml))
self.assertEqual(self.etree.tostring(tree).replace(_bytes(" "), _bytes("")),
xml.replace(_bytes(" "), _bytes("")))
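    # tag, text and tail values must not contain NUL bytes or other characters that are not allowed in XML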
def test_byte_zero(self):
Element = self.etree.Element
a = Element('a')
self.assertRaises(ValueError, setattr, a, "text", 'ha\0ho')
self.assertRaises(ValueError, setattr, a, "tail", 'ha\0ho')
self.assertRaises(ValueError, Element, 'ha\0ho')
def test_unicode_byte_zero(self):
Element = self.etree.Element
a = Element('a')
self.assertRaises(ValueError, setattr, a, "text",
_str('ha\0ho'))
self.assertRaises(ValueError, setattr, a, "tail",
_str('ha\0ho'))
self.assertRaises(ValueError, Element,
_str('ha\0ho'))
def test_byte_invalid(self):
Element = self.etree.Element
a = Element('a')
self.assertRaises(ValueError, setattr, a, "text", 'ha\x07ho')
self.assertRaises(ValueError, setattr, a, "text", 'ha\x02ho')
self.assertRaises(ValueError, setattr, a, "tail", 'ha\x07ho')
self.assertRaises(ValueError, setattr, a, "tail", 'ha\x02ho')
self.assertRaises(ValueError, Element, 'ha\x07ho')
self.assertRaises(ValueError, Element, 'ha\x02ho')
def test_unicode_byte_invalid(self):
Element = self.etree.Element
a = Element('a')
self.assertRaises(ValueError, setattr, a, "text",
_str('ha\x07ho'))
self.assertRaises(ValueError, setattr, a, "text",
_str('ha\x02ho'))
self.assertRaises(ValueError, setattr, a, "tail",
_str('ha\x07ho'))
self.assertRaises(ValueError, setattr, a, "tail",
_str('ha\x02ho'))
self.assertRaises(ValueError, Element,
_str('ha\x07ho'))
self.assertRaises(ValueError, Element,
_str('ha\x02ho'))
def test_unicode_byte_invalid_sequence(self):
Element = self.etree.Element
a = Element('a')
self.assertRaises(ValueError, setattr, a, "text",
_str('ha\u1234\x07ho'))
self.assertRaises(ValueError, setattr, a, "text",
_str('ha\u1234\x02ho'))
self.assertRaises(ValueError, setattr, a, "tail",
_str('ha\u1234\x07ho'))
self.assertRaises(ValueError, setattr, a, "tail",
_str('ha\u1234\x02ho'))
self.assertRaises(ValueError, Element,
_str('ha\u1234\x07ho'))
self.assertRaises(ValueError, Element,
_str('ha\u1234\x02ho'))
def test_encoding_tostring_utf16(self):
# ElementTree fails to serialize this
tostring = self.etree.tostring
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
c = SubElement(a, 'c')
result = tostring(a, encoding='UTF-16')
self.assertEqual(_bytes('<a><b></b><c></c></a>'),
canonicalize(result))
def test_tostring_none(self):
# ElementTree raises an AssertionError here
tostring = self.etree.tostring
self.assertRaises(TypeError, self.etree.tostring, None)
def test_tostring_pretty(self):
tostring = self.etree.tostring
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
c = SubElement(a, 'c')
result = tostring(a)
self.assertEqual(result, _bytes("<a><b/><c/></a>"))
result = tostring(a, pretty_print=False)
self.assertEqual(result, _bytes("<a><b/><c/></a>"))
result = tostring(a, pretty_print=True)
self.assertEqual(result, _bytes("<a>\n <b/>\n <c/>\n</a>\n"))
def test_tostring_with_tail(self):
tostring = self.etree.tostring
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
a.tail = "aTAIL"
b = SubElement(a, 'b')
b.tail = "bTAIL"
c = SubElement(a, 'c')
result = tostring(a)
self.assertEqual(result, _bytes("<a><b/>bTAIL<c/></a>aTAIL"))
result = tostring(a, with_tail=False)
self.assertEqual(result, _bytes("<a><b/>bTAIL<c/></a>"))
result = tostring(a, with_tail=True)
self.assertEqual(result, _bytes("<a><b/>bTAIL<c/></a>aTAIL"))
def test_tostring_method_html_with_tail(self):
tostring = self.etree.tostring
html = self.etree.fromstring(
'<html><body>'
'<div><p>Some text<i>\r\n</i></p></div>\r\n'
'</body></html>',
parser=self.etree.HTMLParser())
self.assertEqual(html.tag, 'html')
div = html.find('.//div')
self.assertEqual(div.tail, '\r\n')
result = tostring(div, method='html')
self.assertEqual(
result,
_bytes("<div><p>Some text<i>\r\n</i></p></div>\r\n"))
result = tostring(div, method='html', with_tail=True)
self.assertEqual(
result,
_bytes("<div><p>Some text<i>\r\n</i></p></div>\r\n"))
result = tostring(div, method='html', with_tail=False)
self.assertEqual(
result,
_bytes("<div><p>Some text<i>\r\n</i></p></div>"))
def test_standalone(self):
tostring = self.etree.tostring
XML = self.etree.XML
ElementTree = self.etree.ElementTree
Element = self.etree.Element
tree = Element("root").getroottree()
self.assertEqual(None, tree.docinfo.standalone)
tree = XML(_bytes("<root/>")).getroottree()
self.assertEqual(None, tree.docinfo.standalone)
tree = XML(_bytes(
"<?xml version='1.0' encoding='ASCII' standalone='yes'?>\n<root/>"
)).getroottree()
self.assertEqual(True, tree.docinfo.standalone)
tree = XML(_bytes(
"<?xml version='1.0' encoding='ASCII' standalone='no'?>\n<root/>"
)).getroottree()
self.assertEqual(False, tree.docinfo.standalone)
def test_tostring_standalone(self):
tostring = self.etree.tostring
XML = self.etree.XML
ElementTree = self.etree.ElementTree
root = XML(_bytes("<root/>"))
tree = ElementTree(root)
self.assertEqual(None, tree.docinfo.standalone)
result = tostring(root, xml_declaration=True, encoding="ASCII")
self.assertEqual(result, _bytes(
"<?xml version='1.0' encoding='ASCII'?>\n<root/>"))
result = tostring(root, xml_declaration=True, encoding="ASCII",
standalone=True)
self.assertEqual(result, _bytes(
"<?xml version='1.0' encoding='ASCII' standalone='yes'?>\n<root/>"))
tree = ElementTree(XML(result))
self.assertEqual(True, tree.docinfo.standalone)
result = tostring(root, xml_declaration=True, encoding="ASCII",
standalone=False)
self.assertEqual(result, _bytes(
"<?xml version='1.0' encoding='ASCII' standalone='no'?>\n<root/>"))
tree = ElementTree(XML(result))
self.assertEqual(False, tree.docinfo.standalone)
def test_tostring_standalone_in_out(self):
tostring = self.etree.tostring
XML = self.etree.XML
ElementTree = self.etree.ElementTree
root = XML(_bytes(
"<?xml version='1.0' encoding='UTF-8' standalone='yes'?>\n<root/>"))
tree = ElementTree(root)
self.assertEqual(True, tree.docinfo.standalone)
result = tostring(root, xml_declaration=True, encoding="ASCII")
self.assertEqual(result, _bytes(
"<?xml version='1.0' encoding='ASCII'?>\n<root/>"))
result = tostring(root, xml_declaration=True, encoding="ASCII",
standalone=True)
self.assertEqual(result, _bytes(
"<?xml version='1.0' encoding='ASCII' standalone='yes'?>\n<root/>"))
def test_tostring_method_text_encoding(self):
tostring = self.etree.tostring
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
a.text = "A"
a.tail = "tail"
b = SubElement(a, 'b')
b.text = "B"
b.tail = _str("Søk på nettet")
c = SubElement(a, 'c')
c.text = "C"
result = tostring(a, method="text", encoding="UTF-16")
self.assertEqual(_str('ABSøk på nettetCtail').encode("UTF-16"),
result)
def test_tostring_method_text_unicode(self):
tostring = self.etree.tostring
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
a.text = _str('Søk på nettetA')
a.tail = "tail"
b = SubElement(a, 'b')
b.text = "B"
b.tail = _str('Søk på nettetB')
c = SubElement(a, 'c')
c.text = "C"
self.assertRaises(UnicodeEncodeError,
tostring, a, method="text")
self.assertEqual(
_str('Søk på nettetABSøk på nettetBCtail').encode('utf-8'),
tostring(a, encoding="UTF-8", method="text"))
def test_tounicode(self):
tounicode = self.etree.tounicode
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
c = SubElement(a, 'c')
self.assertTrue(isinstance(tounicode(a), _unicode))
self.assertEqual(_bytes('<a><b></b><c></c></a>'),
canonicalize(tounicode(a)))
def test_tounicode_element(self):
tounicode = self.etree.tounicode
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
c = SubElement(a, 'c')
d = SubElement(c, 'd')
self.assertTrue(isinstance(tounicode(b), _unicode))
self.assertTrue(isinstance(tounicode(c), _unicode))
self.assertEqual(_bytes('<b></b>'),
canonicalize(tounicode(b)))
self.assertEqual(_bytes('<c><d></d></c>'),
canonicalize(tounicode(c)))
def test_tounicode_none(self):
tounicode = self.etree.tounicode
self.assertRaises(TypeError, self.etree.tounicode, None)
def test_tounicode_element_tail(self):
tounicode = self.etree.tounicode
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
c = SubElement(a, 'c')
d = SubElement(c, 'd')
b.tail = 'Foo'
self.assertTrue(isinstance(tounicode(b), _unicode))
self.assertTrue(tounicode(b) == '<b/>Foo' or
tounicode(b) == '<b />Foo')
def test_tounicode_pretty(self):
tounicode = self.etree.tounicode
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
c = SubElement(a, 'c')
result = tounicode(a)
self.assertEqual(result, "<a><b/><c/></a>")
result = tounicode(a, pretty_print=False)
self.assertEqual(result, "<a><b/><c/></a>")
result = tounicode(a, pretty_print=True)
self.assertEqual(result, "<a>\n <b/>\n <c/>\n</a>\n")
def test_tostring_unicode(self):
tostring = self.etree.tostring
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
c = SubElement(a, 'c')
self.assertTrue(isinstance(tostring(a, encoding=_unicode), _unicode))
self.assertEqual(_bytes('<a><b></b><c></c></a>'),
canonicalize(tostring(a, encoding=_unicode)))
def test_tostring_unicode_element(self):
tostring = self.etree.tostring
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
c = SubElement(a, 'c')
d = SubElement(c, 'd')
self.assertTrue(isinstance(tostring(b, encoding=_unicode), _unicode))
self.assertTrue(isinstance(tostring(c, encoding=_unicode), _unicode))
self.assertEqual(_bytes('<b></b>'),
canonicalize(tostring(b, encoding=_unicode)))
self.assertEqual(_bytes('<c><d></d></c>'),
canonicalize(tostring(c, encoding=_unicode)))
def test_tostring_unicode_none(self):
tostring = self.etree.tostring
self.assertRaises(TypeError, self.etree.tostring,
None, encoding=_unicode)
def test_tostring_unicode_element_tail(self):
tostring = self.etree.tostring
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
c = SubElement(a, 'c')
d = SubElement(c, 'd')
b.tail = 'Foo'
self.assertTrue(isinstance(tostring(b, encoding=_unicode), _unicode))
self.assertTrue(tostring(b, encoding=_unicode) == '<b/>Foo' or
tostring(b, encoding=_unicode) == '<b />Foo')
def test_tostring_unicode_pretty(self):
tostring = self.etree.tostring
Element = self.etree.Element
SubElement = self.etree.SubElement
a = Element('a')
b = SubElement(a, 'b')
c = SubElement(a, 'c')
result = tostring(a, encoding=_unicode)
self.assertEqual(result, "<a><b/><c/></a>")
result = tostring(a, encoding=_unicode, pretty_print=False)
self.assertEqual(result, "<a><b/><c/></a>")
result = tostring(a, encoding=_unicode, pretty_print=True)
self.assertEqual(result, "<a>\n <b/>\n <c/>\n</a>\n")
def test_pypy_proxy_collect(self):
root = etree.Element('parent')
etree.SubElement(root, 'child')
self.assertEqual(len(root), 1)
self.assertEqual(root[0].tag, 'child')
# in PyPy, GC used to kill the Python proxy instance without cleanup
gc.collect()
self.assertEqual(len(root), 1)
self.assertEqual(root[0].tag, 'child')
def test_element_refcycle(self):
class SubEl(etree.ElementBase):
pass
el1 = SubEl()
el2 = SubEl()
self.assertEqual('SubEl', el1.tag)
self.assertEqual('SubEl', el2.tag)
el1.other = el2
el2.other = el1
del el1, el2
gc.collect()
# not really testing anything here, but it shouldn't crash
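    # deallocating one proxy of a detached sibling pair must not free nodes still reachable from the other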
def test_proxy_collect_siblings(self):
root = etree.Element('parent')
c1 = etree.SubElement(root, 'child1')
c2 = etree.SubElement(root, 'child2')
root.remove(c1)
root.remove(c2)
c1.addnext(c2)
del c1
# trigger deallocation attempt of c1
c2.getprevious()
# make sure it wasn't deallocated
self.assertEqual('child1', c2.getprevious().tag)
def test_proxy_collect_siblings_text(self):
root = etree.Element('parent')
c1 = etree.SubElement(root, 'child1')
c2 = etree.SubElement(root, 'child2')
root.remove(c1)
root.remove(c2)
c1.addnext(c2)
c1.tail = 'abc'
c2.tail = 'xyz'
del c1
# trigger deallocation attempt of c1
c2.getprevious()
# make sure it wasn't deallocated
self.assertEqual('child1', c2.getprevious().tag)
self.assertEqual('abc', c2.getprevious().tail)
# helper methods
def _writeElement(self, element, encoding='us-ascii', compression=0):
"""Write out element for comparison.
"""
ElementTree = self.etree.ElementTree
f = BytesIO()
tree = ElementTree(element=element)
tree.write(f, encoding=encoding, compression=compression)
data = f.getvalue()
if compression:
data = zlib.decompress(data)
return canonicalize(data)
class _XIncludeTestCase(HelperTestCase):
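    # shared XInclude tests, run below via the C-level xinclude() as well as the Python ElementInclude module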
def test_xinclude_text(self):
filename = fileInTestDir('test_broken.xml')
root = etree.XML(_bytes('''\
<doc xmlns:xi="http://www.w3.org/2001/XInclude">
<xi:include href="%s" parse="text"/>
</doc>
''' % path2url(filename)))
old_text = root.text
content = read_file(filename)
old_tail = root[0].tail
self.include( etree.ElementTree(root) )
self.assertEqual(old_text + content + old_tail,
root.text)
def test_xinclude(self):
tree = etree.parse(fileInTestDir('include/test_xinclude.xml'))
self.assertNotEqual(
'a',
tree.getroot()[1].tag)
# process xincludes
self.include( tree )
# check whether we find it replaced with included data
self.assertEqual(
'a',
tree.getroot()[1].tag)
def test_xinclude_resolver(self):
class res(etree.Resolver):
include_text = read_file(fileInTestDir('test.xml'))
called = {}
def resolve(self, url, id, context):
if url.endswith(".dtd"):
self.called["dtd"] = True
return self.resolve_filename(
fileInTestDir('test.dtd'), context)
elif url.endswith("test_xinclude.xml"):
self.called["input"] = True
return None # delegate to default resolver
else:
self.called["include"] = True
return self.resolve_string(self.include_text, context)
res_instance = res()
parser = etree.XMLParser(load_dtd = True)
parser.resolvers.add(res_instance)
tree = etree.parse(fileInTestDir('include/test_xinclude.xml'),
parser = parser)
self.include(tree)
called = list(res_instance.called.items())
called.sort()
self.assertEqual(
[("dtd", True), ("include", True), ("input", True)],
called)
class ETreeXIncludeTestCase(_XIncludeTestCase):
def include(self, tree):
tree.xinclude()
class ElementIncludeTestCase(_XIncludeTestCase):
from lxml import ElementInclude
def include(self, tree):
self.ElementInclude.include(tree.getroot())
class ETreeC14NTestCase(HelperTestCase):
def test_c14n(self):
tree = self.parse(_bytes('<a><b/></a>'))
f = BytesIO()
tree.write_c14n(f)
s = f.getvalue()
self.assertEqual(_bytes('<a><b></b></a>'),
s)
def test_c14n_gzip(self):
tree = self.parse(_bytes('<a>'+'<b/>'*200+'</a>'))
f = BytesIO()
tree.write_c14n(f, compression=9)
gzfile = gzip.GzipFile(fileobj=BytesIO(f.getvalue()))
try:
s = gzfile.read()
finally:
gzfile.close()
self.assertEqual(_bytes('<a>'+'<b></b>'*200+'</a>'),
s)
def test_c14n_file(self):
tree = self.parse(_bytes('<a><b/></a>'))
handle, filename = tempfile.mkstemp()
try:
tree.write_c14n(filename)
data = read_file(filename, 'rb')
finally:
os.close(handle)
os.remove(filename)
self.assertEqual(_bytes('<a><b></b></a>'),
data)
def test_c14n_file_gzip(self):
tree = self.parse(_bytes('<a>'+'<b/>'*200+'</a>'))
handle, filename = tempfile.mkstemp()
try:
tree.write_c14n(filename, compression=9)
f = gzip.open(filename, 'rb')
try:
data = f.read()
finally:
f.close()
finally:
os.close(handle)
os.remove(filename)
self.assertEqual(_bytes('<a>'+'<b></b>'*200+'</a>'),
data)
def test_c14n_with_comments(self):
tree = self.parse(_bytes('<!--hi--><a><!--ho--><b/></a><!--hu-->'))
f = BytesIO()
tree.write_c14n(f)
s = f.getvalue()
self.assertEqual(_bytes('<!--hi-->\n<a><!--ho--><b></b></a>\n<!--hu-->'),
s)
f = BytesIO()
tree.write_c14n(f, with_comments=True)
s = f.getvalue()
self.assertEqual(_bytes('<!--hi-->\n<a><!--ho--><b></b></a>\n<!--hu-->'),
s)
f = BytesIO()
tree.write_c14n(f, with_comments=False)
s = f.getvalue()
self.assertEqual(_bytes('<a><b></b></a>'),
s)
def test_c14n_tostring_with_comments(self):
tree = self.parse(_bytes('<!--hi--><a><!--ho--><b/></a><!--hu-->'))
s = etree.tostring(tree, method='c14n')
self.assertEqual(_bytes('<!--hi-->\n<a><!--ho--><b></b></a>\n<!--hu-->'),
s)
s = etree.tostring(tree, method='c14n', with_comments=True)
self.assertEqual(_bytes('<!--hi-->\n<a><!--ho--><b></b></a>\n<!--hu-->'),
s)
s = etree.tostring(tree, method='c14n', with_comments=False)
self.assertEqual(_bytes('<a><b></b></a>'),
s)
def test_c14n_element_tostring_with_comments(self):
tree = self.parse(_bytes('<!--hi--><a><!--ho--><b/></a><!--hu-->'))
s = etree.tostring(tree.getroot(), method='c14n')
self.assertEqual(_bytes('<a><!--ho--><b></b></a>'),
s)
s = etree.tostring(tree.getroot(), method='c14n', with_comments=True)
self.assertEqual(_bytes('<a><!--ho--><b></b></a>'),
s)
s = etree.tostring(tree.getroot(), method='c14n', with_comments=False)
self.assertEqual(_bytes('<a><b></b></a>'),
s)
def test_c14n_exclusive(self):
tree = self.parse(_bytes(
'<a xmlns="http://abc" xmlns:y="http://bcd" xmlns:z="http://cde"><z:b/></a>'))
f = BytesIO()
tree.write_c14n(f)
s = f.getvalue()
self.assertEqual(_bytes('<a xmlns="http://abc" xmlns:y="http://bcd" xmlns:z="http://cde"><z:b></z:b></a>'),
s)
f = BytesIO()
tree.write_c14n(f, exclusive=False)
s = f.getvalue()
self.assertEqual(_bytes('<a xmlns="http://abc" xmlns:y="http://bcd" xmlns:z="http://cde"><z:b></z:b></a>'),
s)
f = BytesIO()
tree.write_c14n(f, exclusive=True)
s = f.getvalue()
self.assertEqual(_bytes('<a xmlns="http://abc"><z:b xmlns:z="http://cde"></z:b></a>'),
s)
f = BytesIO()
tree.write_c14n(f, exclusive=True, inclusive_ns_prefixes=['z'])
s = f.getvalue()
self.assertEqual(_bytes('<a xmlns="http://abc" xmlns:z="http://cde"><z:b></z:b></a>'),
s)
def test_c14n_tostring_exclusive(self):
tree = self.parse(_bytes(
'<a xmlns="http://abc" xmlns:y="http://bcd" xmlns:z="http://cde"><z:b/></a>'))
s = etree.tostring(tree, method='c14n')
self.assertEqual(_bytes('<a xmlns="http://abc" xmlns:y="http://bcd" xmlns:z="http://cde"><z:b></z:b></a>'),
s)
s = etree.tostring(tree, method='c14n', exclusive=False)
self.assertEqual(_bytes('<a xmlns="http://abc" xmlns:y="http://bcd" xmlns:z="http://cde"><z:b></z:b></a>'),
s)
s = etree.tostring(tree, method='c14n', exclusive=True)
self.assertEqual(_bytes('<a xmlns="http://abc"><z:b xmlns:z="http://cde"></z:b></a>'),
s)
s = etree.tostring(tree, method='c14n', exclusive=True, inclusive_ns_prefixes=['y'])
self.assertEqual(_bytes('<a xmlns="http://abc" xmlns:y="http://bcd"><z:b xmlns:z="http://cde"></z:b></a>'),
s)
def test_c14n_element_tostring_exclusive(self):
tree = self.parse(_bytes(
'<a xmlns="http://abc" xmlns:y="http://bcd" xmlns:z="http://cde"><z:b/></a>'))
s = etree.tostring(tree.getroot(), method='c14n')
self.assertEqual(_bytes('<a xmlns="http://abc" xmlns:y="http://bcd" xmlns:z="http://cde"><z:b></z:b></a>'),
s)
s = etree.tostring(tree.getroot(), method='c14n', exclusive=False)
self.assertEqual(_bytes('<a xmlns="http://abc" xmlns:y="http://bcd" xmlns:z="http://cde"><z:b></z:b></a>'),
s)
s = etree.tostring(tree.getroot(), method='c14n', exclusive=True)
self.assertEqual(_bytes('<a xmlns="http://abc"><z:b xmlns:z="http://cde"></z:b></a>'),
s)
s = etree.tostring(tree.getroot()[0], method='c14n', exclusive=False)
self.assertEqual(_bytes('<z:b xmlns="http://abc" xmlns:y="http://bcd" xmlns:z="http://cde"></z:b>'),
s)
s = etree.tostring(tree.getroot()[0], method='c14n', exclusive=True)
self.assertEqual(_bytes('<z:b xmlns:z="http://cde"></z:b>'),
s)
s = etree.tostring(tree.getroot()[0], method='c14n', exclusive=True, inclusive_ns_prefixes=['y'])
self.assertEqual(_bytes('<z:b xmlns:y="http://bcd" xmlns:z="http://cde"></z:b>'),
s)
def test_c14n_tostring_inclusive_ns_prefixes(self):
""" Regression test to fix memory allocation issues (use 3+ inclusive NS spaces)"""
tree = self.parse(_bytes(
'<a xmlns:x="http://abc" xmlns:y="http://bcd" xmlns:z="http://cde"><z:b/></a>'))
s = etree.tostring(tree, method='c14n', exclusive=True, inclusive_ns_prefixes=['x', 'y', 'z'])
self.assertEqual(_bytes('<a xmlns:x="http://abc" xmlns:y="http://bcd" xmlns:z="http://cde"><z:b></z:b></a>'),
s)
class ETreeWriteTestCase(HelperTestCase):
def test_write(self):
tree = self.parse(_bytes('<a><b/></a>'))
f = BytesIO()
tree.write(f)
s = f.getvalue()
self.assertEqual(_bytes('<a><b/></a>'),
s)
def test_write_gzip(self):
tree = self.parse(_bytes('<a>'+'<b/>'*200+'</a>'))
f = BytesIO()
tree.write(f, compression=9)
gzfile = gzip.GzipFile(fileobj=BytesIO(f.getvalue()))
try:
s = gzfile.read()
finally:
gzfile.close()
self.assertEqual(_bytes('<a>'+'<b/>'*200+'</a>'),
s)
def test_write_gzip_level(self):
tree = self.parse(_bytes('<a>'+'<b/>'*200+'</a>'))
f = BytesIO()
tree.write(f, compression=0)
s0 = f.getvalue()
f = BytesIO()
tree.write(f)
self.assertEqual(f.getvalue(), s0)
f = BytesIO()
tree.write(f, compression=1)
s = f.getvalue()
self.assertTrue(len(s) <= len(s0))
gzfile = gzip.GzipFile(fileobj=BytesIO(s))
try:
s1 = gzfile.read()
finally:
gzfile.close()
f = BytesIO()
tree.write(f, compression=9)
s = f.getvalue()
self.assertTrue(len(s) <= len(s0))
gzfile = gzip.GzipFile(fileobj=BytesIO(s))
try:
s9 = gzfile.read()
finally:
gzfile.close()
self.assertEqual(_bytes('<a>'+'<b/>'*200+'</a>'),
s0)
self.assertEqual(_bytes('<a>'+'<b/>'*200+'</a>'),
s1)
self.assertEqual(_bytes('<a>'+'<b/>'*200+'</a>'),
s9)
def test_write_file(self):
tree = self.parse(_bytes('<a><b/></a>'))
handle, filename = tempfile.mkstemp()
try:
tree.write(filename)
data = read_file(filename, 'rb')
finally:
os.close(handle)
os.remove(filename)
self.assertEqual(_bytes('<a><b/></a>'),
data)
def test_write_file_gzip(self):
tree = self.parse(_bytes('<a>'+'<b/>'*200+'</a>'))
handle, filename = tempfile.mkstemp()
try:
tree.write(filename, compression=9)
f = gzip.open(filename, 'rb')
try:
data = f.read()
finally:
f.close()
finally:
os.close(handle)
os.remove(filename)
self.assertEqual(_bytes('<a>'+'<b/>'*200+'</a>'),
data)
def test_write_file_gzip_parse(self):
tree = self.parse(_bytes('<a>'+'<b/>'*200+'</a>'))
handle, filename = tempfile.mkstemp()
try:
tree.write(filename, compression=9)
data = etree.tostring(etree.parse(filename))
finally:
os.close(handle)
os.remove(filename)
self.assertEqual(_bytes('<a>'+'<b/>'*200+'</a>'),
data)
def test_write_file_gzipfile_parse(self):
tree = self.parse(_bytes('<a>'+'<b/>'*200+'</a>'))
handle, filename = tempfile.mkstemp()
try:
tree.write(filename, compression=9)
data = etree.tostring(etree.parse(
gzip.GzipFile(filename)))
finally:
os.close(handle)
os.remove(filename)
self.assertEqual(_bytes('<a>'+'<b/>'*200+'</a>'),
data)
class ETreeErrorLogTest(HelperTestCase):
etree = etree
def test_parse_error_logging(self):
parse = self.etree.parse
f = BytesIO('<a><b></c></b></a>')
self.etree.clear_error_log()
try:
parse(f)
logs = None
except SyntaxError:
e = sys.exc_info()[1]
logs = e.error_log
f.close()
self.assertTrue([ log for log in logs
if 'mismatch' in log.message ])
self.assertTrue([ log for log in logs
if 'PARSER' in log.domain_name])
self.assertTrue([ log for log in logs
if 'ERR_TAG_NAME_MISMATCH' in log.type_name ])
self.assertTrue([ log for log in logs
if 1 == log.line ])
self.assertTrue([ log for log in logs
if 15 == log.column ])
def _test_python_error_logging(self):
"""This can't really be tested as long as there isn't a way to
reset the logging setup ...
"""
parse = self.etree.parse
messages = []
class Logger(self.etree.PyErrorLog):
def log(self, entry, message, *args):
messages.append(message)
self.etree.use_global_python_log(Logger())
f = BytesIO('<a><b></c></b></a>')
try:
parse(f)
except SyntaxError:
pass
f.close()
self.assertTrue([ message for message in messages
if 'mismatch' in message ])
self.assertTrue([ message for message in messages
if ':PARSER:' in message])
self.assertTrue([ message for message in messages
if ':ERR_TAG_NAME_MISMATCH:' in message ])
self.assertTrue([ message for message in messages
if ':1:15:' in message ])
class XMLPullParserTest(unittest.TestCase):
etree = etree
def assert_event_tags(self, events, expected):
self.assertEqual([(action, elem.tag) for action, elem in events],
expected)
def test_pull_from_simple_target(self):
class Target(object):
def start(self, tag, attrib):
return 'start(%s)' % tag
def end(self, tag):
return 'end(%s)' % tag
def close(self):
return 'close()'
parser = self.etree.XMLPullParser(target=Target())
events = parser.read_events()
parser.feed('<root><element>')
self.assertFalse(list(events))
self.assertFalse(list(events))
parser.feed('</element><child>')
self.assertEqual([('end', 'end(element)')], list(events))
parser.feed('</child>')
self.assertEqual([('end', 'end(child)')], list(events))
parser.feed('</root>')
self.assertEqual([('end', 'end(root)')], list(events))
self.assertFalse(list(events))
self.assertEqual('close()', parser.close())
def test_pull_from_simple_target_start_end(self):
class Target(object):
def start(self, tag, attrib):
return 'start(%s)' % tag
def end(self, tag):
return 'end(%s)' % tag
def close(self):
return 'close()'
parser = self.etree.XMLPullParser(
['start', 'end'], target=Target())
events = parser.read_events()
parser.feed('<root><element>')
self.assertEqual(
[('start', 'start(root)'), ('start', 'start(element)')],
list(events))
self.assertFalse(list(events))
parser.feed('</element><child>')
self.assertEqual(
[('end', 'end(element)'), ('start', 'start(child)')],
list(events))
parser.feed('</child>')
self.assertEqual(
[('end', 'end(child)')],
list(events))
parser.feed('</root>')
self.assertEqual(
[('end', 'end(root)')],
list(events))
self.assertFalse(list(events))
self.assertEqual('close()', parser.close())
def test_pull_from_tree_builder(self):
parser = self.etree.XMLPullParser(
['start', 'end'], target=etree.TreeBuilder())
events = parser.read_events()
parser.feed('<root><element>')
self.assert_event_tags(
events, [('start', 'root'), ('start', 'element')])
self.assertFalse(list(events))
parser.feed('</element><child>')
self.assert_event_tags(
events, [('end', 'element'), ('start', 'child')])
parser.feed('</child>')
self.assert_event_tags(
events, [('end', 'child')])
parser.feed('</root>')
self.assert_event_tags(
events, [('end', 'root')])
self.assertFalse(list(events))
root = parser.close()
self.assertEqual('root', root.tag)
def test_pull_from_tree_builder_subclass(self):
class Target(etree.TreeBuilder):
def end(self, tag):
el = super(Target, self).end(tag)
el.tag += '-huhu'
return el
parser = self.etree.XMLPullParser(
['start', 'end'], target=Target())
events = parser.read_events()
parser.feed('<root><element>')
self.assert_event_tags(
events, [('start', 'root'), ('start', 'element')])
self.assertFalse(list(events))
parser.feed('</element><child>')
self.assert_event_tags(
events, [('end', 'element-huhu'), ('start', 'child')])
parser.feed('</child>')
self.assert_event_tags(
events, [('end', 'child-huhu')])
parser.feed('</root>')
self.assert_event_tags(
events, [('end', 'root-huhu')])
self.assertFalse(list(events))
root = parser.close()
self.assertEqual('root-huhu', root.tag)
def test_suite():
suite = unittest.TestSuite()
suite.addTests([unittest.makeSuite(ETreeOnlyTestCase)])
suite.addTests([unittest.makeSuite(ETreeXIncludeTestCase)])
suite.addTests([unittest.makeSuite(ElementIncludeTestCase)])
suite.addTests([unittest.makeSuite(ETreeC14NTestCase)])
suite.addTests([unittest.makeSuite(ETreeWriteTestCase)])
suite.addTests([unittest.makeSuite(ETreeErrorLogTest)])
suite.addTests([unittest.makeSuite(XMLPullParserTest)])
suite.addTests(doctest.DocTestSuite(etree))
suite.addTests(
[make_doctest('../../../doc/tutorial.txt')])
if sys.version_info >= (2,6):
# now requires the 'with' statement
suite.addTests(
[make_doctest('../../../doc/api.txt')])
suite.addTests(
[make_doctest('../../../doc/FAQ.txt')])
suite.addTests(
[make_doctest('../../../doc/parsing.txt')])
suite.addTests(
[make_doctest('../../../doc/resolvers.txt')])
return suite
if __name__ == '__main__':
print('to test use test.py %s' % __file__)
|
laurent-george/weboob
|
refs/heads/master
|
modules/europarl/test.py
|
7
|
# -*- coding: utf-8 -*-
# Copyright(C) 2010-2011 Romain Bignon
# Copyright(C) 2012 François Revol
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from weboob.tools.test import BackendTest
#from weboob.capabilities.video import BaseVideo
class EuroparlTest(BackendTest):
MODULE = 'europarl'
# def test_search(self):
# l = list(self.backend.search_videos('neelie kroes'))
# self.assertTrue(len(l) > 0)
# v = l[0]
# self.backend.fillobj(v, ('url',))
# self.assertTrue(v.url and v.url.startswith('http://'), 'URL for video "%s" not found: %s' % (v.id, v.url))
# self.backend.browser.openurl(v.url)
# def test_latest(self):
# l = list(self.backend.iter_resources([BaseVideo], [u'latest']))
# self.assertTrue(len(l) > 0)
# v = l[0]
# self.backend.fillobj(v, ('url',))
# self.assertTrue(v.url and v.url.startswith('http://'), 'URL for video "%s" not found: %s' % (v.id, v.url))
|
nburn42/tensorflow
|
refs/heads/master
|
tensorflow/contrib/training/python/training/training_test.py
|
44
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.contrib.training.training."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
from tensorflow.contrib.framework.python.ops import variables as variables_lib
from tensorflow.contrib.layers.python.layers import layers
from tensorflow.contrib.training.python.training import training
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables as variables_lib2
from tensorflow.python.ops.losses import losses
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import gradient_descent
from tensorflow.python.training import monitored_session
from tensorflow.python.training import saver as saver_lib
# pylint: enable=g-import-not-at-top
def logistic_classifier(inputs):
return layers.fully_connected(inputs, 1, activation_fn=math_ops.sigmoid)
def batchnorm_classifier(inputs):
inputs = layers.batch_norm(inputs, decay=0.1, fused=False)
return layers.fully_connected(inputs, 1, activation_fn=math_ops.sigmoid)
class ClipGradsTest(test.TestCase):
def testClipGrads(self):
xs = variables_lib2.Variable(0.0)
ys = xs * 4.0
grads = gradients_impl.gradients([ys], [xs])
gradients_to_variables = list(zip(grads, [xs]))
clipped_gradients_to_variables = training.clip_gradient_norms(
gradients_to_variables, 3.0)
with self.test_session() as session:
session.run(variables_lib2.global_variables_initializer())
self.assertAlmostEqual(4.0, gradients_to_variables[0][0].eval())
self.assertAlmostEqual(3.0, clipped_gradients_to_variables[0][0].eval())
def testClipGradsFn(self):
xs = variables_lib2.Variable(0.0)
ys = xs * 4.0
grads = gradients_impl.gradients([ys], [xs])
gradients_to_variables = list(zip(grads, [xs]))
clipped_gradients_to_variables = training.clip_gradient_norms_fn(3.0)(
gradients_to_variables)
with self.test_session() as session:
session.run(variables_lib2.global_variables_initializer())
self.assertAlmostEqual(4.0, gradients_to_variables[0][0].eval())
self.assertAlmostEqual(3.0, clipped_gradients_to_variables[0][0].eval())
class CreateTrainOpTest(test.TestCase):
def setUp(self):
np.random.seed(0)
# Create an easy training set:
self._inputs = np.random.rand(16, 4).astype(np.float32)
self._labels = np.random.randint(0, 2, size=(16, 1)).astype(np.float32)
def testTrainOpInCollection(self):
with ops.Graph().as_default():
tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
tf_predictions = batchnorm_classifier(tf_inputs)
loss = losses.log_loss(tf_labels, tf_predictions)
optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
train_op = training.create_train_op(loss, optimizer)
# Make sure the training op was recorded in the proper collection
self.assertTrue(train_op in ops.get_collection(ops.GraphKeys.TRAIN_OP))
def testUseUpdateOps(self):
with ops.Graph().as_default():
random_seed.set_random_seed(0)
tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
expected_mean = np.mean(self._inputs, axis=(0))
expected_var = np.var(self._inputs, axis=(0))
tf_predictions = batchnorm_classifier(tf_inputs)
loss = losses.log_loss(tf_labels, tf_predictions)
optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
train_op = training.create_train_op(loss, optimizer)
moving_mean = variables_lib.get_variables_by_name('moving_mean')[0]
moving_variance = variables_lib.get_variables_by_name('moving_variance')[
0]
with self.test_session() as session:
# Initialize all variables
session.run(variables_lib2.global_variables_initializer())
mean, variance = session.run([moving_mean, moving_variance])
# After initialization moving_mean == 0 and moving_variance == 1.
self.assertAllClose(mean, [0] * 4)
self.assertAllClose(variance, [1] * 4)
for _ in range(10):
session.run(train_op)
mean = moving_mean.eval()
variance = moving_variance.eval()
# After 10 updates with decay 0.1 moving_mean == expected_mean and
# moving_variance == expected_var.
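        # (With decay=0.1, each update keeps only 10% of the previous moving
        # statistic: moving = 0.1 * moving + 0.9 * batch_stat. After 10 updates
        # on the same constant batch, the initial value's weight is
        # 0.1**10 ~= 1e-10, so the moving statistics have effectively converged
        # to the batch statistics asserted below.)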
self.assertAllClose(mean, expected_mean)
self.assertAllClose(variance, expected_var)
def testEmptyUpdateOps(self):
with ops.Graph().as_default():
random_seed.set_random_seed(0)
tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
tf_predictions = batchnorm_classifier(tf_inputs)
loss = losses.log_loss(tf_labels, tf_predictions)
optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
train_op = training.create_train_op(loss, optimizer, update_ops=[])
moving_mean = variables_lib.get_variables_by_name('moving_mean')[0]
moving_variance = variables_lib.get_variables_by_name('moving_variance')[
0]
with self.test_session() as session:
# Initialize all variables
session.run(variables_lib2.global_variables_initializer())
mean, variance = session.run([moving_mean, moving_variance])
# After initialization moving_mean == 0 and moving_variance == 1.
self.assertAllClose(mean, [0] * 4)
self.assertAllClose(variance, [1] * 4)
for _ in range(10):
session.run(train_op)
mean = moving_mean.eval()
variance = moving_variance.eval()
# Since we skip update_ops the moving_vars are not updated.
self.assertAllClose(mean, [0] * 4)
self.assertAllClose(variance, [1] * 4)
def testGlobalStepIsIncrementedByDefault(self):
with ops.Graph().as_default():
random_seed.set_random_seed(0)
tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
tf_predictions = batchnorm_classifier(tf_inputs)
loss = losses.log_loss(tf_labels, tf_predictions)
optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
train_op = training.create_train_op(loss, optimizer)
global_step = variables_lib.get_or_create_global_step()
with self.test_session() as session:
# Initialize all variables
session.run(variables_lib2.global_variables_initializer())
for _ in range(10):
session.run(train_op)
# After 10 updates global_step should be 10.
self.assertAllClose(global_step.eval(), 10)
def testGlobalStepNotIncrementedWhenSetToNone(self):
with ops.Graph().as_default():
random_seed.set_random_seed(0)
tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
tf_predictions = batchnorm_classifier(tf_inputs)
loss = losses.log_loss(tf_labels, tf_predictions)
optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
train_op = training.create_train_op(loss, optimizer, global_step=None)
global_step = variables_lib.get_or_create_global_step()
with self.test_session() as session:
# Initialize all variables
session.run(variables_lib2.global_variables_initializer())
for _ in range(10):
session.run(train_op)
        # Since the train_op doesn't use global_step, it shouldn't change.
self.assertAllClose(global_step.eval(), 0)
class TrainBatchNormClassifierTest(test.TestCase):
def setUp(self):
# Create an easy training set:
np.random.seed(0)
self._inputs = np.zeros((16, 4))
self._labels = np.random.randint(0, 2, size=(16, 1)).astype(np.float32)
for i in range(16):
j = int(2 * self._labels[i] + np.random.randint(0, 2))
self._inputs[i, j] = 1
def testTrainWithNoInitAssignCanAchieveZeroLoss(self):
with ops.Graph().as_default():
random_seed.set_random_seed(0)
tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
tf_predictions = batchnorm_classifier(tf_inputs)
losses.log_loss(tf_labels, tf_predictions)
total_loss = losses.get_total_loss()
optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
train_op = training.create_train_op(total_loss, optimizer)
loss = training.train(
train_op,
None,
hooks=[basic_session_run_hooks.StopAtStepHook(num_steps=300)],
save_summaries_steps=None,
save_checkpoint_secs=None)
self.assertLess(loss, .1)
class TrainTest(test.TestCase):
def setUp(self):
# Create an easy training set:
np.random.seed(0)
self._inputs = np.zeros((16, 4))
self._labels = np.random.randint(0, 2, size=(16, 1)).astype(np.float32)
for i in range(16):
j = int(2 * self._labels[i] + np.random.randint(0, 2))
self._inputs[i, j] = 1
def testCanAchieveZeroLoss(self):
with ops.Graph().as_default():
random_seed.set_random_seed(0)
tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
tf_predictions = logistic_classifier(tf_inputs)
losses.log_loss(tf_labels, tf_predictions)
total_loss = losses.get_total_loss()
optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
train_op = training.create_train_op(total_loss, optimizer)
loss = training.train(
train_op,
None,
hooks=[basic_session_run_hooks.StopAtStepHook(num_steps=300)],
save_summaries_steps=None,
save_checkpoint_secs=None)
self.assertIsNotNone(loss)
self.assertLess(loss, .015)
def testTrainWithLocalVariable(self):
with ops.Graph().as_default():
random_seed.set_random_seed(0)
tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
local_multiplier = variables_lib.local_variable(1.0)
tf_predictions = logistic_classifier(tf_inputs) * local_multiplier
losses.log_loss(tf_labels, tf_predictions)
total_loss = losses.get_total_loss()
optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
train_op = training.create_train_op(total_loss, optimizer)
loss = training.train(
train_op,
None,
hooks=[basic_session_run_hooks.StopAtStepHook(num_steps=300)],
save_summaries_steps=None,
save_checkpoint_secs=None)
self.assertIsNotNone(loss)
self.assertLess(loss, .015)
def testResumeTrainAchievesRoughlyTheSameLoss(self):
number_of_steps = [300, 1, 5]
logdir = os.path.join(self.get_temp_dir(), 'resume_train_same_loss')
for i in range(len(number_of_steps)):
with ops.Graph().as_default():
random_seed.set_random_seed(i)
tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
tf_predictions = logistic_classifier(tf_inputs)
losses.log_loss(tf_labels, tf_predictions)
total_loss = losses.get_total_loss()
optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
train_op = training.create_train_op(total_loss, optimizer)
saver = saver_lib.Saver()
loss = training.train(
train_op,
logdir,
hooks=[
basic_session_run_hooks.StopAtStepHook(
num_steps=number_of_steps[i]),
basic_session_run_hooks.CheckpointSaverHook(
logdir, save_steps=50, saver=saver),
],
save_checkpoint_secs=None,
save_summaries_steps=None)
self.assertIsNotNone(loss)
self.assertLess(loss, .015)
def create_train_op(self, learning_rate=1.0, gradient_multiplier=1.0):
tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
tf_predictions = logistic_classifier(tf_inputs)
losses.log_loss(tf_labels, tf_predictions)
total_loss = losses.get_total_loss()
optimizer = gradient_descent.GradientDescentOptimizer(
learning_rate=learning_rate)
def transform_grads_fn(grads):
if gradient_multiplier != 1.0:
variables = variables_lib2.trainable_variables()
gradient_multipliers = {var: gradient_multiplier for var in variables}
with ops.name_scope('multiply_grads'):
return training.multiply_gradients(grads, gradient_multipliers)
else:
return grads
return training.create_train_op(
total_loss, optimizer, transform_grads_fn=transform_grads_fn)
def testTrainWithInitFromCheckpoint(self):
logdir1 = os.path.join(self.get_temp_dir(), 'tmp_logs1/')
logdir2 = os.path.join(self.get_temp_dir(), 'tmp_logs2/')
if gfile.Exists(logdir1): # For running on jenkins.
gfile.DeleteRecursively(logdir1)
if gfile.Exists(logdir2): # For running on jenkins.
gfile.DeleteRecursively(logdir2)
# First, train the model one step (make sure the error is high).
with ops.Graph().as_default():
random_seed.set_random_seed(0)
train_op = self.create_train_op()
saver = saver_lib.Saver()
loss = training.train(
train_op,
logdir1,
hooks=[
basic_session_run_hooks.CheckpointSaverHook(
logdir1, save_steps=1, saver=saver),
basic_session_run_hooks.StopAtStepHook(num_steps=1),
],
save_checkpoint_secs=None,
save_summaries_steps=None)
self.assertGreater(loss, .5)
# Next, train the model to convergence.
with ops.Graph().as_default():
random_seed.set_random_seed(1)
train_op = self.create_train_op()
saver = saver_lib.Saver()
loss = training.train(
train_op,
logdir1,
hooks=[
basic_session_run_hooks.CheckpointSaverHook(
logdir1, save_steps=300, saver=saver),
basic_session_run_hooks.StopAtStepHook(num_steps=300),
],
save_checkpoint_secs=None,
save_summaries_steps=None)
self.assertIsNotNone(loss)
self.assertLess(loss, .02)
# Finally, advance the model a single step and validate that the loss is
# still low.
with ops.Graph().as_default():
random_seed.set_random_seed(2)
train_op = self.create_train_op()
model_variables = variables_lib2.global_variables()
model_path = saver_lib.latest_checkpoint(logdir1)
assign_fn = variables_lib.assign_from_checkpoint_fn(
model_path, model_variables)
def init_fn(_, session):
assign_fn(session)
loss = training.train(
train_op,
None,
scaffold=monitored_session.Scaffold(init_fn=init_fn),
hooks=[basic_session_run_hooks.StopAtStepHook(num_steps=1)],
save_checkpoint_secs=None,
save_summaries_steps=None)
self.assertIsNotNone(loss)
self.assertLess(loss, .02)
def ModelLoss(self):
tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
tf_predictions = logistic_classifier(tf_inputs)
losses.log_loss(tf_labels, tf_predictions)
return losses.get_total_loss()
def testTrainAllVarsHasLowerLossThanTrainSubsetOfVars(self):
logdir = os.path.join(self.get_temp_dir(), 'tmp_logs3/')
if gfile.Exists(logdir): # For running on jenkins.
gfile.DeleteRecursively(logdir)
# First, train only the weights of the model.
with ops.Graph().as_default():
random_seed.set_random_seed(0)
total_loss = self.ModelLoss()
optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
weights = variables_lib.get_variables_by_name('weights')
train_op = training.create_train_op(
total_loss, optimizer, variables_to_train=weights)
saver = saver_lib.Saver()
loss = training.train(
train_op,
logdir,
hooks=[
basic_session_run_hooks.CheckpointSaverHook(
logdir, save_steps=200, saver=saver),
basic_session_run_hooks.StopAtStepHook(num_steps=200),
],
save_checkpoint_secs=None,
save_summaries_steps=None)
self.assertGreater(loss, .015)
self.assertLess(loss, .05)
# Next, train the biases of the model.
with ops.Graph().as_default():
random_seed.set_random_seed(1)
total_loss = self.ModelLoss()
optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
biases = variables_lib.get_variables_by_name('biases')
train_op = training.create_train_op(
total_loss, optimizer, variables_to_train=biases)
saver = saver_lib.Saver()
loss = training.train(
train_op,
logdir,
hooks=[
basic_session_run_hooks.CheckpointSaverHook(
logdir, save_steps=300, saver=saver),
basic_session_run_hooks.StopAtStepHook(num_steps=300),
],
save_checkpoint_secs=None,
save_summaries_steps=None)
self.assertGreater(loss, .015)
self.assertLess(loss, .05)
# Finally, train both weights and bias to get lower loss.
with ops.Graph().as_default():
random_seed.set_random_seed(2)
total_loss = self.ModelLoss()
optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
train_op = training.create_train_op(total_loss, optimizer)
saver = saver_lib.Saver()
loss = training.train(
train_op,
logdir,
hooks=[
basic_session_run_hooks.StopAtStepHook(num_steps=400),
],
save_checkpoint_secs=None,
save_summaries_steps=None)
self.assertIsNotNone(loss)
self.assertLess(loss, .015)
def testTrainingSubsetsOfVariablesOnlyUpdatesThoseVariables(self):
# First, train only the weights of the model.
with ops.Graph().as_default():
random_seed.set_random_seed(0)
total_loss = self.ModelLoss()
optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
weights, biases = variables_lib.get_variables()
train_op = training.create_train_op(total_loss, optimizer)
train_weights = training.create_train_op(
total_loss, optimizer, variables_to_train=[weights])
train_biases = training.create_train_op(
total_loss, optimizer, variables_to_train=[biases])
with self.test_session() as session:
# Initialize the variables.
session.run(variables_lib2.global_variables_initializer())
# Get the initial weights and biases values.
weights_values, biases_values = session.run([weights, biases])
self.assertGreater(np.linalg.norm(weights_values), 0)
self.assertAlmostEqual(np.linalg.norm(biases_values), 0)
# Update weights and biases.
loss = session.run(train_op)
self.assertGreater(loss, .5)
new_weights, new_biases = session.run([weights, biases])
# Check that the weights and biases have been updated.
self.assertGreater(np.linalg.norm(weights_values - new_weights), 0)
self.assertGreater(np.linalg.norm(biases_values - new_biases), 0)
weights_values, biases_values = new_weights, new_biases
# Update only weights.
loss = session.run(train_weights)
self.assertGreater(loss, .5)
new_weights, new_biases = session.run([weights, biases])
# Check that the weights have been updated, but biases have not.
self.assertGreater(np.linalg.norm(weights_values - new_weights), 0)
self.assertAlmostEqual(np.linalg.norm(biases_values - new_biases), 0)
weights_values = new_weights
# Update only biases.
loss = session.run(train_biases)
self.assertGreater(loss, .5)
new_weights, new_biases = session.run([weights, biases])
# Check that the biases have been updated, but weights have not.
self.assertAlmostEqual(np.linalg.norm(weights_values - new_weights), 0)
self.assertGreater(np.linalg.norm(biases_values - new_biases), 0)
def testTrainWithAlteredGradients(self):
    # Use the same learning rate but different gradient multipliers
    # to train two models. The model with the equivalently larger learning
    # rate (i.e., learning_rate * gradient_multiplier) should reach a smaller
    # training loss.
multipliers = [1., 1000.]
number_of_steps = 10
learning_rate = 0.001
# First, train the model with equivalently smaller learning rate.
with ops.Graph().as_default():
random_seed.set_random_seed(0)
train_op = self.create_train_op(
learning_rate=learning_rate, gradient_multiplier=multipliers[0])
loss0 = training.train(
train_op,
None,
hooks=[
basic_session_run_hooks.StopAtStepHook(num_steps=number_of_steps),
],
save_checkpoint_secs=None,
save_summaries_steps=None)
self.assertIsNotNone(loss0)
self.assertGreater(loss0, .5)
# Second, train the model with equivalently larger learning rate.
with ops.Graph().as_default():
random_seed.set_random_seed(0)
train_op = self.create_train_op(
learning_rate=learning_rate, gradient_multiplier=multipliers[1])
loss1 = training.train(
train_op,
None,
hooks=[
basic_session_run_hooks.StopAtStepHook(num_steps=number_of_steps),
],
save_checkpoint_secs=None,
save_summaries_steps=None)
self.assertIsNotNone(loss1)
self.assertLess(loss1, .5)
# The loss of the model trained with larger learning rate should
# be smaller.
self.assertGreater(loss0, loss1)
if __name__ == '__main__':
test.main()
|
narry/odenos
|
refs/heads/develop
|
src/test/python/org/o3project/__init__.py
|
233
|
# -*- coding:utf-8 -*-
# Copyright 2015 NEC Corporation. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
|
aestrivex/mne-python
|
refs/heads/master
|
examples/stats/plot_cluster_1samp_test_time_frequency.py
|
5
|
"""
===============================================================
Non-parametric 1 sample cluster statistic on single trial power
===============================================================
This script shows how to estimate significant clusters
in time-frequency power estimates. It uses a non-parametric
statistical procedure based on permutations and cluster
level statistics.
The procedure consists of:
- extracting epochs
- computing single trial power estimates
- baseline-correcting the power estimates (power ratios)
- computing stats to see if the ratio deviates from 1.
"""
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne import io
from mne.time_frequency import single_trial_power
from mne.stats import permutation_cluster_1samp_test
from mne.datasets import sample
print(__doc__)
###############################################################################
# Set parameters
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
event_id = 1
tmin = -0.3
tmax = 0.6
# Setup for reading the raw data
raw = io.Raw(raw_fname)
events = mne.find_events(raw, stim_channel='STI 014')
include = []
raw.info['bads'] += ['MEG 2443', 'EEG 053'] # bads + 2 more
# picks MEG gradiometers
picks = mne.pick_types(raw.info, meg='grad', eeg=False, eog=True,
stim=False, include=include, exclude='bads')
# Load condition 1
event_id = 1
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), reject=dict(grad=4000e-13, eog=150e-6))
data = epochs.get_data() # as 3D matrix
data *= 1e13 # change unit to fT / cm
# Time vector
times = 1e3 * epochs.times # change unit to ms
# Take only one channel
ch_name = raw.info['ch_names'][97]
data = data[:, 97:98, :]
evoked_data = np.mean(data, 0)
# data -= evoked_data[None,:,:] # remove evoked component
# evoked_data = np.mean(data, 0)
# Factor to down-sample the temporal dimension of the PSD computed by
# single_trial_power. Decimation occurs after frequency decomposition and can
# be used to reduce memory usage (and possibly computational time of downstream
# operations such as nonparametric statistics) if you don't need high
# spectrotemporal resolution.
decim = 5
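# (For example, an original sampling rate of 600 Hz becomes an effective
# 600 / decim = 120 Hz in the decimated power estimates; the actual rate used
# below is read from raw.info['sfreq'].)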
frequencies = np.arange(8, 40, 2) # define frequencies of interest
sfreq = raw.info['sfreq'] # sampling in Hz
epochs_power = single_trial_power(data, sfreq=sfreq, frequencies=frequencies,
n_cycles=4, n_jobs=1,
baseline=(-100, 0), times=times,
baseline_mode='ratio', decim=decim)
# Crop in time to keep only what is between 0 and 400 ms
time_mask = (times > 0) & (times < 400)
evoked_data = evoked_data[:, time_mask]
times = times[time_mask]
# The time vector reflects the original time points, not the decimated time
# points returned by single trial power. Be sure to decimate the time mask
# appropriately.
epochs_power = epochs_power[..., time_mask[::decim]]
epochs_power = epochs_power[:, 0, :, :]
epochs_power = np.log10(epochs_power) # take log of ratio
# under the null hypothesis epochs_power should now be 0
###############################################################################
# Compute statistic
threshold = 2.5
T_obs, clusters, cluster_p_values, H0 = \
permutation_cluster_1samp_test(epochs_power, n_permutations=100,
threshold=threshold, tail=0)
###############################################################################
# View time-frequency plots
plt.clf()
plt.subplots_adjust(0.12, 0.08, 0.96, 0.94, 0.2, 0.43)
plt.subplot(2, 1, 1)
plt.plot(times, evoked_data.T)
plt.title('Evoked response (%s)' % ch_name)
plt.xlabel('time (ms)')
plt.ylabel('Magnetic Field (fT/cm)')
plt.xlim(times[0], times[-1])
plt.ylim(-100, 250)
plt.subplot(2, 1, 2)
# Create new stats image with only significant clusters
T_obs_plot = np.nan * np.ones_like(T_obs)
for c, p_val in zip(clusters, cluster_p_values):
if p_val <= 0.05:
T_obs_plot[c] = T_obs[c]
vmax = np.max(np.abs(T_obs))
vmin = -vmax
plt.imshow(T_obs, cmap=plt.cm.gray,
extent=[times[0], times[-1], frequencies[0], frequencies[-1]],
aspect='auto', origin='lower', vmin=vmin, vmax=vmax)
plt.imshow(T_obs_plot, cmap=plt.cm.RdBu_r,
extent=[times[0], times[-1], frequencies[0], frequencies[-1]],
aspect='auto', origin='lower', vmin=vmin, vmax=vmax)
plt.colorbar()
plt.xlabel('time (ms)')
plt.ylabel('Frequency (Hz)')
plt.title('Induced power (%s)' % ch_name)
plt.show()
|
whereismyjetpack/ansible
|
refs/heads/devel
|
lib/ansible/plugins/filter/json_query.py
|
93
|
# (c) 2015, Filipe Niero Felisbino <filipenf@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.errors import AnsibleError
from ansible.plugins.lookup import LookupBase
from ansible.utils.listify import listify_lookup_plugin_terms
try:
import jmespath
HAS_LIB = True
except ImportError:
HAS_LIB = False
def json_query(data, expr):
    '''Query data using the JMESPath query language (http://jmespath.org). Example:
    - debug: msg="{{ instance | json_query('tagged_instances[*].block_device_mapping.*.volume_id') }}"
'''
if not HAS_LIB:
raise AnsibleError('You need to install "jmespath" prior to running '
'json_query filter')
return jmespath.search(expr, data)
class FilterModule(object):
''' Query filter '''
def filters(self):
return {
'json_query': json_query
}
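# --- Illustrative sketch (not part of the original plugin) ------------------
# The filter can also be exercised directly from Python, which makes the
# docstring example above easier to follow. The data structure below is made
# up purely for illustration; the only assumption is that the "jmespath"
# package is installed.
def _demo_json_query():
    sample = {
        'tagged_instances': [
            {'block_device_mapping': {'/dev/sda1': {'volume_id': 'vol-1'}}},
            {'block_device_mapping': {'/dev/sdb1': {'volume_id': 'vol-2'}}},
        ]
    }
    # Mirrors the Jinja2 usage:
    #   {{ instance | json_query('tagged_instances[*].block_device_mapping.*.volume_id') }}
    return json_query(sample, 'tagged_instances[*].block_device_mapping.*.volume_id')
    # expected result: one list of volume ids per instance,
    # e.g. [['vol-1'], ['vol-2']]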
|
nacc/autotest
|
refs/heads/master
|
frontend/tko/models.py
|
3
|
from django.db import models as dbmodels, connection
from django.utils import datastructures
from autotest.frontend.afe import model_logic, readonly_connection
_quote_name = connection.ops.quote_name
class TempManager(model_logic.ExtendedManager):
_GROUP_COUNT_NAME = 'group_count'
def _get_key_unless_is_function(self, field):
if '(' in field:
return field
return self.get_key_on_this_table(field)
def _get_field_names(self, fields, extra_select_fields={}):
field_names = []
for field in fields:
if field in extra_select_fields:
field_names.append(extra_select_fields[field][0])
else:
field_names.append(self._get_key_unless_is_function(field))
return field_names
def _get_group_query_sql(self, query, group_by):
compiler = query.query.get_compiler(using=query.db)
sql, params = compiler.as_sql()
# insert GROUP BY clause into query
group_fields = self._get_field_names(group_by, query.query.extra_select)
group_by_clause = ' GROUP BY ' + ', '.join(group_fields)
group_by_position = sql.rfind('ORDER BY')
if group_by_position == -1:
group_by_position = len(sql)
sql = (sql[:group_by_position] +
group_by_clause + ' ' +
sql[group_by_position:])
return sql, params
def _get_column_names(self, cursor):
"""
Gets the column names from the cursor description. This method exists
so that it can be mocked in the unit test for sqlite3 compatibility.
"""
return [column_info[0] for column_info in cursor.description]
def execute_group_query(self, query, group_by):
"""
Performs the given query grouped by the fields in group_by with the
given query's extra select fields added. Returns a list of dicts, where
each dict corresponds to single row and contains a key for each grouped
field as well as all of the extra select fields.
"""
sql, params = self._get_group_query_sql(query, group_by)
cursor = readonly_connection.connection().cursor()
cursor.execute(sql, params)
field_names = self._get_column_names(cursor)
row_dicts = [dict(zip(field_names, row)) for row in cursor.fetchall()]
return row_dicts
def get_count_sql(self, query):
"""
Get the SQL to properly select a per-group count of unique matches for
a grouped query. Returns a tuple (field alias, field SQL)
"""
if query.query.distinct:
pk_field = self.get_key_on_this_table()
count_sql = 'COUNT(DISTINCT %s)' % pk_field
else:
count_sql = 'COUNT(1)'
return self._GROUP_COUNT_NAME, count_sql
def _get_num_groups_sql(self, query, group_by):
group_fields = self._get_field_names(group_by, query.query.extra_select)
query = query.order_by() # this can mess up the query and isn't needed
compiler = query.query.get_compiler(using=query.db)
sql, params = compiler.as_sql()
from_ = sql[sql.find(' FROM'):]
return ('SELECT DISTINCT %s %s' % (','.join(group_fields),
from_),
params)
def _cursor_rowcount(self, cursor):
"""To be stubbed by tests"""
return cursor.rowcount
def get_num_groups(self, query, group_by):
"""
Returns the number of distinct groups for the given query grouped by the
fields in group_by.
"""
sql, params = self._get_num_groups_sql(query, group_by)
cursor = readonly_connection.connection().cursor()
cursor.execute(sql, params)
return self._cursor_rowcount(cursor)
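# --- Illustrative sketch (not part of the original module) ------------------
# execute_group_query() above boils down to: run a grouped SQL query, then zip
# the column names from cursor.description with each row to build one dict per
# group. The standalone helper below mirrors that pattern with plain sqlite3 so
# the expected output shape is easy to see; the table and column names are made
# up for the example only, and 'group_count' mirrors _GROUP_COUNT_NAME.
def _demo_group_query_shape():
    import sqlite3
    conn = sqlite3.connect(':memory:')
    conn.execute('CREATE TABLE demo_tests (status TEXT, machine TEXT)')
    conn.executemany('INSERT INTO demo_tests VALUES (?, ?)',
                     [('GOOD', 'host1'), ('GOOD', 'host2'), ('FAIL', 'host1')])
    cursor = conn.execute('SELECT status, COUNT(1) AS group_count '
                          'FROM demo_tests GROUP BY status')
    field_names = [column_info[0] for column_info in cursor.description]
    # Same dict-per-row construction as execute_group_query():
    return [dict(zip(field_names, row)) for row in cursor.fetchall()]
    # e.g. [{'status': 'FAIL', 'group_count': 1},
    #       {'status': 'GOOD', 'group_count': 2}]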
class Machine(dbmodels.Model):
machine_idx = dbmodels.AutoField(primary_key=True)
hostname = dbmodels.CharField(unique=True, max_length=255)
machine_group = dbmodels.CharField(blank=True, max_length=240)
owner = dbmodels.CharField(blank=True, max_length=240)
class Meta:
db_table = 'tko_machines'
class Kernel(dbmodels.Model):
kernel_idx = dbmodels.AutoField(primary_key=True)
kernel_hash = dbmodels.CharField(max_length=105, editable=False)
base = dbmodels.CharField(max_length=90)
printable = dbmodels.CharField(max_length=300)
class Meta:
db_table = 'tko_kernels'
class Patch(dbmodels.Model):
kernel = dbmodels.ForeignKey(Kernel, db_column='kernel_idx')
name = dbmodels.CharField(blank=True, max_length=240)
url = dbmodels.CharField(blank=True, max_length=900)
the_hash = dbmodels.CharField(blank=True, max_length=105, db_column='hash')
class Meta:
db_table = 'tko_patches'
class Status(dbmodels.Model):
status_idx = dbmodels.AutoField(primary_key=True)
word = dbmodels.CharField(max_length=30)
class Meta:
db_table = 'tko_status'
class Job(dbmodels.Model, model_logic.ModelExtensions):
job_idx = dbmodels.AutoField(primary_key=True)
tag = dbmodels.CharField(unique=True, max_length=100)
label = dbmodels.CharField(max_length=300)
username = dbmodels.CharField(max_length=240)
machine = dbmodels.ForeignKey(Machine, db_column='machine_idx')
queued_time = dbmodels.DateTimeField(null=True, blank=True)
started_time = dbmodels.DateTimeField(null=True, blank=True)
finished_time = dbmodels.DateTimeField(null=True, blank=True)
afe_job_id = dbmodels.IntegerField(null=True, default=None)
objects = model_logic.ExtendedManager()
class Meta:
db_table = 'tko_jobs'
class JobKeyval(dbmodels.Model):
job = dbmodels.ForeignKey(Job)
key = dbmodels.CharField(max_length=90)
value = dbmodels.CharField(blank=True, max_length=300)
class Meta:
db_table = 'tko_job_keyvals'
class Test(dbmodels.Model, model_logic.ModelExtensions,
model_logic.ModelWithAttributes):
test_idx = dbmodels.AutoField(primary_key=True)
job = dbmodels.ForeignKey(Job, db_column='job_idx')
test = dbmodels.CharField(max_length=300)
subdir = dbmodels.CharField(blank=True, max_length=300)
kernel = dbmodels.ForeignKey(Kernel, db_column='kernel_idx')
status = dbmodels.ForeignKey(Status, db_column='status')
reason = dbmodels.CharField(blank=True, max_length=3072)
machine = dbmodels.ForeignKey(Machine, db_column='machine_idx')
finished_time = dbmodels.DateTimeField(null=True, blank=True)
started_time = dbmodels.DateTimeField(null=True, blank=True)
objects = model_logic.ExtendedManager()
def _get_attribute_model_and_args(self, attribute):
return TestAttribute, dict(test=self, attribute=attribute,
user_created=True)
def set_attribute(self, attribute, value):
# ensure non-user-created attributes remain immutable
try:
TestAttribute.objects.get(test=self, attribute=attribute,
user_created=False)
raise ValueError('Attribute %s already exists for test %s and is '
'immutable' % (attribute, self.test_idx))
except TestAttribute.DoesNotExist:
super(Test, self).set_attribute(attribute, value)
class Meta:
db_table = 'tko_tests'
class TestAttribute(dbmodels.Model, model_logic.ModelExtensions):
test = dbmodels.ForeignKey(Test, db_column='test_idx')
attribute = dbmodels.CharField(max_length=90)
value = dbmodels.CharField(blank=True, max_length=300)
user_created = dbmodels.BooleanField(default=False)
objects = model_logic.ExtendedManager()
class Meta:
db_table = 'tko_test_attributes'
class IterationAttribute(dbmodels.Model, model_logic.ModelExtensions):
# this isn't really a primary key, but it's necessary to appease Django
# and is harmless as long as we're careful
test = dbmodels.ForeignKey(Test, db_column='test_idx', primary_key=True)
iteration = dbmodels.IntegerField()
attribute = dbmodels.CharField(max_length=90)
value = dbmodels.CharField(blank=True, max_length=300)
objects = model_logic.ExtendedManager()
class Meta:
db_table = 'tko_iteration_attributes'
class IterationResult(dbmodels.Model, model_logic.ModelExtensions):
# see comment on IterationAttribute regarding primary_key=True
test = dbmodels.ForeignKey(Test, db_column='test_idx', primary_key=True)
iteration = dbmodels.IntegerField()
attribute = dbmodels.CharField(max_length=90)
value = dbmodels.FloatField(null=True, blank=True)
objects = model_logic.ExtendedManager()
class Meta:
db_table = 'tko_iteration_result'
class TestLabel(dbmodels.Model, model_logic.ModelExtensions):
name = dbmodels.CharField(max_length=80, unique=True)
description = dbmodels.TextField(blank=True)
tests = dbmodels.ManyToManyField(Test, blank=True,
db_table='tko_test_labels_tests')
name_field = 'name'
objects = model_logic.ExtendedManager()
class Meta:
db_table = 'tko_test_labels'
class SavedQuery(dbmodels.Model, model_logic.ModelExtensions):
# TODO: change this to foreign key once DBs are merged
owner = dbmodels.CharField(max_length=80)
name = dbmodels.CharField(max_length=100)
url_token = dbmodels.TextField()
class Meta:
db_table = 'tko_saved_queries'
class EmbeddedGraphingQuery(dbmodels.Model, model_logic.ModelExtensions):
url_token = dbmodels.TextField(null=False, blank=False)
graph_type = dbmodels.CharField(max_length=16, null=False, blank=False)
params = dbmodels.TextField(null=False, blank=False)
last_updated = dbmodels.DateTimeField(null=False, blank=False,
editable=False)
# refresh_time shows the time at which a thread is updating the cached
# image, or NULL if no one is updating the image. This is used so that only
# one thread is updating the cached image at a time (see
# graphing_utils.handle_plot_request)
refresh_time = dbmodels.DateTimeField(editable=False)
cached_png = dbmodels.TextField(editable=False)
class Meta:
db_table = 'tko_embedded_graphing_queries'
# views
class TestViewManager(TempManager):
def get_query_set(self):
query = super(TestViewManager, self).get_query_set()
# add extra fields to selects, using the SQL itself as the "alias"
extra_select = dict((sql, sql)
for sql in self.model.extra_fields.iterkeys())
return query.extra(select=extra_select)
def _get_include_exclude_suffix(self, exclude):
if exclude:
return '_exclude'
return '_include'
def _add_attribute_join(self, query_set, join_condition,
suffix=None, exclude=False):
if suffix is None:
suffix = self._get_include_exclude_suffix(exclude)
return self.add_join(query_set, 'tko_test_attributes',
join_key='test_idx',
join_condition=join_condition,
suffix=suffix, exclude=exclude)
def _add_label_pivot_table_join(self, query_set, suffix, join_condition='',
exclude=False, force_left_join=False):
return self.add_join(query_set, 'tko_test_labels_tests',
join_key='test_id',
join_condition=join_condition,
suffix=suffix, exclude=exclude,
force_left_join=force_left_join)
def _add_label_joins(self, query_set, suffix=''):
query_set = self._add_label_pivot_table_join(
query_set, suffix=suffix, force_left_join=True)
# since we're not joining from the original table, we can't use
# self.add_join() again
second_join_alias = 'tko_test_labels' + suffix
second_join_condition = ('%s.id = %s.testlabel_id' %
(second_join_alias,
'tko_test_labels_tests' + suffix))
query_set.query.add_custom_join('tko_test_labels',
second_join_condition,
query_set.query.LOUTER,
alias=second_join_alias)
return query_set
def _get_label_ids_from_names(self, label_names):
label_ids = list( # listifying avoids a double query below
TestLabel.objects.filter(name__in=label_names)
.values_list('name', 'id'))
if len(label_ids) < len(set(label_names)):
raise ValueError('Not all labels found: %s' %
', '.join(label_names))
return dict(name_and_id for name_and_id in label_ids)
def _include_or_exclude_labels(self, query_set, label_names, exclude=False):
label_ids = self._get_label_ids_from_names(label_names).itervalues()
suffix = self._get_include_exclude_suffix(exclude)
condition = ('tko_test_labels_tests%s.testlabel_id IN (%s)' %
(suffix,
','.join(str(label_id) for label_id in label_ids)))
return self._add_label_pivot_table_join(query_set,
join_condition=condition,
suffix=suffix,
exclude=exclude)
def _add_custom_select(self, query_set, select_name, select_sql):
return query_set.extra(select={select_name: select_sql})
def _add_select_value(self, query_set, alias):
return self._add_custom_select(query_set, alias,
_quote_name(alias) + '.value')
def _add_select_ifnull(self, query_set, alias, non_null_value):
select_sql = "IF(%s.id IS NOT NULL, '%s', NULL)" % (_quote_name(alias),
non_null_value)
return self._add_custom_select(query_set, alias, select_sql)
def _join_test_label_column(self, query_set, label_name, label_id):
alias = 'test_label_' + label_name
label_query = TestLabel.objects.filter(name=label_name)
query_set = Test.objects.join_custom_field(query_set, label_query,
alias)
query_set = self._add_select_ifnull(query_set, alias, label_name)
return query_set
def _join_test_label_columns(self, query_set, label_names):
label_id_map = self._get_label_ids_from_names(label_names)
for label_name in label_names:
query_set = self._join_test_label_column(query_set, label_name,
label_id_map[label_name])
return query_set
def _join_test_attribute(self, query_set, attribute, alias=None,
extra_join_condition=None):
"""
Join the given TestView QuerySet to TestAttribute. The resulting query
has an additional column for the given attribute named
"attribute_<attribute name>".
"""
if not alias:
alias = 'test_attribute_' + attribute
attribute_query = TestAttribute.objects.filter(attribute=attribute)
if extra_join_condition:
attribute_query = attribute_query.extra(
where=[extra_join_condition])
query_set = Test.objects.join_custom_field(query_set, attribute_query,
alias)
query_set = self._add_select_value(query_set, alias)
return query_set
def _join_machine_label_columns(self, query_set, machine_label_names):
for label_name in machine_label_names:
alias = 'machine_label_' + label_name
condition = "FIND_IN_SET('%s', %s)" % (
label_name, _quote_name(alias) + '.value')
query_set = self._join_test_attribute(
query_set, 'host-labels',
alias=alias, extra_join_condition=condition)
query_set = self._add_select_ifnull(query_set, alias, label_name)
return query_set
def _join_one_iteration_key(self, query_set, result_key, first_alias=None):
alias = 'iteration_result_' + result_key
iteration_query = IterationResult.objects.filter(attribute=result_key)
if first_alias:
# after the first join, we need to match up iteration indices,
# otherwise each join will expand the query by the number of
# iterations and we'll have extraneous rows
iteration_query = iteration_query.extra(
where=['%s.iteration = %s.iteration'
% (_quote_name(alias), _quote_name(first_alias))])
query_set = Test.objects.join_custom_field(query_set, iteration_query,
alias, left_join=False)
# select the iteration value and index for this join
query_set = self._add_select_value(query_set, alias)
if not first_alias:
# for first join, add iteration index select too
query_set = self._add_custom_select(
query_set, 'iteration_index',
_quote_name(alias) + '.iteration')
return query_set, alias
def _join_iteration_results(self, test_view_query_set, result_keys):
"""Join the given TestView QuerySet to IterationResult for one result.
The resulting query looks like a TestView query but has one row per
iteration. Each row includes all the attributes of TestView, an
attribute for each key in result_keys and an iteration_index attribute.
We accomplish this by joining the TestView query to IterationResult
once per result key. Each join is restricted on the result key (and on
the test index, like all one-to-many joins). For the first join, this
is the only restriction, so each TestView row expands to a row per
iteration (per iteration that includes the key, of course). For each
subsequent join, we also restrict the iteration index to match that of
the initial join. This makes each subsequent join produce exactly one
result row for each input row. (This assumes each iteration contains
the same set of keys. Results are undefined if that's not true.)
"""
if not result_keys:
return test_view_query_set
query_set, first_alias = self._join_one_iteration_key(
test_view_query_set, result_keys[0])
for result_key in result_keys[1:]:
query_set, _ = self._join_one_iteration_key(query_set, result_key,
first_alias=first_alias)
return query_set
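    # Worked example (illustrative only): with result_keys == ['throughput',
    # 'latency'] and a test that has two iterations, the first join on
    # 'throughput' turns the single TestView row into two rows (one per
    # iteration, each carrying iteration_index). The second join on 'latency'
    # is additionally restricted to "latency.iteration = throughput.iteration",
    # so it adds exactly one 'latency' value per existing row instead of
    # multiplying the rows again.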
def _join_job_keyvals(self, query_set, job_keyvals):
for job_keyval in job_keyvals:
alias = 'job_keyval_' + job_keyval
keyval_query = JobKeyval.objects.filter(key=job_keyval)
query_set = Job.objects.join_custom_field(query_set, keyval_query,
alias)
query_set = self._add_select_value(query_set, alias)
return query_set
def _join_iteration_attributes(self, query_set, iteration_attributes):
for attribute in iteration_attributes:
alias = 'iteration_attribute_' + attribute
attribute_query = IterationAttribute.objects.filter(
attribute=attribute)
query_set = Test.objects.join_custom_field(query_set,
attribute_query, alias)
query_set = self._add_select_value(query_set, alias)
return query_set
def get_query_set_with_joins(self, filter_data):
"""
Add joins for querying over test-related items.
These parameters are supported going forward:
* test_attribute_fields: list of attribute names. Each attribute will
be available as a column attribute_<name>.value.
* test_label_fields: list of label names. Each label will be available
as a column label_<name>.id, non-null iff the label is present.
* iteration_result_fields: list of iteration result names. Each
result will be available as a column iteration_<name>.value.
Note that this changes the semantics to return iterations
instead of tests -- if a test has multiple iterations, a row
will be returned for each one. The iteration index is also
available as iteration_<name>.iteration.
* machine_label_fields: list of machine label names. Each will be
available as a column machine_label_<name>.id, non-null iff the
label is present on the machine used in the test.
* job_keyval_fields: list of job keyval names. Each value will be
available as a column job_keyval_<name>.id, non-null iff the
keyval is present in the AFE job.
* iteration_attribute_fields: list of iteration attribute names. Each
attribute will be available as a column
iteration_attribute_<name>.id, non-null iff the attribute is
present.
These parameters are deprecated:
* include_labels
* exclude_labels
* include_attributes_where
* exclude_attributes_where
Additionally, this method adds joins if the following strings are
present in extra_where (this is also deprecated):
* test_labels
* test_attributes_host_labels
"""
query_set = self.get_query_set()
test_attributes = filter_data.pop('test_attribute_fields', [])
for attribute in test_attributes:
query_set = self._join_test_attribute(query_set, attribute)
test_labels = filter_data.pop('test_label_fields', [])
query_set = self._join_test_label_columns(query_set, test_labels)
machine_labels = filter_data.pop('machine_label_fields', [])
query_set = self._join_machine_label_columns(query_set, machine_labels)
iteration_keys = filter_data.pop('iteration_result_fields', [])
query_set = self._join_iteration_results(query_set, iteration_keys)
job_keyvals = filter_data.pop('job_keyval_fields', [])
query_set = self._join_job_keyvals(query_set, job_keyvals)
iteration_attributes = filter_data.pop('iteration_attribute_fields', [])
query_set = self._join_iteration_attributes(query_set,
iteration_attributes)
# everything that follows is deprecated behavior
joined = False
extra_where = filter_data.get('extra_where', '')
if 'tko_test_labels' in extra_where:
query_set = self._add_label_joins(query_set)
joined = True
include_labels = filter_data.pop('include_labels', [])
exclude_labels = filter_data.pop('exclude_labels', [])
if include_labels:
query_set = self._include_or_exclude_labels(query_set,
include_labels)
joined = True
if exclude_labels:
query_set = self._include_or_exclude_labels(query_set,
exclude_labels,
exclude=True)
joined = True
include_attributes_where = filter_data.pop('include_attributes_where',
'')
exclude_attributes_where = filter_data.pop('exclude_attributes_where',
'')
if include_attributes_where:
query_set = self._add_attribute_join(
query_set,
join_condition=self.escape_user_sql(include_attributes_where))
joined = True
if exclude_attributes_where:
query_set = self._add_attribute_join(
query_set,
join_condition=self.escape_user_sql(exclude_attributes_where),
exclude=True)
joined = True
if not joined:
filter_data['no_distinct'] = True
if 'tko_test_attributes_host_labels' in extra_where:
query_set = self._add_attribute_join(
query_set, suffix='_host_labels',
join_condition='tko_test_attributes_host_labels.attribute = '
'"host-labels"')
return query_set
def query_test_ids(self, filter_data, apply_presentation=True):
query = self.model.query_objects(filter_data,
apply_presentation=apply_presentation)
dicts = query.values('test_idx')
return [item['test_idx'] for item in dicts]
def query_test_label_ids(self, filter_data):
query_set = self.model.query_objects(filter_data)
query_set = self._add_label_joins(query_set, suffix='_list')
rows = self._custom_select_query(query_set, ['tko_test_labels_list.id'])
return [row[0] for row in rows if row[0] is not None]
def escape_user_sql(self, sql):
sql = super(TestViewManager, self).escape_user_sql(sql)
return sql.replace('test_idx', self.get_key_on_this_table('test_idx'))
class TestView(dbmodels.Model, model_logic.ModelExtensions):
extra_fields = {
'DATE(job_queued_time)': 'job queued day',
'DATE(test_finished_time)': 'test finished day',
}
group_fields = [
'test_name',
'status',
'kernel',
'hostname',
'job_tag',
'job_name',
'platform',
'reason',
'job_owner',
'job_queued_time',
'DATE(job_queued_time)',
'test_started_time',
'test_finished_time',
'DATE(test_finished_time)',
]
test_idx = dbmodels.IntegerField('test index', primary_key=True)
job_idx = dbmodels.IntegerField('job index', null=True, blank=True)
test_name = dbmodels.CharField(blank=True, max_length=90)
subdir = dbmodels.CharField('subdirectory', blank=True, max_length=180)
kernel_idx = dbmodels.IntegerField('kernel index')
status_idx = dbmodels.IntegerField('status index')
reason = dbmodels.CharField(blank=True, max_length=3072)
machine_idx = dbmodels.IntegerField('host index')
test_started_time = dbmodels.DateTimeField(null=True, blank=True)
test_finished_time = dbmodels.DateTimeField(null=True, blank=True)
job_tag = dbmodels.CharField(blank=True, max_length=300)
job_name = dbmodels.CharField(blank=True, max_length=300)
job_owner = dbmodels.CharField('owner', blank=True, max_length=240)
job_queued_time = dbmodels.DateTimeField(null=True, blank=True)
job_started_time = dbmodels.DateTimeField(null=True, blank=True)
job_finished_time = dbmodels.DateTimeField(null=True, blank=True)
afe_job_id = dbmodels.IntegerField(null=True)
hostname = dbmodels.CharField(blank=True, max_length=300)
platform = dbmodels.CharField(blank=True, max_length=240)
machine_owner = dbmodels.CharField(blank=True, max_length=240)
kernel_hash = dbmodels.CharField(blank=True, max_length=105)
kernel_base = dbmodels.CharField(blank=True, max_length=90)
kernel = dbmodels.CharField(blank=True, max_length=300)
status = dbmodels.CharField(blank=True, max_length=30)
objects = TestViewManager()
def save(self):
raise NotImplementedError('TestView is read-only')
def delete(self):
raise NotImplementedError('TestView is read-only')
@classmethod
def query_objects(cls, filter_data, initial_query=None,
apply_presentation=True):
if initial_query is None:
initial_query = cls.objects.get_query_set_with_joins(filter_data)
return super(TestView, cls).query_objects(
filter_data, initial_query=initial_query,
apply_presentation=apply_presentation)
class Meta:
db_table = 'tko_test_view_2'
|
calfonso/ansible
|
refs/heads/devel
|
test/units/modules/network/nxos/test_nxos_vxlan_vtep.py
|
57
|
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.compat.tests.mock import patch
from ansible.modules.network.nxos import nxos_vxlan_vtep
from .nxos_module import TestNxosModule, load_fixture, set_module_args
class TestNxosVxlanVtepVniModule(TestNxosModule):
module = nxos_vxlan_vtep
def setUp(self):
super(TestNxosVxlanVtepVniModule, self).setUp()
self.mock_load_config = patch('ansible.modules.network.nxos.nxos_vxlan_vtep.load_config')
self.load_config = self.mock_load_config.start()
self.mock_get_config = patch('ansible.modules.network.nxos.nxos_vxlan_vtep.get_config')
self.get_config = self.mock_get_config.start()
def tearDown(self):
super(TestNxosVxlanVtepVniModule, self).tearDown()
self.mock_get_config.stop()
self.mock_load_config.stop()
def load_fixtures(self, commands=None, device=''):
self.get_config.return_value = load_fixture('nxos_vxlan_vtep', 'config.cfg')
self.load_config.return_value = None
def test_nxos_vxlan_vtep(self):
set_module_args(dict(interface='nve1', description='simple description'))
self.execute_module(changed=True, commands=['interface nve1', 'description simple description'])
def test_nxos_vxlan_vtep_present_no_change(self):
set_module_args(dict(interface='nve1'))
self.execute_module(changed=False, commands=[])
def test_nxos_vxlan_vtep_absent(self):
set_module_args(dict(interface='nve1', state='absent'))
self.execute_module(changed=True, commands=['no interface nve1'])
def test_nxos_vxlan_vtep_absent_no_change(self):
set_module_args(dict(interface='nve2', state='absent'))
self.execute_module(changed=False, commands=[])
|
mewtaylor/django
|
refs/heads/master
|
django/core/management/commands/test.py
|
267
|
import logging
import os
import sys
from django.conf import settings
from django.core.management.base import BaseCommand
from django.test.utils import get_runner
class Command(BaseCommand):
help = 'Discover and run tests in the specified modules or the current directory.'
requires_system_checks = False
def __init__(self):
self.test_runner = None
super(Command, self).__init__()
def run_from_argv(self, argv):
"""
Pre-parse the command line to extract the value of the --testrunner
option. This allows a test runner to define additional command line
arguments.
"""
option = '--testrunner='
for arg in argv[2:]:
if arg.startswith(option):
self.test_runner = arg[len(option):]
break
super(Command, self).run_from_argv(argv)
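# Invocation sketch relying on the pre-parsing above (the runner path is
# hypothetical):
#
#   ./manage.py test --testrunner=myproject.runners.TimedRunner myapp.tests
#
# Pre-parsing --testrunner lets add_arguments below resolve the runner class
# early, so the runner can contribute its own command line options.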
def add_arguments(self, parser):
parser.add_argument('args', metavar='test_label', nargs='*',
help='Module paths to test; can be modulename, modulename.TestCase or modulename.TestCase.test_method')
parser.add_argument('--noinput',
action='store_false', dest='interactive', default=True,
help='Tells Django to NOT prompt the user for input of any kind.')
parser.add_argument('--failfast',
action='store_true', dest='failfast', default=False,
help='Tells Django to stop running the test suite after first '
'failed test.')
parser.add_argument('--testrunner',
action='store', dest='testrunner',
help='Tells Django to use specified test runner class instead of '
'the one specified by the TEST_RUNNER setting.')
parser.add_argument('--liveserver',
action='store', dest='liveserver', default=None,
help='Overrides the default address where the live server (used '
'with LiveServerTestCase) is expected to run from. The '
'default value is localhost:8081.')
test_runner_class = get_runner(settings, self.test_runner)
if hasattr(test_runner_class, 'option_list'):
# Keeping compatibility with both optparse and argparse at this level
# would be too heavy for a non-critical item
raise RuntimeError(
"The method to extend accepted command-line arguments by the "
"test management command has changed in Django 1.8. Please "
"create an add_arguments class method to achieve this.")
if hasattr(test_runner_class, 'add_arguments'):
test_runner_class.add_arguments(parser)
def execute(self, *args, **options):
if options['verbosity'] > 0:
# ensure that deprecation warnings are displayed during testing
# the following state is assumed:
# logging.capturewarnings is true
# a "default" level warnings filter has been added for
# DeprecationWarning. See django.conf.LazySettings._configure_logging
logger = logging.getLogger('py.warnings')
handler = logging.StreamHandler()
logger.addHandler(handler)
super(Command, self).execute(*args, **options)
if options['verbosity'] > 0:
# remove the testing-specific handler
logger.removeHandler(handler)
def handle(self, *test_labels, **options):
from django.conf import settings
from django.test.utils import get_runner
TestRunner = get_runner(settings, options.get('testrunner'))
if options.get('liveserver') is not None:
os.environ['DJANGO_LIVE_TEST_SERVER_ADDRESS'] = options['liveserver']
del options['liveserver']
test_runner = TestRunner(**options)
failures = test_runner.run_tests(test_labels)
if failures:
sys.exit(bool(failures))
|
baylee-d/osf.io
|
refs/heads/develop
|
addons/base/generic_views.py
|
9
|
"""Generic add-on view factories"""
# -*- coding: utf-8 -*-
from rest_framework import status as http_status
from flask import request
from framework.exceptions import HTTPError, PermissionsError
from framework.auth.decorators import must_be_logged_in
from osf.models import ExternalAccount
from osf.utils import permissions
from website.project.decorators import (
must_have_addon, must_be_addon_authorizer,
must_have_permission, must_not_be_registration,
must_be_valid_project
)
def import_auth(addon_short_name, Serializer):
@must_have_addon(addon_short_name, 'user')
@must_have_addon(addon_short_name, 'node')
@must_have_permission(permissions.WRITE)
def _import_auth(auth, node_addon, user_addon, **kwargs):
"""Import add-on credentials from the currently logged-in user to a node.
"""
external_account = ExternalAccount.load(
request.json['external_account_id']
)
if not user_addon.external_accounts.filter(id=external_account.id).exists():
raise HTTPError(http_status.HTTP_403_FORBIDDEN)
try:
node_addon.set_auth(external_account, user_addon.owner)
except PermissionsError:
raise HTTPError(http_status.HTTP_403_FORBIDDEN)
node_addon.save()
return {
'result': Serializer().serialize_settings(node_addon, auth.user),
'message': 'Successfully imported access token from profile.',
}
_import_auth.__name__ = '{0}_import_auth'.format(addon_short_name)
return _import_auth
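# Sketch of how an add-on might use this factory (names are hypothetical, not
# from this file):
#
#   box_import_auth = import_auth('box', BoxSerializer)
#
# The returned view expects an 'external_account_id' key in the JSON body and
# responds with the serialized node settings on success.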
def account_list(addon_short_name, Serializer):
@must_be_logged_in
def _account_list(auth):
user_settings = auth.user.get_addon(addon_short_name)
serializer = Serializer(user_settings=user_settings)
return serializer.serialized_user_settings
_account_list.__name__ = '{0}_account_list'.format(addon_short_name)
return _account_list
def folder_list(addon_short_name, addon_full_name, get_folders):
# TODO [OSF-6678]: Generalize this for API use after node settings have been refactored
@must_have_addon(addon_short_name, 'node')
@must_be_addon_authorizer(addon_short_name)
def _folder_list(node_addon, **kwargs):
"""Returns a list of folders"""
if not node_addon.has_auth:
raise HTTPError(http_status.HTTP_403_FORBIDDEN)
folder_id = request.args.get('folderId')
return get_folders(node_addon, folder_id)
_folder_list.__name__ = '{0}_folder_list'.format(addon_short_name)
return _folder_list
def get_config(addon_short_name, Serializer):
@must_be_logged_in
@must_have_addon(addon_short_name, 'node')
@must_be_valid_project
@must_have_permission(permissions.WRITE)
def _get_config(node_addon, auth, **kwargs):
"""API that returns the serialized node settings."""
return {
'result': Serializer().serialize_settings(
node_addon,
auth.user
)
}
_get_config.__name__ = '{0}_get_config'.format(addon_short_name)
return _get_config
def set_config(addon_short_name, addon_full_name, Serializer, set_folder):
@must_not_be_registration
@must_have_addon(addon_short_name, 'user')
@must_have_addon(addon_short_name, 'node')
@must_be_addon_authorizer(addon_short_name)
@must_have_permission(permissions.WRITE)
def _set_config(node_addon, user_addon, auth, **kwargs):
"""View for changing a node's linked folder."""
folder = request.json.get('selected')
set_folder(node_addon, folder, auth)
path = node_addon.folder_path
return {
'result': {
'folder': {
'name': path.replace('All Files', '') if path != '/' else '/ (Full {0})'.format(
addon_full_name
),
'path': path,
},
'urls': Serializer(node_settings=node_addon).addon_serialized_urls,
},
'message': 'Successfully updated settings.',
}
_set_config.__name__ = '{0}_set_config'.format(addon_short_name)
return _set_config
def deauthorize_node(addon_short_name):
@must_not_be_registration
@must_have_addon(addon_short_name, 'node')
@must_have_permission(permissions.WRITE)
def _deauthorize_node(auth, node_addon, **kwargs):
node_addon.deauthorize(auth=auth)
node_addon.save()
_deauthorize_node.__name__ = '{0}_deauthorize_node'.format(addon_short_name)
return _deauthorize_node
|
funkyfuture/cerberus
|
refs/heads/master
|
cerberus/tests/test_rule_…length.py
|
2
|
from random import choice
from string import ascii_lowercase
from cerberus import errors
from cerberus.tests import assert_fail, assert_success
def test_minlength_and_maxlength_with_list(schema):
field = 'a_list_length'
min_length = schema[field]['minlength']
max_length = schema[field]['maxlength']
assert_fail(
{field: [1] * (min_length - 1)},
error=(
field,
(field, 'minlength'),
errors.MIN_LENGTH,
min_length,
(min_length - 1,),
),
)
for i in range(min_length, max_length):
assert_success({field: [1] * i})
assert_fail(
{field: [1] * (max_length + 1)},
error=(
field,
(field, 'maxlength'),
errors.MAX_LENGTH,
max_length,
(max_length + 1,),
),
)
def test_maxlength_fails(schema):
field = 'a_string'
max_length = schema[field]['maxlength']
value = "".join(choice(ascii_lowercase) for i in range(max_length + 1))
assert_fail(
document={field: value},
error=(
field,
(field, 'maxlength'),
errors.MAX_LENGTH,
max_length,
(len(value),),
),
)
def test_maxlength_with_bytestring_fails(schema):
field = 'a_bytestring'
max_length = schema[field]['maxlength']
value = b'\x00' * (max_length + 1)
assert_fail(
document={field: value},
error=(
field,
(field, 'maxlength'),
errors.MAX_LENGTH,
max_length,
(len(value),),
),
)
def test_minlength_fails(schema):
field = 'a_string'
min_length = schema[field]['minlength']
value = "".join(choice(ascii_lowercase) for i in range(min_length - 1))
assert_fail(
document={field: value},
error=(
field,
(field, 'minlength'),
errors.MIN_LENGTH,
min_length,
(len(value),),
),
)
def test_minlength_with_bytestring_fails(schema):
field = 'a_bytestring'
min_length = schema[field]['minlength']
value = b'\x00' * (min_length - 1)
assert_fail(
document={field: value},
error=(
field,
(field, 'minlength'),
errors.MIN_LENGTH,
min_length,
(len(value),),
),
)
def test_minlength_with_dict():
schema = {'dict': {'minlength': 1}}
assert_fail(document={'dict': {}}, schema=schema)
assert_success(document={'dict': {'foo': 'bar'}}, schema=schema)
|
75651/kbengine_cloud
|
refs/heads/master
|
kbe/src/lib/python/Lib/nturl2path.py
|
89
|
"""Convert a NT pathname to a file URL and vice versa."""
def url2pathname(url):
"""OS-specific conversion from a relative URL of the 'file' scheme
to a file system path; not recommended for general use."""
# e.g.
# ///C|/foo/bar/spam.foo
# becomes
# C:\foo\bar\spam.foo
import string, urllib.parse
# Windows itself uses ":" even in URLs.
url = url.replace(':', '|')
if not '|' in url:
# No drive specifier, just convert slashes
if url[:4] == '////':
# path is something like ////host/path/on/remote/host
# convert this to \\host\path\on\remote\host
# (notice halving of slashes at the start of the path)
url = url[2:]
components = url.split('/')
# make sure not to convert quoted slashes :-)
return urllib.parse.unquote('\\'.join(components))
comp = url.split('|')
if len(comp) != 2 or comp[0][-1] not in string.ascii_letters:
error = 'Bad URL: ' + url
raise OSError(error)
drive = comp[0][-1].upper()
components = comp[1].split('/')
path = drive + ':'
for comp in components:
if comp:
path = path + '\\' + urllib.parse.unquote(comp)
# Issue #11474 - handling url such as |c/|
if path.endswith(':') and url.endswith('/'):
path += '\\'
return path
def pathname2url(p):
"""OS-specific conversion from a file system path to a relative URL
of the 'file' scheme; not recommended for general use."""
# e.g.
# C:\foo\bar\spam.foo
# becomes
# ///C|/foo/bar/spam.foo
import urllib.parse
if not ':' in p:
# No drive specifier, just convert slashes and quote the name
if p[:2] == '\\\\':
# path is something like \\host\path\on\remote\host
# convert this to ////host/path/on/remote/host
# (notice doubling of slashes at the start of the path)
p = '\\\\' + p
components = p.split('\\')
return urllib.parse.quote('/'.join(components))
comp = p.split(':')
if len(comp) != 2 or len(comp[0]) > 1:
error = 'Bad path: ' + p
raise OSError(error)
drive = urllib.parse.quote(comp[0].upper())
components = comp[1].split('\\')
path = '///' + drive + ':'
for comp in components:
if comp:
path = path + '/' + urllib.parse.quote(comp)
return path
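# Round-trip sketch for the two helpers above (illustrative only; quoting of
# special characters follows urllib.parse.quote/unquote):
#
#   >>> pathname2url(r'C:\foo\bar\spam.foo')
#   '///C:/foo/bar/spam.foo'
#   >>> url2pathname('///C|/foo/bar/spam.foo')
#   'C:\\foo\\bar\\spam.foo'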
|
mou4e/zirconium
|
refs/heads/master
|
tools/valgrind/browser_wrapper_win.py
|
76
|
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import glob
import os
import re
import sys
import subprocess
# TODO(timurrrr): we may use it on POSIX too to avoid code duplication once we
# support layout_tests, remove Dr. Memory specific code and verify it works
# on a "clean" Mac.
testcase_name = None
for arg in sys.argv:
m = re.match("\-\-gtest_filter=(.*)", arg)
if m:
assert testcase_name is None
testcase_name = m.groups()[0]
# arg #0 is the path to this python script
cmd_to_run = sys.argv[1:]
# TODO(timurrrr): this is Dr. Memory-specific
# Usually, we pass "-logdir" "foo\bar\spam path" args to Dr. Memory.
# To group reports per UI test, we want to put the reports for each test into a
# separate directory. This code can be simplified when we have
# https://github.com/DynamoRIO/drmemory/issues/684 fixed.
logdir_idx = cmd_to_run.index("-logdir")
old_logdir = cmd_to_run[logdir_idx + 1]
wrapper_pid = str(os.getpid())
# On Windows, there is a chance of PID collision. We avoid it by appending the
# number of entries in the logdir to the end of wrapper_pid.
# This number is monotonic and we can't have two simultaneously running wrappers
# with the same PID.
wrapper_pid += "_%d" % len(glob.glob(old_logdir + "\\*"))
cmd_to_run[logdir_idx + 1] += "\\testcase.%s.logs" % wrapper_pid
os.makedirs(cmd_to_run[logdir_idx + 1])
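# For example (hypothetical values): with "-logdir c:\drmemory\logs", wrapper
# PID 1234 and 7 existing entries in that directory, the reports for this test
# case end up in c:\drmemory\logs\testcase.1234_7.logs.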
if testcase_name:
f = open(old_logdir + "\\testcase.%s.name" % wrapper_pid, "w")
print >>f, testcase_name
f.close()
exit(subprocess.call(cmd_to_run))
|
a40223217/cadpw12
|
refs/heads/master
|
wsgi/application_orig.py
|
135
|
################################# 1. Declare the source encoding and import required modules
#coding=utf-8
import cherrypy
import random
# for path setup
import os
# for mako
from mako.lookup import TemplateLookup
################################# 2. Global variable setup: local and remote directory settings
cwd = os.getcwd()
if 'OPENSHIFT_REPO_DIR' in os.environ.keys():
# the program is running in the cloud
template_root_dir = os.environ['OPENSHIFT_REPO_DIR']+"/wsgi/static"
data_dir = os.environ['OPENSHIFT_DATA_DIR']
else:
# the program is running locally
template_root_dir = cwd+"/static"
data_dir = cwd+"/local_data"
################################# 3. Define the main class Guess
class Guess(object):
# the correct answer must be stored via the session mechanism
_cp_config = {
# handle form content in utf-8
# without the utf-8 encoding setting, the form cannot accept Chinese input
'tools.encode.encoding': 'utf-8',
# enable session support
'tools.sessions.on' : True,
'tools.sessions.storage_type' : 'file',
'tools.sessions.locking' : 'early',
'tools.sessions.storage_path' : data_dir+'/tmp',
# the default session timeout is 60 minutes
'tools.sessions.timeout' : 60,
'tools.mako.directories' : template_root_dir+"/templates"
}
def __init__(self):
if not os.path.isdir(data_dir+"/tmp"):
try:
os.makedirs(data_dir+"/tmp")
except:
print("mkdir error")
@cherrypy.expose
def index(self, guess=None):
# store the correct answer in the 'answer' session entry
theanswer = random.randint(1, 100)
thecount = 0
# save the answer and the guess counter into session variables
cherrypy.session['answer'] = theanswer
cherrypy.session['count'] = thecount
套稿查詢 = TemplateLookup(directories=[template_root_dir+"/templates"])
# index.html must be loaded from the templates directory
內建頁面 = 套稿查詢.get_template("index.html")
return 內建頁面.render()
@cherrypy.expose
def default(self, attr='default'):
# built-in default method, executed when no matching handler method is found
套稿查詢 = TemplateLookup(directories=[template_root_dir+"/templates"])
# default.html must be loaded from the templates directory
內建頁面 = 套稿查詢.get_template("default.html")
return 內建頁面.render()
@cherrypy.expose
def doCheck(self, guess=None):
# if the user invokes doCheck directly, redirect back to the root method
if guess is None:
raise cherrypy.HTTPRedirect("/")
# read the answer from the session, handling the case where doCheck is called directly and no session value is available
try:
theanswer = int(cherrypy.session.get('answer'))
except:
raise cherrypy.HTTPRedirect("/")
套稿查詢 = TemplateLookup(directories=[template_root_dir+"/templates"])
# docheck.html must be loaded from the templates directory
內建頁面 = 套稿查詢.get_template("docheck.html")
# the guess value received from the form is a string
try:
theguess = int(guess)
except:
return 內建頁面.render(輸入="error")
cherrypy.session['count'] += 1
if theanswer < theguess:
return 內建頁面.render(輸入="big", theanswer=theanswer)
elif theanswer > theguess:
return 內建頁面.render(輸入="small", theanswer=theanswer)
else:
thecount = cherrypy.session.get('count')
return 內建頁面.render(輸入="exact", theanswer=theanswer, thecount=thecount)
@cherrypy.expose
def mytest(self):
套稿查詢 = TemplateLookup(directories=[template_root_dir+"/templates"])
# mytest.html must be loaded from the templates directory
內建頁面 = 套稿查詢.get_template("mytest.html")
return 內建頁面.render()
################################# 4. Application startup configuration and execution
root = Guess()
application_conf = {# map the static templates file directory
'/templates':{
'tools.staticdir.on': True,
'tools.staticdir.root': template_root_dir,
'tools.staticdir.dir': 'templates',
'tools.staticdir.index' : 'index.htm'
}
}
if 'OPENSHIFT_REPO_DIR' in os.environ.keys():
# running on OpenShift
application = cherrypy.Application(root, config = application_conf)
else:
# running locally
cherrypy.quickstart(root, config = application_conf)
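# Local run sketch (assumes the static/templates directory sits next to this
# file): running `python application_orig.py` starts CherryPy via quickstart
# above and serves the guessing game on CherryPy's default port, i.e.
# http://localhost:8080/.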
|
polyaxon/polyaxon
|
refs/heads/master
|
core/polyaxon/vendor/pynvml.py
|
1
|
#####
# Copyright (c) 2011-2015, NVIDIA Corporation. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA Corporation nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
#####
# isort: skip_file
##
# Python bindings for the NVML library
##
from ctypes import *
from ctypes.util import find_library
import sys
import os
import threading
import string
## C Type mappings ##
## Enums
_nvmlEnableState_t = c_uint
NVML_FEATURE_DISABLED = 0
NVML_FEATURE_ENABLED = 1
_nvmlBrandType_t = c_uint
NVML_BRAND_UNKNOWN = 0
NVML_BRAND_QUADRO = 1
NVML_BRAND_TESLA = 2
NVML_BRAND_NVS = 3
NVML_BRAND_GRID = 4
NVML_BRAND_GEFORCE = 5
NVML_BRAND_COUNT = 6
_nvmlTemperatureThresholds_t = c_uint
NVML_TEMPERATURE_THRESHOLD_SHUTDOWN = 0
NVML_TEMPERATURE_THRESHOLD_SLOWDOWN = 1
NVML_TEMPERATURE_THRESHOLD_COUNT = 1
_nvmlTemperatureSensors_t = c_uint
NVML_TEMPERATURE_GPU = 0
NVML_TEMPERATURE_COUNT = 1
_nvmlComputeMode_t = c_uint
NVML_COMPUTEMODE_DEFAULT = 0
NVML_COMPUTEMODE_EXCLUSIVE_THREAD = 1
NVML_COMPUTEMODE_PROHIBITED = 2
NVML_COMPUTEMODE_EXCLUSIVE_PROCESS = 3
NVML_COMPUTEMODE_COUNT = 4
_nvmlMemoryLocation_t = c_uint
NVML_MEMORY_LOCATION_L1_CACHE = 0
NVML_MEMORY_LOCATION_L2_CACHE = 1
NVML_MEMORY_LOCATION_DEVICE_MEMORY = 2
NVML_MEMORY_LOCATION_REGISTER_FILE = 3
NVML_MEMORY_LOCATION_TEXTURE_MEMORY = 4
NVML_MEMORY_LOCATION_COUNT = 5
# These are deprecated, instead use _nvmlMemoryErrorType_t
_nvmlEccBitType_t = c_uint
NVML_SINGLE_BIT_ECC = 0
NVML_DOUBLE_BIT_ECC = 1
NVML_ECC_ERROR_TYPE_COUNT = 2
_nvmlEccCounterType_t = c_uint
NVML_VOLATILE_ECC = 0
NVML_AGGREGATE_ECC = 1
NVML_ECC_COUNTER_TYPE_COUNT = 2
_nvmlMemoryErrorType_t = c_uint
NVML_MEMORY_ERROR_TYPE_CORRECTED = 0
NVML_MEMORY_ERROR_TYPE_UNCORRECTED = 1
NVML_MEMORY_ERROR_TYPE_COUNT = 2
_nvmlClockType_t = c_uint
NVML_CLOCK_GRAPHICS = 0
NVML_CLOCK_SM = 1
NVML_CLOCK_MEM = 2
NVML_CLOCK_COUNT = 3
_nvmlDriverModel_t = c_uint
NVML_DRIVER_WDDM = 0
NVML_DRIVER_WDM = 1
_nvmlPstates_t = c_uint
NVML_PSTATE_0 = 0
NVML_PSTATE_1 = 1
NVML_PSTATE_2 = 2
NVML_PSTATE_3 = 3
NVML_PSTATE_4 = 4
NVML_PSTATE_5 = 5
NVML_PSTATE_6 = 6
NVML_PSTATE_7 = 7
NVML_PSTATE_8 = 8
NVML_PSTATE_9 = 9
NVML_PSTATE_10 = 10
NVML_PSTATE_11 = 11
NVML_PSTATE_12 = 12
NVML_PSTATE_13 = 13
NVML_PSTATE_14 = 14
NVML_PSTATE_15 = 15
NVML_PSTATE_UNKNOWN = 32
_nvmlInforomObject_t = c_uint
NVML_INFOROM_OEM = 0
NVML_INFOROM_ECC = 1
NVML_INFOROM_POWER = 2
NVML_INFOROM_COUNT = 3
_nvmlReturn_t = c_uint
NVML_SUCCESS = 0
NVML_ERROR_UNINITIALIZED = 1
NVML_ERROR_INVALID_ARGUMENT = 2
NVML_ERROR_NOT_SUPPORTED = 3
NVML_ERROR_NO_PERMISSION = 4
NVML_ERROR_ALREADY_INITIALIZED = 5
NVML_ERROR_NOT_FOUND = 6
NVML_ERROR_INSUFFICIENT_SIZE = 7
NVML_ERROR_INSUFFICIENT_POWER = 8
NVML_ERROR_DRIVER_NOT_LOADED = 9
NVML_ERROR_TIMEOUT = 10
NVML_ERROR_IRQ_ISSUE = 11
NVML_ERROR_LIBRARY_NOT_FOUND = 12
NVML_ERROR_FUNCTION_NOT_FOUND = 13
NVML_ERROR_CORRUPTED_INFOROM = 14
NVML_ERROR_GPU_IS_LOST = 15
NVML_ERROR_RESET_REQUIRED = 16
NVML_ERROR_OPERATING_SYSTEM = 17
NVML_ERROR_LIB_RM_VERSION_MISMATCH = 18
NVML_ERROR_UNKNOWN = 999
_nvmlFanState_t = c_uint
NVML_FAN_NORMAL = 0
NVML_FAN_FAILED = 1
_nvmlLedColor_t = c_uint
NVML_LED_COLOR_GREEN = 0
NVML_LED_COLOR_AMBER = 1
_nvmlGpuOperationMode_t = c_uint
NVML_GOM_ALL_ON = 0
NVML_GOM_COMPUTE = 1
NVML_GOM_LOW_DP = 2
_nvmlPageRetirementCause_t = c_uint
NVML_PAGE_RETIREMENT_CAUSE_DOUBLE_BIT_ECC_ERROR = 0
NVML_PAGE_RETIREMENT_CAUSE_MULTIPLE_SINGLE_BIT_ECC_ERRORS = 1
NVML_PAGE_RETIREMENT_CAUSE_COUNT = 2
_nvmlRestrictedAPI_t = c_uint
NVML_RESTRICTED_API_SET_APPLICATION_CLOCKS = 0
NVML_RESTRICTED_API_SET_AUTO_BOOSTED_CLOCKS = 1
NVML_RESTRICTED_API_COUNT = 2
_nvmlBridgeChipType_t = c_uint
NVML_BRIDGE_CHIP_PLX = 0
NVML_BRIDGE_CHIP_BRO4 = 1
NVML_MAX_PHYSICAL_BRIDGE = 128
_nvmlValueType_t = c_uint
NVML_VALUE_TYPE_DOUBLE = 0
NVML_VALUE_TYPE_UNSIGNED_INT = 1
NVML_VALUE_TYPE_UNSIGNED_LONG = 2
NVML_VALUE_TYPE_UNSIGNED_LONG_LONG = 3
NVML_VALUE_TYPE_COUNT = 4
_nvmlPerfPolicyType_t = c_uint
NVML_PERF_POLICY_POWER = 0
NVML_PERF_POLICY_THERMAL = 1
NVML_PERF_POLICY_COUNT = 2
_nvmlSamplingType_t = c_uint
NVML_TOTAL_POWER_SAMPLES = 0
NVML_GPU_UTILIZATION_SAMPLES = 1
NVML_MEMORY_UTILIZATION_SAMPLES = 2
NVML_ENC_UTILIZATION_SAMPLES = 3
NVML_DEC_UTILIZATION_SAMPLES = 4
NVML_PROCESSOR_CLK_SAMPLES = 5
NVML_MEMORY_CLK_SAMPLES = 6
NVML_SAMPLINGTYPE_COUNT = 7
_nvmlPcieUtilCounter_t = c_uint
NVML_PCIE_UTIL_TX_BYTES = 0
NVML_PCIE_UTIL_RX_BYTES = 1
NVML_PCIE_UTIL_COUNT = 2
_nvmlGpuTopologyLevel_t = c_uint
NVML_TOPOLOGY_INTERNAL = 0
NVML_TOPOLOGY_SINGLE = 10
NVML_TOPOLOGY_MULTIPLE = 20
NVML_TOPOLOGY_HOSTBRIDGE = 30
NVML_TOPOLOGY_CPU = 40
NVML_TOPOLOGY_SYSTEM = 50
# C preprocessor defined values
nvmlFlagDefault = 0
nvmlFlagForce = 1
# buffer size
NVML_DEVICE_INFOROM_VERSION_BUFFER_SIZE = 16
NVML_DEVICE_UUID_BUFFER_SIZE = 80
NVML_SYSTEM_DRIVER_VERSION_BUFFER_SIZE = 81
NVML_SYSTEM_NVML_VERSION_BUFFER_SIZE = 80
NVML_DEVICE_NAME_BUFFER_SIZE = 64
NVML_DEVICE_SERIAL_BUFFER_SIZE = 30
NVML_DEVICE_VBIOS_VERSION_BUFFER_SIZE = 32
NVML_DEVICE_PCI_BUS_ID_BUFFER_SIZE = 16
NVML_VALUE_NOT_AVAILABLE_ulonglong = c_ulonglong(-1)
NVML_VALUE_NOT_AVAILABLE_uint = c_uint(-1)
## Lib loading ##
nvmlLib = None
libLoadLock = threading.Lock()
_nvmlLib_refcount = 0 # Incremented on each nvmlInit and decremented on nvmlShutdown
## Error Checking ##
class NVMLError(Exception):
_valClassMapping = dict()
# List of currently known error codes
_errcode_to_string = {
NVML_ERROR_UNINITIALIZED: "Uninitialized",
NVML_ERROR_INVALID_ARGUMENT: "Invalid Argument",
NVML_ERROR_NOT_SUPPORTED: "Not Supported",
NVML_ERROR_NO_PERMISSION: "Insufficient Permissions",
NVML_ERROR_ALREADY_INITIALIZED: "Already Initialized",
NVML_ERROR_NOT_FOUND: "Not Found",
NVML_ERROR_INSUFFICIENT_SIZE: "Insufficient Size",
NVML_ERROR_INSUFFICIENT_POWER: "Insufficient External Power",
NVML_ERROR_DRIVER_NOT_LOADED: "Driver Not Loaded",
NVML_ERROR_TIMEOUT: "Timeout",
NVML_ERROR_IRQ_ISSUE: "Interrupt Request Issue",
NVML_ERROR_LIBRARY_NOT_FOUND: "NVML Shared Library Not Found",
NVML_ERROR_FUNCTION_NOT_FOUND: "Function Not Found",
NVML_ERROR_CORRUPTED_INFOROM: "Corrupted infoROM",
NVML_ERROR_GPU_IS_LOST: "GPU is lost",
NVML_ERROR_RESET_REQUIRED: "GPU requires restart",
NVML_ERROR_OPERATING_SYSTEM: "The operating system has blocked the request.",
NVML_ERROR_LIB_RM_VERSION_MISMATCH: "RM has detected an NVML/RM version mismatch.",
NVML_ERROR_UNKNOWN: "Unknown Error",
}
def __new__(typ, value):
'''
Maps value to a proper subclass of NVMLError.
See _extractNVMLErrorsAsClasses function for more details
'''
if typ == NVMLError:
typ = NVMLError._valClassMapping.get(value, typ)
obj = Exception.__new__(typ)
obj.value = value
return obj
def __str__(self):
try:
if self.value not in NVMLError._errcode_to_string:
NVMLError._errcode_to_string[self.value] = str(nvmlErrorString(self.value))
return NVMLError._errcode_to_string[self.value]
except NVMLError_Uninitialized:
return "NVML Error with code %d" % self.value
def __eq__(self, other):
return self.value == other.value
def _extractNVMLErrorsAsClasses():
'''
Generates a hierarchy of classes on top of NVMLError class.
Each NVML Error gets a new NVMLError subclass. This way try/except blocks can filter appropriate
exceptions more easily.
NVMLError is a parent class. Each NVML_ERROR_* gets its own subclass.
e.g. NVML_ERROR_ALREADY_INITIALIZED will be turned into NVMLError_AlreadyInitialized
'''
this_module = sys.modules[__name__]
nvmlErrorsNames = filter(lambda x: x.startswith("NVML_ERROR_"), dir(this_module))
for err_name in nvmlErrorsNames:
# e.g. Turn NVML_ERROR_ALREADY_INITIALIZED into NVMLError_AlreadyInitialized
class_name = "NVMLError_" + string.capwords(err_name.replace("NVML_ERROR_", ""), "_").replace("_", "")
err_val = getattr(this_module, err_name)
def gen_new(val):
def new(typ):
obj = NVMLError.__new__(typ, val)
return obj
return new
new_error_class = type(class_name, (NVMLError,), {'__new__': gen_new(err_val)})
new_error_class.__module__ = __name__
setattr(this_module, class_name, new_error_class)
NVMLError._valClassMapping[err_val] = new_error_class
_extractNVMLErrorsAsClasses()
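# After the call above each error code has its own exception class, so callers
# can catch specific failures. A minimal sketch:
#
#   try:
#       nvmlInit()
#   except NVMLError_LibraryNotFound:
#       print("NVML shared library is not installed")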
def _nvmlCheckReturn(ret):
if (ret != NVML_SUCCESS):
raise NVMLError(ret)
return ret
## Function access ##
_nvmlGetFunctionPointer_cache = dict() # function pointers are cached to prevent unnecessary libLoadLock locking
def _nvmlGetFunctionPointer(name):
global nvmlLib
if name in _nvmlGetFunctionPointer_cache:
return _nvmlGetFunctionPointer_cache[name]
libLoadLock.acquire()
try:
# ensure library was loaded
if (nvmlLib == None):
raise NVMLError(NVML_ERROR_UNINITIALIZED)
try:
_nvmlGetFunctionPointer_cache[name] = getattr(nvmlLib, name)
return _nvmlGetFunctionPointer_cache[name]
except AttributeError:
raise NVMLError(NVML_ERROR_FUNCTION_NOT_FOUND)
finally:
# lock is always freed
libLoadLock.release()
## Alternative object
# Allows the object to be printed
# Allows mismatched types to be assigned
# - like None when the Structure variant requires c_uint
class nvmlFriendlyObject(object):
def __init__(self, dictionary):
for x in dictionary:
setattr(self, x, dictionary[x])
def __str__(self):
return self.__dict__.__str__()
def nvmlStructToFriendlyObject(struct):
d = {}
for x in struct._fields_:
key = x[0]
value = getattr(struct, key)
d[key] = value
obj = nvmlFriendlyObject(d)
return obj
# pack the object so it can be passed to the NVML library
def nvmlFriendlyObjectToStruct(obj, model):
for x in model._fields_:
key = x[0]
value = obj.__dict__[key]
setattr(model, key, value)
return model
## Unit structures
class struct_c_nvmlUnit_t(Structure):
pass # opaque handle
c_nvmlUnit_t = POINTER(struct_c_nvmlUnit_t)
class _PrintableStructure(Structure):
"""
Abstract class that produces nicer __str__ output than ctypes.Structure.
e.g. instead of:
>>> print str(obj)
<class_name object at 0x7fdf82fef9e0>
this class will print
class_name(field_name: formatted_value, field_name: formatted_value)
_fmt_ dictionary of <str _field_ name> -> <str format>
e.g. class that has _field_ 'hex_value', c_uint could be formatted with
_fmt_ = {"hex_value" : "%08X"}
to produce nicer output.
Default formatting string for all fields can be set with key "<default>" like:
_fmt_ = {"<default>" : "%d MHz"} # e.g all values are numbers in MHz.
If not set it's assumed to be just "%s"
Exact format of returned str from this class is subject to change in the future.
"""
_fmt_ = {}
def __str__(self):
result = []
for x in self._fields_:
key = x[0]
value = getattr(self, key)
fmt = "%s"
if key in self._fmt_:
fmt = self._fmt_[key]
elif "<default>" in self._fmt_:
fmt = self._fmt_["<default>"]
result.append(("%s: " + fmt) % (key, value))
return self.__class__.__name__ + "(" + ", ".join(result) + ")"
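# Rough illustration of the formatting above: printing a c_nvmlMemory_t (whose
# _fmt_ maps '<default>' to "%d B") yields something like
# c_nvmlMemory_t(total: 8589934592 B, free: 8000000000 B, used: 589934592 B)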
class c_nvmlUnitInfo_t(_PrintableStructure):
_fields_ = [
('name', c_char * 96),
('id', c_char * 96),
('serial', c_char * 96),
('firmwareVersion', c_char * 96),
]
class c_nvmlLedState_t(_PrintableStructure):
_fields_ = [
('cause', c_char * 256),
('color', _nvmlLedColor_t),
]
class c_nvmlPSUInfo_t(_PrintableStructure):
_fields_ = [
('state', c_char * 256),
('current', c_uint),
('voltage', c_uint),
('power', c_uint),
]
class c_nvmlUnitFanInfo_t(_PrintableStructure):
_fields_ = [
('speed', c_uint),
('state', _nvmlFanState_t),
]
class c_nvmlUnitFanSpeeds_t(_PrintableStructure):
_fields_ = [
('fans', c_nvmlUnitFanInfo_t * 24),
('count', c_uint)
]
## Device structures
class struct_c_nvmlDevice_t(Structure):
pass # opaque handle
c_nvmlDevice_t = POINTER(struct_c_nvmlDevice_t)
class nvmlPciInfo_t(_PrintableStructure):
_fields_ = [
('busId', c_char * 16),
('domain', c_uint),
('bus', c_uint),
('device', c_uint),
('pciDeviceId', c_uint),
# Added in 2.285
('pciSubSystemId', c_uint),
('reserved0', c_uint),
('reserved1', c_uint),
('reserved2', c_uint),
('reserved3', c_uint),
]
_fmt_ = {
'domain' : "0x%04X",
'bus' : "0x%02X",
'device' : "0x%02X",
'pciDeviceId' : "0x%08X",
'pciSubSystemId' : "0x%08X",
}
class c_nvmlMemory_t(_PrintableStructure):
_fields_ = [
('total', c_ulonglong),
('free', c_ulonglong),
('used', c_ulonglong),
]
_fmt_ = {'<default>': "%d B"}
class c_nvmlBAR1Memory_t(_PrintableStructure):
_fields_ = [
('bar1Total', c_ulonglong),
('bar1Free', c_ulonglong),
('bar1Used', c_ulonglong),
]
_fmt_ = {'<default>': "%d B"}
# On Windows with the WDDM driver, usedGpuMemory is reported as None
# Code that processes this structure should check for None, I.E.
#
# if (info.usedGpuMemory == None):
# # TODO handle the error
# pass
# else:
# print("Using %d MiB of memory" % (info.usedGpuMemory / 1024 / 1024))
#
# See NVML documentation for more information
class c_nvmlProcessInfo_t(_PrintableStructure):
_fields_ = [
('pid', c_uint),
('usedGpuMemory', c_ulonglong),
]
_fmt_ = {'usedGpuMemory': "%d B"}
class c_nvmlBridgeChipInfo_t(_PrintableStructure):
_fields_ = [
('type', _nvmlBridgeChipType_t),
('fwVersion', c_uint),
]
class c_nvmlBridgeChipHierarchy_t(_PrintableStructure):
_fields_ = [
('bridgeCount', c_uint),
('bridgeChipInfo', c_nvmlBridgeChipInfo_t * 128),
]
class c_nvmlEccErrorCounts_t(_PrintableStructure):
_fields_ = [
('l1Cache', c_ulonglong),
('l2Cache', c_ulonglong),
('deviceMemory', c_ulonglong),
('registerFile', c_ulonglong),
]
class c_nvmlUtilization_t(_PrintableStructure):
_fields_ = [
('gpu', c_uint),
('memory', c_uint),
]
_fmt_ = {'<default>': "%d %%"}
# Added in 2.285
class c_nvmlHwbcEntry_t(_PrintableStructure):
_fields_ = [
('hwbcId', c_uint),
('firmwareVersion', c_char * 32),
]
class c_nvmlValue_t(Union):
_fields_ = [
('dVal', c_double),
('uiVal', c_uint),
('ulVal', c_ulong),
('ullVal', c_ulonglong),
]
class c_nvmlSample_t(_PrintableStructure):
_fields_ = [
('timeStamp', c_ulonglong),
('sampleValue', c_nvmlValue_t),
]
class c_nvmlViolationTime_t(_PrintableStructure):
_fields_ = [
('referenceTime', c_ulonglong),
('violationTime', c_ulonglong),
]
## Event structures
class struct_c_nvmlEventSet_t(Structure):
pass # opaque handle
c_nvmlEventSet_t = POINTER(struct_c_nvmlEventSet_t)
nvmlEventTypeSingleBitEccError = 0x0000000000000001
nvmlEventTypeDoubleBitEccError = 0x0000000000000002
nvmlEventTypePState = 0x0000000000000004
nvmlEventTypeXidCriticalError = 0x0000000000000008
nvmlEventTypeClock = 0x0000000000000010
nvmlEventTypeNone = 0x0000000000000000
nvmlEventTypeAll = (
nvmlEventTypeNone |
nvmlEventTypeSingleBitEccError |
nvmlEventTypeDoubleBitEccError |
nvmlEventTypePState |
nvmlEventTypeClock |
nvmlEventTypeXidCriticalError
)
## Clock Throttle Reasons defines
nvmlClocksThrottleReasonGpuIdle = 0x0000000000000001
nvmlClocksThrottleReasonApplicationsClocksSetting = 0x0000000000000002
nvmlClocksThrottleReasonUserDefinedClocks = nvmlClocksThrottleReasonApplicationsClocksSetting # deprecated, use nvmlClocksThrottleReasonApplicationsClocksSetting
nvmlClocksThrottleReasonSwPowerCap = 0x0000000000000004
nvmlClocksThrottleReasonHwSlowdown = 0x0000000000000008
nvmlClocksThrottleReasonUnknown = 0x8000000000000000
nvmlClocksThrottleReasonNone = 0x0000000000000000
nvmlClocksThrottleReasonAll = (
nvmlClocksThrottleReasonNone |
nvmlClocksThrottleReasonGpuIdle |
nvmlClocksThrottleReasonApplicationsClocksSetting |
nvmlClocksThrottleReasonSwPowerCap |
nvmlClocksThrottleReasonHwSlowdown |
nvmlClocksThrottleReasonUnknown
)
class c_nvmlEventData_t(_PrintableStructure):
_fields_ = [
('device', c_nvmlDevice_t),
('eventType', c_ulonglong),
('eventData', c_ulonglong)
]
_fmt_ = {'eventType': "0x%08X"}
class c_nvmlAccountingStats_t(_PrintableStructure):
_fields_ = [
('gpuUtilization', c_uint),
('memoryUtilization', c_uint),
('maxMemoryUsage', c_ulonglong),
('time', c_ulonglong),
('startTime', c_ulonglong),
('isRunning', c_uint),
('reserved', c_uint * 5)
]
## C function wrappers ##
def nvmlInit():
_LoadNvmlLibrary()
#
# Initialize the library
#
fn = _nvmlGetFunctionPointer("nvmlInit_v2")
ret = fn()
_nvmlCheckReturn(ret)
# Atomically update refcount
global _nvmlLib_refcount
libLoadLock.acquire()
_nvmlLib_refcount += 1
libLoadLock.release()
return None
def _LoadNvmlLibrary():
'''
Load the library if it isn't loaded already
'''
global nvmlLib
if (nvmlLib == None):
# lock to ensure only one caller loads the library
libLoadLock.acquire()
try:
# ensure the library still isn't loaded
if (nvmlLib == None):
try:
if (sys.platform[:3] == "win"):
# cdecl calling convention
# load nvml.dll from %ProgramFiles%/NVIDIA Corporation/NVSMI/nvml.dll
nvmlLib = CDLL(os.path.join(os.getenv("ProgramFiles", "C:/Program Files"), "NVIDIA Corporation/NVSMI/nvml.dll"))
else:
# assume linux
nvmlLib = CDLL("libnvidia-ml.so.1")
except OSError as ose:
_nvmlCheckReturn(NVML_ERROR_LIBRARY_NOT_FOUND)
if (nvmlLib == None):
_nvmlCheckReturn(NVML_ERROR_LIBRARY_NOT_FOUND)
finally:
# lock is always freed
libLoadLock.release()
def nvmlShutdown():
#
# Leave the library loaded, but shutdown the interface
#
fn = _nvmlGetFunctionPointer("nvmlShutdown")
ret = fn()
_nvmlCheckReturn(ret)
# Atomically update refcount
global _nvmlLib_refcount
libLoadLock.acquire()
if (0 < _nvmlLib_refcount):
_nvmlLib_refcount -= 1
libLoadLock.release()
return None
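# Typical call sequence sketch for the wrappers in this module (device index 0
# assumed to exist):
#
#   nvmlInit()
#   handle = nvmlDeviceGetHandleByIndex(0)
#   print(nvmlDeviceGetName(handle))
#   nvmlShutdown()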
# Added in 2.285
def nvmlErrorString(result):
fn = _nvmlGetFunctionPointer("nvmlErrorString")
fn.restype = c_char_p # otherwise return is an int
ret = fn(result)
return ret
# Added in 2.285
def nvmlSystemGetNVMLVersion():
c_version = create_string_buffer(NVML_SYSTEM_NVML_VERSION_BUFFER_SIZE)
fn = _nvmlGetFunctionPointer("nvmlSystemGetNVMLVersion")
ret = fn(c_version, c_uint(NVML_SYSTEM_NVML_VERSION_BUFFER_SIZE))
_nvmlCheckReturn(ret)
return c_version.value
# Added in 2.285
def nvmlSystemGetProcessName(pid):
c_name = create_string_buffer(1024)
fn = _nvmlGetFunctionPointer("nvmlSystemGetProcessName")
ret = fn(c_uint(pid), c_name, c_uint(1024))
_nvmlCheckReturn(ret)
return c_name.value
def nvmlSystemGetDriverVersion():
c_version = create_string_buffer(NVML_SYSTEM_DRIVER_VERSION_BUFFER_SIZE)
fn = _nvmlGetFunctionPointer("nvmlSystemGetDriverVersion")
ret = fn(c_version, c_uint(NVML_SYSTEM_DRIVER_VERSION_BUFFER_SIZE))
_nvmlCheckReturn(ret)
return c_version.value
# Added in 2.285
def nvmlSystemGetHicVersion():
c_count = c_uint(0)
hics = None
fn = _nvmlGetFunctionPointer("nvmlSystemGetHicVersion")
# get the count
ret = fn(byref(c_count), None)
# this should only fail with insufficient size
if ((ret != NVML_SUCCESS) and
(ret != NVML_ERROR_INSUFFICIENT_SIZE)):
raise NVMLError(ret)
# if there are no hics
if (c_count.value == 0):
return []
hic_array = c_nvmlHwbcEntry_t * c_count.value
hics = hic_array()
ret = fn(byref(c_count), hics)
_nvmlCheckReturn(ret)
return hics
## Unit get functions
def nvmlUnitGetCount():
c_count = c_uint()
fn = _nvmlGetFunctionPointer("nvmlUnitGetCount")
ret = fn(byref(c_count))
_nvmlCheckReturn(ret)
return c_count.value
def nvmlUnitGetHandleByIndex(index):
c_index = c_uint(index)
unit = c_nvmlUnit_t()
fn = _nvmlGetFunctionPointer("nvmlUnitGetHandleByIndex")
ret = fn(c_index, byref(unit))
_nvmlCheckReturn(ret)
return unit
def nvmlUnitGetUnitInfo(unit):
c_info = c_nvmlUnitInfo_t()
fn = _nvmlGetFunctionPointer("nvmlUnitGetUnitInfo")
ret = fn(unit, byref(c_info))
_nvmlCheckReturn(ret)
return c_info
def nvmlUnitGetLedState(unit):
c_state = c_nvmlLedState_t()
fn = _nvmlGetFunctionPointer("nvmlUnitGetLedState")
ret = fn(unit, byref(c_state))
_nvmlCheckReturn(ret)
return c_state
def nvmlUnitGetPsuInfo(unit):
c_info = c_nvmlPSUInfo_t()
fn = _nvmlGetFunctionPointer("nvmlUnitGetPsuInfo")
ret = fn(unit, byref(c_info))
_nvmlCheckReturn(ret)
return c_info
def nvmlUnitGetTemperature(unit, type):
c_temp = c_uint()
fn = _nvmlGetFunctionPointer("nvmlUnitGetTemperature")
ret = fn(unit, c_uint(type), byref(c_temp))
_nvmlCheckReturn(ret)
return c_temp.value
def nvmlUnitGetFanSpeedInfo(unit):
c_speeds = c_nvmlUnitFanSpeeds_t()
fn = _nvmlGetFunctionPointer("nvmlUnitGetFanSpeedInfo")
ret = fn(unit, byref(c_speeds))
_nvmlCheckReturn(ret)
return c_speeds
# added to API
def nvmlUnitGetDeviceCount(unit):
c_count = c_uint(0)
# query the unit to determine device count
fn = _nvmlGetFunctionPointer("nvmlUnitGetDevices")
ret = fn(unit, byref(c_count), None)
if (ret == NVML_ERROR_INSUFFICIENT_SIZE):
ret = NVML_SUCCESS
_nvmlCheckReturn(ret)
return c_count.value
def nvmlUnitGetDevices(unit):
c_count = c_uint(nvmlUnitGetDeviceCount(unit))
device_array = c_nvmlDevice_t * c_count.value
c_devices = device_array()
fn = _nvmlGetFunctionPointer("nvmlUnitGetDevices")
ret = fn(unit, byref(c_count), c_devices)
_nvmlCheckReturn(ret)
return c_devices
## Device get functions
def nvmlDeviceGetCount():
c_count = c_uint()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetCount_v2")
ret = fn(byref(c_count))
_nvmlCheckReturn(ret)
return c_count.value
def nvmlDeviceGetHandleByIndex(index):
c_index = c_uint(index)
device = c_nvmlDevice_t()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetHandleByIndex_v2")
ret = fn(c_index, byref(device))
_nvmlCheckReturn(ret)
return device
def nvmlDeviceGetHandleBySerial(serial):
c_serial = c_char_p(serial)
device = c_nvmlDevice_t()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetHandleBySerial")
ret = fn(c_serial, byref(device))
_nvmlCheckReturn(ret)
return device
def nvmlDeviceGetHandleByUUID(uuid):
c_uuid = c_char_p(uuid)
device = c_nvmlDevice_t()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetHandleByUUID")
ret = fn(c_uuid, byref(device))
_nvmlCheckReturn(ret)
return device
def nvmlDeviceGetHandleByPciBusId(pciBusId):
c_busId = c_char_p(pciBusId)
device = c_nvmlDevice_t()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetHandleByPciBusId_v2")
ret = fn(c_busId, byref(device))
_nvmlCheckReturn(ret)
return device
def nvmlDeviceGetName(handle):
c_name = create_string_buffer(NVML_DEVICE_NAME_BUFFER_SIZE)
fn = _nvmlGetFunctionPointer("nvmlDeviceGetName")
ret = fn(handle, c_name, c_uint(NVML_DEVICE_NAME_BUFFER_SIZE))
_nvmlCheckReturn(ret)
return c_name.value
def nvmlDeviceGetBoardId(handle):
c_id = c_uint()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetBoardId")
ret = fn(handle, byref(c_id))
_nvmlCheckReturn(ret)
return c_id.value
def nvmlDeviceGetMultiGpuBoard(handle):
c_multiGpu = c_uint()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetMultiGpuBoard")
ret = fn(handle, byref(c_multiGpu))
_nvmlCheckReturn(ret)
return c_multiGpu.value
def nvmlDeviceGetBrand(handle):
c_type = _nvmlBrandType_t()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetBrand")
ret = fn(handle, byref(c_type))
_nvmlCheckReturn(ret)
return c_type.value
def nvmlDeviceGetSerial(handle):
c_serial = create_string_buffer(NVML_DEVICE_SERIAL_BUFFER_SIZE)
fn = _nvmlGetFunctionPointer("nvmlDeviceGetSerial")
ret = fn(handle, c_serial, c_uint(NVML_DEVICE_SERIAL_BUFFER_SIZE))
_nvmlCheckReturn(ret)
return c_serial.value
def nvmlDeviceGetCpuAffinity(handle, cpuSetSize):
affinity_array = c_ulonglong * cpuSetSize
c_affinity = affinity_array()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetCpuAffinity")
ret = fn(handle, cpuSetSize, byref(c_affinity))
_nvmlCheckReturn(ret)
return c_affinity
def nvmlDeviceSetCpuAffinity(handle):
fn = _nvmlGetFunctionPointer("nvmlDeviceSetCpuAffinity")
ret = fn(handle)
_nvmlCheckReturn(ret)
return None
def nvmlDeviceClearCpuAffinity(handle):
fn = _nvmlGetFunctionPointer("nvmlDeviceClearCpuAffinity")
ret = fn(handle)
_nvmlCheckReturn(ret)
return None
def nvmlDeviceGetMinorNumber(handle):
c_minor_number = c_uint()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetMinorNumber")
ret = fn(handle, byref(c_minor_number))
_nvmlCheckReturn(ret)
return c_minor_number.value
def nvmlDeviceGetUUID(handle):
c_uuid = create_string_buffer(NVML_DEVICE_UUID_BUFFER_SIZE)
fn = _nvmlGetFunctionPointer("nvmlDeviceGetUUID")
ret = fn(handle, c_uuid, c_uint(NVML_DEVICE_UUID_BUFFER_SIZE))
_nvmlCheckReturn(ret)
return c_uuid.value
def nvmlDeviceGetInforomVersion(handle, infoRomObject):
c_version = create_string_buffer(NVML_DEVICE_INFOROM_VERSION_BUFFER_SIZE)
fn = _nvmlGetFunctionPointer("nvmlDeviceGetInforomVersion")
ret = fn(handle, _nvmlInforomObject_t(infoRomObject),
c_version, c_uint(NVML_DEVICE_INFOROM_VERSION_BUFFER_SIZE))
_nvmlCheckReturn(ret)
return c_version.value
# Added in 4.304
def nvmlDeviceGetInforomImageVersion(handle):
c_version = create_string_buffer(NVML_DEVICE_INFOROM_VERSION_BUFFER_SIZE)
fn = _nvmlGetFunctionPointer("nvmlDeviceGetInforomImageVersion")
ret = fn(handle, c_version, c_uint(NVML_DEVICE_INFOROM_VERSION_BUFFER_SIZE))
_nvmlCheckReturn(ret)
return c_version.value
# Added in 4.304
def nvmlDeviceGetInforomConfigurationChecksum(handle):
c_checksum = c_uint()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetInforomConfigurationChecksum")
ret = fn(handle, byref(c_checksum))
_nvmlCheckReturn(ret)
return c_checksum.value
# Added in 4.304
def nvmlDeviceValidateInforom(handle):
fn = _nvmlGetFunctionPointer("nvmlDeviceValidateInforom")
ret = fn(handle)
_nvmlCheckReturn(ret)
return None
def nvmlDeviceGetDisplayMode(handle):
c_mode = _nvmlEnableState_t()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetDisplayMode")
ret = fn(handle, byref(c_mode))
_nvmlCheckReturn(ret)
return c_mode.value
def nvmlDeviceGetDisplayActive(handle):
c_mode = _nvmlEnableState_t()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetDisplayActive")
ret = fn(handle, byref(c_mode))
_nvmlCheckReturn(ret)
return c_mode.value
def nvmlDeviceGetPersistenceMode(handle):
c_state = _nvmlEnableState_t()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetPersistenceMode")
ret = fn(handle, byref(c_state))
_nvmlCheckReturn(ret)
return c_state.value
def nvmlDeviceGetPciInfo(handle):
c_info = nvmlPciInfo_t()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetPciInfo_v2")
ret = fn(handle, byref(c_info))
_nvmlCheckReturn(ret)
return c_info
def nvmlDeviceGetClockInfo(handle, type):
c_clock = c_uint()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetClockInfo")
ret = fn(handle, _nvmlClockType_t(type), byref(c_clock))
_nvmlCheckReturn(ret)
return c_clock.value
# Added in 2.285
def nvmlDeviceGetMaxClockInfo(handle, type):
c_clock = c_uint()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetMaxClockInfo")
ret = fn(handle, _nvmlClockType_t(type), byref(c_clock))
_nvmlCheckReturn(ret)
return c_clock.value
# Added in 4.304
def nvmlDeviceGetApplicationsClock(handle, type):
c_clock = c_uint()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetApplicationsClock")
ret = fn(handle, _nvmlClockType_t(type), byref(c_clock))
_nvmlCheckReturn(ret)
return c_clock.value
# Added in 5.319
def nvmlDeviceGetDefaultApplicationsClock(handle, type):
c_clock = c_uint()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetDefaultApplicationsClock")
ret = fn(handle, _nvmlClockType_t(type), byref(c_clock))
_nvmlCheckReturn(ret)
return c_clock.value
# Added in 4.304
def nvmlDeviceGetSupportedMemoryClocks(handle):
# first call to get the size
c_count = c_uint(0)
fn = _nvmlGetFunctionPointer("nvmlDeviceGetSupportedMemoryClocks")
ret = fn(handle, byref(c_count), None)
if (ret == NVML_SUCCESS):
# special case, no clocks
return []
elif (ret == NVML_ERROR_INSUFFICIENT_SIZE):
# typical case
clocks_array = c_uint * c_count.value
c_clocks = clocks_array()
# make the call again
ret = fn(handle, byref(c_count), c_clocks)
_nvmlCheckReturn(ret)
procs = []
for i in range(c_count.value):
procs.append(c_clocks[i])
return procs
else:
# error case
raise NVMLError(ret)
# Added in 4.304
def nvmlDeviceGetSupportedGraphicsClocks(handle, memoryClockMHz):
# first call to get the size
c_count = c_uint(0)
fn = _nvmlGetFunctionPointer("nvmlDeviceGetSupportedGraphicsClocks")
ret = fn(handle, c_uint(memoryClockMHz), byref(c_count), None)
if (ret == NVML_SUCCESS):
# special case, no clocks
return []
elif (ret == NVML_ERROR_INSUFFICIENT_SIZE):
# typical case
clocks_array = c_uint * c_count.value
c_clocks = clocks_array()
# make the call again
ret = fn(handle, c_uint(memoryClockMHz), byref(c_count), c_clocks)
_nvmlCheckReturn(ret)
procs = []
for i in range(c_count.value):
procs.append(c_clocks[i])
return procs
else:
# error case
raise NVMLError(ret)
def nvmlDeviceGetFanSpeed(handle):
c_speed = c_uint()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetFanSpeed")
ret = fn(handle, byref(c_speed))
_nvmlCheckReturn(ret)
return c_speed.value
def nvmlDeviceGetTemperature(handle, sensor):
c_temp = c_uint()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetTemperature")
ret = fn(handle, _nvmlTemperatureSensors_t(sensor), byref(c_temp))
_nvmlCheckReturn(ret)
return c_temp.value
def nvmlDeviceGetTemperatureThreshold(handle, threshold):
c_temp = c_uint()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetTemperatureThreshold")
ret = fn(handle, _nvmlTemperatureThresholds_t(threshold), byref(c_temp))
_nvmlCheckReturn(ret)
return c_temp.value
# DEPRECATED use nvmlDeviceGetPerformanceState
def nvmlDeviceGetPowerState(handle):
c_pstate = _nvmlPstates_t()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetPowerState")
ret = fn(handle, byref(c_pstate))
_nvmlCheckReturn(ret)
return c_pstate.value
def nvmlDeviceGetPerformanceState(handle):
c_pstate = _nvmlPstates_t()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetPerformanceState")
ret = fn(handle, byref(c_pstate))
_nvmlCheckReturn(ret)
return c_pstate.value
def nvmlDeviceGetPowerManagementMode(handle):
c_pcapMode = _nvmlEnableState_t()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetPowerManagementMode")
ret = fn(handle, byref(c_pcapMode))
_nvmlCheckReturn(ret)
return c_pcapMode.value
def nvmlDeviceGetPowerManagementLimit(handle):
c_limit = c_uint()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetPowerManagementLimit")
ret = fn(handle, byref(c_limit))
_nvmlCheckReturn(ret)
return c_limit.value
# Added in 4.304
def nvmlDeviceGetPowerManagementLimitConstraints(handle):
c_minLimit = c_uint()
c_maxLimit = c_uint()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetPowerManagementLimitConstraints")
ret = fn(handle, byref(c_minLimit), byref(c_maxLimit))
_nvmlCheckReturn(ret)
return [c_minLimit.value, c_maxLimit.value]
# Added in 4.304
def nvmlDeviceGetPowerManagementDefaultLimit(handle):
c_limit = c_uint()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetPowerManagementDefaultLimit")
ret = fn(handle, byref(c_limit))
_nvmlCheckReturn(ret)
return c_limit.value
# Added in 331
def nvmlDeviceGetEnforcedPowerLimit(handle):
c_limit = c_uint()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetEnforcedPowerLimit")
ret = fn(handle, byref(c_limit))
_nvmlCheckReturn(ret)
return c_limit.value
def nvmlDeviceGetPowerUsage(handle):
c_watts = c_uint()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetPowerUsage")
ret = fn(handle, byref(c_watts))
_nvmlCheckReturn(ret)
return c_watts.value
# Added in 4.304
def nvmlDeviceGetGpuOperationMode(handle):
c_currState = _nvmlGpuOperationMode_t()
c_pendingState = _nvmlGpuOperationMode_t()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetGpuOperationMode")
ret = fn(handle, byref(c_currState), byref(c_pendingState))
_nvmlCheckReturn(ret)
return [c_currState.value, c_pendingState.value]
# Added in 4.304
def nvmlDeviceGetCurrentGpuOperationMode(handle):
return nvmlDeviceGetGpuOperationMode(handle)[0]
# Added in 4.304
def nvmlDeviceGetPendingGpuOperationMode(handle):
return nvmlDeviceGetGpuOperationMode(handle)[1]
def nvmlDeviceGetMemoryInfo(handle):
c_memory = c_nvmlMemory_t()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetMemoryInfo")
ret = fn(handle, byref(c_memory))
_nvmlCheckReturn(ret)
return c_memory
def nvmlDeviceGetBAR1MemoryInfo(handle):
c_bar1_memory = c_nvmlBAR1Memory_t()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetBAR1MemoryInfo")
ret = fn(handle, byref(c_bar1_memory))
_nvmlCheckReturn(ret)
return c_bar1_memory
def nvmlDeviceGetComputeMode(handle):
c_mode = _nvmlComputeMode_t()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetComputeMode")
ret = fn(handle, byref(c_mode))
_nvmlCheckReturn(ret)
return c_mode.value
def nvmlDeviceGetEccMode(handle):
c_currState = _nvmlEnableState_t()
c_pendingState = _nvmlEnableState_t()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetEccMode")
ret = fn(handle, byref(c_currState), byref(c_pendingState))
_nvmlCheckReturn(ret)
return [c_currState.value, c_pendingState.value]
# added to API
def nvmlDeviceGetCurrentEccMode(handle):
return nvmlDeviceGetEccMode(handle)[0]
# added to API
def nvmlDeviceGetPendingEccMode(handle):
return nvmlDeviceGetEccMode(handle)[1]
def nvmlDeviceGetTotalEccErrors(handle, errorType, counterType):
c_count = c_ulonglong()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetTotalEccErrors")
ret = fn(handle, _nvmlMemoryErrorType_t(errorType),
_nvmlEccCounterType_t(counterType), byref(c_count))
_nvmlCheckReturn(ret)
return c_count.value
# This is deprecated, instead use nvmlDeviceGetMemoryErrorCounter
def nvmlDeviceGetDetailedEccErrors(handle, errorType, counterType):
c_counts = c_nvmlEccErrorCounts_t()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetDetailedEccErrors")
ret = fn(handle, _nvmlMemoryErrorType_t(errorType),
_nvmlEccCounterType_t(counterType), byref(c_counts))
_nvmlCheckReturn(ret)
return c_counts
# Added in 4.304
def nvmlDeviceGetMemoryErrorCounter(handle, errorType, counterType, locationType):
c_count = c_ulonglong()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetMemoryErrorCounter")
ret = fn(handle,
_nvmlMemoryErrorType_t(errorType),
_nvmlEccCounterType_t(counterType),
_nvmlMemoryLocation_t(locationType),
byref(c_count))
_nvmlCheckReturn(ret)
return c_count.value
def nvmlDeviceGetUtilizationRates(handle):
c_util = c_nvmlUtilization_t()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetUtilizationRates")
ret = fn(handle, byref(c_util))
_nvmlCheckReturn(ret)
return c_util
def nvmlDeviceGetEncoderUtilization(handle):
c_util = c_uint()
c_samplingPeriod = c_uint()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetEncoderUtilization")
ret = fn(handle, byref(c_util), byref(c_samplingPeriod))
_nvmlCheckReturn(ret)
return [c_util.value, c_samplingPeriod.value]
def nvmlDeviceGetDecoderUtilization(handle):
c_util = c_uint()
c_samplingPeriod = c_uint()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetDecoderUtilization")
ret = fn(handle, byref(c_util), byref(c_samplingPeriod))
_nvmlCheckReturn(ret)
return [c_util.value, c_samplingPeriod.value]
def nvmlDeviceGetPcieReplayCounter(handle):
c_replay = c_uint()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetPcieReplayCounter")
ret = fn(handle, byref(c_replay))
_nvmlCheckReturn(ret)
return c_replay.value
def nvmlDeviceGetDriverModel(handle):
c_currModel = _nvmlDriverModel_t()
c_pendingModel = _nvmlDriverModel_t()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetDriverModel")
ret = fn(handle, byref(c_currModel), byref(c_pendingModel))
_nvmlCheckReturn(ret)
return [c_currModel.value, c_pendingModel.value]
# added to API
def nvmlDeviceGetCurrentDriverModel(handle):
return nvmlDeviceGetDriverModel(handle)[0]
# added to API
def nvmlDeviceGetPendingDriverModel(handle):
return nvmlDeviceGetDriverModel(handle)[1]
# Added in 2.285
def nvmlDeviceGetVbiosVersion(handle):
c_version = create_string_buffer(NVML_DEVICE_VBIOS_VERSION_BUFFER_SIZE)
fn = _nvmlGetFunctionPointer("nvmlDeviceGetVbiosVersion")
ret = fn(handle, c_version, c_uint(NVML_DEVICE_VBIOS_VERSION_BUFFER_SIZE))
_nvmlCheckReturn(ret)
return c_version.value
# Added in 2.285
def nvmlDeviceGetComputeRunningProcesses(handle):
# first call to get the size
c_count = c_uint(0)
fn = _nvmlGetFunctionPointer("nvmlDeviceGetComputeRunningProcesses")
ret = fn(handle, byref(c_count), None)
if (ret == NVML_SUCCESS):
# special case, no running processes
return []
elif (ret == NVML_ERROR_INSUFFICIENT_SIZE):
# typical case
        # oversize the array in case more processes are created
c_count.value = c_count.value * 2 + 5
proc_array = c_nvmlProcessInfo_t * c_count.value
c_procs = proc_array()
# make the call again
ret = fn(handle, byref(c_count), c_procs)
_nvmlCheckReturn(ret)
procs = []
for i in range(c_count.value):
# use an alternative struct for this object
obj = nvmlStructToFriendlyObject(c_procs[i])
if (obj.usedGpuMemory == NVML_VALUE_NOT_AVAILABLE_ulonglong.value):
# special case for WDDM on Windows, see comment above
obj.usedGpuMemory = None
procs.append(obj)
return procs
else:
# error case
raise NVMLError(ret)
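# Usage sketch (not part of the original bindings): listing compute processes
# and their GPU memory use; usedGpuMemory may be None on WDDM systems (see the
# special-case handling above).
#
#   for p in nvmlDeviceGetComputeRunningProcesses(handle):
#       print("pid %d uses %s bytes" % (p.pid, p.usedGpuMemory))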
def nvmlDeviceGetGraphicsRunningProcesses(handle):
# first call to get the size
c_count = c_uint(0)
fn = _nvmlGetFunctionPointer("nvmlDeviceGetGraphicsRunningProcesses")
ret = fn(handle, byref(c_count), None)
if (ret == NVML_SUCCESS):
# special case, no running processes
return []
elif (ret == NVML_ERROR_INSUFFICIENT_SIZE):
# typical case
        # oversize the array in case more processes are created
c_count.value = c_count.value * 2 + 5
proc_array = c_nvmlProcessInfo_t * c_count.value
c_procs = proc_array()
# make the call again
ret = fn(handle, byref(c_count), c_procs)
_nvmlCheckReturn(ret)
procs = []
for i in range(c_count.value):
# use an alternative struct for this object
obj = nvmlStructToFriendlyObject(c_procs[i])
if (obj.usedGpuMemory == NVML_VALUE_NOT_AVAILABLE_ulonglong.value):
# special case for WDDM on Windows, see comment above
obj.usedGpuMemory = None
procs.append(obj)
return procs
else:
# error case
raise NVMLError(ret)
def nvmlDeviceGetAutoBoostedClocksEnabled(handle):
c_isEnabled = _nvmlEnableState_t()
c_defaultIsEnabled = _nvmlEnableState_t()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetAutoBoostedClocksEnabled")
ret = fn(handle, byref(c_isEnabled), byref(c_defaultIsEnabled))
_nvmlCheckReturn(ret)
return [c_isEnabled.value, c_defaultIsEnabled.value]
#Throws NVML_ERROR_NOT_SUPPORTED if hardware doesn't support setting auto boosted clocks
## Set functions
def nvmlUnitSetLedState(unit, color):
fn = _nvmlGetFunctionPointer("nvmlUnitSetLedState")
ret = fn(unit, _nvmlLedColor_t(color))
_nvmlCheckReturn(ret)
return None
def nvmlDeviceSetPersistenceMode(handle, mode):
fn = _nvmlGetFunctionPointer("nvmlDeviceSetPersistenceMode")
ret = fn(handle, _nvmlEnableState_t(mode))
_nvmlCheckReturn(ret)
return None
def nvmlDeviceSetComputeMode(handle, mode):
fn = _nvmlGetFunctionPointer("nvmlDeviceSetComputeMode")
ret = fn(handle, _nvmlComputeMode_t(mode))
_nvmlCheckReturn(ret)
return None
def nvmlDeviceSetEccMode(handle, mode):
fn = _nvmlGetFunctionPointer("nvmlDeviceSetEccMode")
ret = fn(handle, _nvmlEnableState_t(mode))
_nvmlCheckReturn(ret)
return None
def nvmlDeviceClearEccErrorCounts(handle, counterType):
fn = _nvmlGetFunctionPointer("nvmlDeviceClearEccErrorCounts")
ret = fn(handle, _nvmlEccCounterType_t(counterType))
_nvmlCheckReturn(ret)
return None
def nvmlDeviceSetDriverModel(handle, model):
fn = _nvmlGetFunctionPointer("nvmlDeviceSetDriverModel")
ret = fn(handle, _nvmlDriverModel_t(model))
_nvmlCheckReturn(ret)
return None
def nvmlDeviceSetAutoBoostedClocksEnabled(handle, enabled):
fn = _nvmlGetFunctionPointer("nvmlDeviceSetAutoBoostedClocksEnabled")
ret = fn(handle, _nvmlEnableState_t(enabled))
_nvmlCheckReturn(ret)
return None
#Throws NVML_ERROR_NOT_SUPPORTED if hardware doesn't support setting auto boosted clocks
def nvmlDeviceSetDefaultAutoBoostedClocksEnabled(handle, enabled, flags):
fn = _nvmlGetFunctionPointer("nvmlDeviceSetDefaultAutoBoostedClocksEnabled")
ret = fn(handle, _nvmlEnableState_t(enabled), c_uint(flags))
_nvmlCheckReturn(ret)
return None
#Throws NVML_ERROR_NOT_SUPPORTED if hardware doesn't support setting auto boosted clocks
# Added in 4.304
def nvmlDeviceSetApplicationsClocks(handle, maxMemClockMHz, maxGraphicsClockMHz):
fn = _nvmlGetFunctionPointer("nvmlDeviceSetApplicationsClocks")
ret = fn(handle, c_uint(maxMemClockMHz), c_uint(maxGraphicsClockMHz))
_nvmlCheckReturn(ret)
return None
# Added in 4.304
def nvmlDeviceResetApplicationsClocks(handle):
fn = _nvmlGetFunctionPointer("nvmlDeviceResetApplicationsClocks")
ret = fn(handle)
_nvmlCheckReturn(ret)
return None
# Added in 4.304
def nvmlDeviceSetPowerManagementLimit(handle, limit):
fn = _nvmlGetFunctionPointer("nvmlDeviceSetPowerManagementLimit")
ret = fn(handle, c_uint(limit))
_nvmlCheckReturn(ret)
return None
# Added in 4.304
def nvmlDeviceSetGpuOperationMode(handle, mode):
fn = _nvmlGetFunctionPointer("nvmlDeviceSetGpuOperationMode")
ret = fn(handle, _nvmlGpuOperationMode_t(mode))
_nvmlCheckReturn(ret)
return None
# Added in 2.285
def nvmlEventSetCreate():
fn = _nvmlGetFunctionPointer("nvmlEventSetCreate")
eventSet = c_nvmlEventSet_t()
ret = fn(byref(eventSet))
_nvmlCheckReturn(ret)
return eventSet
# Added in 2.285
def nvmlDeviceRegisterEvents(handle, eventTypes, eventSet):
fn = _nvmlGetFunctionPointer("nvmlDeviceRegisterEvents")
ret = fn(handle, c_ulonglong(eventTypes), eventSet)
_nvmlCheckReturn(ret)
return None
# Added in 2.285
def nvmlDeviceGetSupportedEventTypes(handle):
c_eventTypes = c_ulonglong()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetSupportedEventTypes")
ret = fn(handle, byref(c_eventTypes))
_nvmlCheckReturn(ret)
return c_eventTypes.value
# Added in 2.285
# raises NVML_ERROR_TIMEOUT exception on timeout
def nvmlEventSetWait(eventSet, timeoutms):
fn = _nvmlGetFunctionPointer("nvmlEventSetWait")
data = c_nvmlEventData_t()
ret = fn(eventSet, byref(data), c_uint(timeoutms))
_nvmlCheckReturn(ret)
return data
# Added in 2.285
def nvmlEventSetFree(eventSet):
fn = _nvmlGetFunctionPointer("nvmlEventSetFree")
ret = fn(eventSet)
_nvmlCheckReturn(ret)
return None
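# Usage sketch (not part of the original bindings): a typical event-set
# lifecycle.  The nvmlEventTypeXidCriticalError constant is assumed to be
# defined earlier in this module; nvmlEventSetWait() raises an NVMLError with
# NVML_ERROR_TIMEOUT if no event arrives within the timeout.
#
#   event_set = nvmlEventSetCreate()
#   nvmlDeviceRegisterEvents(handle, nvmlEventTypeXidCriticalError, event_set)
#   try:
#       data = nvmlEventSetWait(event_set, 1000)  # timeout in milliseconds
#   except NVMLError:
#       pass  # timed out, no event delivered
#   finally:
#       nvmlEventSetFree(event_set)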
# Added in 3.295
def nvmlDeviceOnSameBoard(handle1, handle2):
fn = _nvmlGetFunctionPointer("nvmlDeviceOnSameBoard")
onSameBoard = c_int()
ret = fn(handle1, handle2, byref(onSameBoard))
_nvmlCheckReturn(ret)
return (onSameBoard.value != 0)
# Added in 3.295
def nvmlDeviceGetCurrPcieLinkGeneration(handle):
fn = _nvmlGetFunctionPointer("nvmlDeviceGetCurrPcieLinkGeneration")
gen = c_uint()
ret = fn(handle, byref(gen))
_nvmlCheckReturn(ret)
return gen.value
# Added in 3.295
def nvmlDeviceGetMaxPcieLinkGeneration(handle):
fn = _nvmlGetFunctionPointer("nvmlDeviceGetMaxPcieLinkGeneration")
gen = c_uint()
ret = fn(handle, byref(gen))
_nvmlCheckReturn(ret)
return gen.value
# Added in 3.295
def nvmlDeviceGetCurrPcieLinkWidth(handle):
fn = _nvmlGetFunctionPointer("nvmlDeviceGetCurrPcieLinkWidth")
width = c_uint()
ret = fn(handle, byref(width))
_nvmlCheckReturn(ret)
return width.value
# Added in 3.295
def nvmlDeviceGetMaxPcieLinkWidth(handle):
fn = _nvmlGetFunctionPointer("nvmlDeviceGetMaxPcieLinkWidth")
width = c_uint()
ret = fn(handle, byref(width))
_nvmlCheckReturn(ret)
return width.value
# Added in 4.304
def nvmlDeviceGetSupportedClocksThrottleReasons(handle):
    c_reasons = c_ulonglong()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetSupportedClocksThrottleReasons")
ret = fn(handle, byref(c_reasons))
_nvmlCheckReturn(ret)
return c_reasons.value
# Added in 4.304
def nvmlDeviceGetCurrentClocksThrottleReasons(handle):
    c_reasons = c_ulonglong()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetCurrentClocksThrottleReasons")
ret = fn(handle, byref(c_reasons))
_nvmlCheckReturn(ret)
return c_reasons.value
# Added in 5.319
def nvmlDeviceGetIndex(handle):
fn = _nvmlGetFunctionPointer("nvmlDeviceGetIndex")
c_index = c_uint()
ret = fn(handle, byref(c_index))
_nvmlCheckReturn(ret)
return c_index.value
# Added in 5.319
def nvmlDeviceGetAccountingMode(handle):
c_mode = _nvmlEnableState_t()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetAccountingMode")
ret = fn(handle, byref(c_mode))
_nvmlCheckReturn(ret)
return c_mode.value
def nvmlDeviceSetAccountingMode(handle, mode):
fn = _nvmlGetFunctionPointer("nvmlDeviceSetAccountingMode")
ret = fn(handle, _nvmlEnableState_t(mode))
_nvmlCheckReturn(ret)
return None
def nvmlDeviceClearAccountingPids(handle):
fn = _nvmlGetFunctionPointer("nvmlDeviceClearAccountingPids")
ret = fn(handle)
_nvmlCheckReturn(ret)
return None
def nvmlDeviceGetAccountingStats(handle, pid):
stats = c_nvmlAccountingStats_t()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetAccountingStats")
ret = fn(handle, c_uint(pid), byref(stats))
_nvmlCheckReturn(ret)
if (stats.maxMemoryUsage == NVML_VALUE_NOT_AVAILABLE_ulonglong.value):
# special case for WDDM on Windows, see comment above
stats.maxMemoryUsage = None
return stats
def nvmlDeviceGetAccountingPids(handle):
count = c_uint(nvmlDeviceGetAccountingBufferSize(handle))
pids = (c_uint * count.value)()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetAccountingPids")
ret = fn(handle, byref(count), pids)
_nvmlCheckReturn(ret)
return map(int, pids[0:count.value])
def nvmlDeviceGetAccountingBufferSize(handle):
bufferSize = c_uint()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetAccountingBufferSize")
ret = fn(handle, byref(bufferSize))
_nvmlCheckReturn(ret)
return int(bufferSize.value)
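# Usage sketch (not part of the original bindings): per-process accounting.
# Accounting mode must be enabled first (typically requires root); the
# NVML_FEATURE_ENABLED constant is assumed to be defined earlier in this module.
#
#   nvmlDeviceSetAccountingMode(handle, NVML_FEATURE_ENABLED)
#   for pid in nvmlDeviceGetAccountingPids(handle):
#       stats = nvmlDeviceGetAccountingStats(handle, pid)
#       print(pid, stats.gpuUtilization, stats.maxMemoryUsage)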
def nvmlDeviceGetRetiredPages(device, sourceFilter):
c_source = _nvmlPageRetirementCause_t(sourceFilter)
c_count = c_uint(0)
fn = _nvmlGetFunctionPointer("nvmlDeviceGetRetiredPages")
# First call will get the size
ret = fn(device, c_source, byref(c_count), None)
# this should only fail with insufficient size
if ((ret != NVML_SUCCESS) and
(ret != NVML_ERROR_INSUFFICIENT_SIZE)):
raise NVMLError(ret)
# call again with a buffer
# oversize the array for the rare cases where additional pages
# are retired between NVML calls
c_count.value = c_count.value * 2 + 5
page_array = c_ulonglong * c_count.value
c_pages = page_array()
ret = fn(device, c_source, byref(c_count), c_pages)
_nvmlCheckReturn(ret)
return map(int, c_pages[0:c_count.value])
def nvmlDeviceGetRetiredPagesPendingStatus(device):
c_pending = _nvmlEnableState_t()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetRetiredPagesPendingStatus")
ret = fn(device, byref(c_pending))
_nvmlCheckReturn(ret)
return int(c_pending.value)
def nvmlDeviceGetAPIRestriction(device, apiType):
c_permission = _nvmlEnableState_t()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetAPIRestriction")
ret = fn(device, _nvmlRestrictedAPI_t(apiType), byref(c_permission))
_nvmlCheckReturn(ret)
return int(c_permission.value)
def nvmlDeviceSetAPIRestriction(handle, apiType, isRestricted):
fn = _nvmlGetFunctionPointer("nvmlDeviceSetAPIRestriction")
ret = fn(handle, _nvmlRestrictedAPI_t(apiType), _nvmlEnableState_t(isRestricted))
_nvmlCheckReturn(ret)
return None
def nvmlDeviceGetBridgeChipInfo(handle):
bridgeHierarchy = c_nvmlBridgeChipHierarchy_t()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetBridgeChipInfo")
ret = fn(handle, byref(bridgeHierarchy))
_nvmlCheckReturn(ret)
return bridgeHierarchy
def nvmlDeviceGetSamples(device, sampling_type, timeStamp):
c_sampling_type = _nvmlSamplingType_t(sampling_type)
c_time_stamp = c_ulonglong(timeStamp)
c_sample_count = c_uint(0)
c_sample_value_type = _nvmlValueType_t()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetSamples")
## First Call gets the size
ret = fn(device, c_sampling_type, c_time_stamp, byref(c_sample_value_type), byref(c_sample_count), None)
# Stop if this fails
if (ret != NVML_SUCCESS):
raise NVMLError(ret)
sampleArray = c_sample_count.value * c_nvmlSample_t
c_samples = sampleArray()
ret = fn(device, c_sampling_type, c_time_stamp, byref(c_sample_value_type), byref(c_sample_count), c_samples)
_nvmlCheckReturn(ret)
return (c_sample_value_type.value, c_samples[0:c_sample_count.value])
def nvmlDeviceGetViolationStatus(device, perfPolicyType):
c_perfPolicy_type = _nvmlPerfPolicyType_t(perfPolicyType)
c_violTime = c_nvmlViolationTime_t()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetViolationStatus")
## Invoke the method to get violation time
ret = fn(device, c_perfPolicy_type, byref(c_violTime))
_nvmlCheckReturn(ret)
return c_violTime
def nvmlDeviceGetPcieThroughput(device, counter):
c_util = c_uint()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetPcieThroughput")
ret = fn(device, _nvmlPcieUtilCounter_t(counter), byref(c_util))
_nvmlCheckReturn(ret)
return c_util.value
def nvmlSystemGetTopologyGpuSet(cpuNumber):
c_count = c_uint(0)
fn = _nvmlGetFunctionPointer("nvmlSystemGetTopologyGpuSet")
# First call will get the size
ret = fn(cpuNumber, byref(c_count), None)
if ret != NVML_SUCCESS:
raise NVMLError(ret)
# call again with a buffer
device_array = c_nvmlDevice_t * c_count.value
c_devices = device_array()
ret = fn(cpuNumber, byref(c_count), c_devices)
_nvmlCheckReturn(ret)
    return list(c_devices[0:c_count.value])
def nvmlDeviceGetTopologyNearestGpus(device, level):
c_count = c_uint(0)
fn = _nvmlGetFunctionPointer("nvmlDeviceGetTopologyNearestGpus")
# First call will get the size
ret = fn(device, level, byref(c_count), None)
if ret != NVML_SUCCESS:
raise NVMLError(ret)
# call again with a buffer
device_array = c_nvmlDevice_t * c_count.value
c_devices = device_array()
ret = fn(device, level, byref(c_count), c_devices)
_nvmlCheckReturn(ret)
    return list(c_devices[0:c_count.value])
def nvmlDeviceGetTopologyCommonAncestor(device1, device2):
c_level = _nvmlGpuTopologyLevel_t()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetTopologyCommonAncestor")
ret = fn(device1, device2, byref(c_level))
_nvmlCheckReturn(ret)
return c_level.value
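# Usage sketch (not part of the original bindings): topology queries between
# two device handles.  The NVML_TOPOLOGY_* level constants are assumed to be
# defined earlier in this module.
#
#   level = nvmlDeviceGetTopologyCommonAncestor(handle0, handle1)
#   peers = nvmlDeviceGetTopologyNearestGpus(handle0, level)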