| code (string, 2-1.05M chars) | repo_name (string, 5-104 chars) | path (string, 4-251 chars) | language (1 class) | license (15 values) | size (int32, 2-1.05M) |
|---|---|---|---|---|---|
#
# University of Luxembourg
# Laboratory of Algorithmics, Cryptology and Security (LACS)
#
# FigureOfMerit (FOM)
#
# Copyright (C) 2015 University of Luxembourg
#
# Written in 2015 by Daniel Dinu <dumitru-daniel.dinu@uni.lu>
#
# This file is part of FigureOfMerit.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from Scenario2 import Constants
__author__ = 'daniel.dinu'
class CipherImplementationMetrics:
def __init__(self,
implementation_type=Constants.DEFAULT_IMPLEMENTATION_TYPE,
code_size_e=Constants.DEFAULT_METRIC_VALUE,
ram_stack_e=Constants.DEFAULT_METRIC_VALUE,
ram_data=Constants.DEFAULT_METRIC_VALUE,
execution_time_e=Constants.DEFAULT_METRIC_VALUE):
"""
Initialize cipher implementation metrics
:param implementation_type: Implementation type
:param code_size_e: Encryption code size
:param ram_stack_e: Encryption stack RAM
:param ram_data: Data RAM
:param execution_time_e: Encryption execution time
"""
self.implementation_type = implementation_type
self.code_size_e = code_size_e
self.ram_stack_e = ram_stack_e
self.ram_data = ram_data
self.execution_time_e = execution_time_e
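# Hypothetical usage sketch (not part of the original module): constructing a
# metrics record with invented figures; the field names follow the class above.
if __name__ == '__main__':
    metrics = CipherImplementationMetrics(
        implementation_type='ASM',
        code_size_e=1234,
        ram_stack_e=56,
        ram_data=16,
        execution_time_e=7890)
    print(metrics.implementation_type, metrics.code_size_e, metrics.execution_time_e)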
|
GaloisInc/hacrypto
|
src/Tools/FigureOfMerit/FigureOfMerit/BlockCiphers/Scenario2/CipherImplementationMetrics.py
|
Python
|
bsd-3-clause
| 1,891
|
import uuid
import warnings
from enum import Enum, auto, unique
from ocdsmerge.exceptions import DuplicateIdValueWarning, InconsistentTypeError
VERSIONED_VALUE_KEYS = frozenset(['releaseID', 'releaseDate', 'releaseTag', 'value'])
@unique
class MergeStrategy(Enum):
APPEND = auto()
MERGE_BY_POSITION = auto()
globals().update(MergeStrategy.__members__)
class IdValue(str):
"""
A string with ``identifier`` and ``original_value`` properties.
"""
def __init__(self, identifier):
self.identifier = identifier
str.__init__(identifier)
@property
def original_value(self):
return self._original_value
@original_value.setter
def original_value(self, original_value):
self._original_value = original_value
class IdDict(dict):
"""
A dictionary with an ``identifier`` property.
"""
@property
def identifier(self):
return self._identifier
@identifier.setter
def identifier(self, identifier):
self._identifier = identifier
def is_versioned_value(value):
"""
Returns whether the value is a versioned value.
"""
return len(value) == 4 and VERSIONED_VALUE_KEYS.issuperset(value)
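# Hypothetical example (not part of the original module): a versioned value is
# a dict with exactly the four keys in VERSIONED_VALUE_KEYS, e.g.
#   is_versioned_value({'releaseID': '1', 'releaseDate': '2000-01-01T00:00:00Z',
#                       'releaseTag': ['compiled'], 'value': 'Open'})  # True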
def flatten(obj, merge_rules, rule_overrides, flattened, path=(), rule_path=(), versioned=False):
"""
Flattens a JSON object into key-value pairs, in which the key is the JSON path as a tuple.
Replaces numbers in JSON paths (representing positions in arrays) with special objects. This ensures that objects
in arrays with different `id` values have different JSON paths – and makes it easy to identify such arrays.
For example:
.. code:: json
{
"a": "I am a",
"b": ["A", "list"],
"c": [
{"id": 1, "cb": "I am ca"},
{"id": 2, "ca": "I am cb"}
]
}
flattens to:
.. code:: python
{
('a',): 'I am a',
('b',): ['A', 'list'],
('c', '1', 'cb'): 'I am ca',
('c', '1', 'id'): 1,
('c', '2', 'ca'): 'I am cb',
('c', '2', 'id'): 2,
}
"""
# For an exploration of alternatives, see: https://github.com/open-contracting/ocds-merge/issues/26
if type(obj) is list:
is_dict = False
iterable = _enumerate(obj, path, rule_path, rule_overrides.get(rule_path))
new_rule_path = rule_path
else:
is_dict = True
iterable = obj.items()
for key, value in iterable:
if is_dict:
new_rule_path = rule_path + (key,)
new_path_merge_rules = merge_rules.get(new_rule_path, set())
if 'omitWhenMerged' in new_path_merge_rules:
continue
# If it's `wholeListMerge`, if it's neither an object nor an array, if it's an array containing non-objects
# (even if `wholeListMerge` is `false`), or if it's versioned values, use the whole list merge strategy.
# Note: Behavior is undefined and inconsistent if the array is not in the schema and contains objects in some
# cases but not in others.
# See https://standard.open-contracting.org/1.1/en/schema/merging/#whole-list-merge
# See https://standard.open-contracting.org/1.1/en/schema/merging/#objects
elif 'wholeListMerge' in new_path_merge_rules or not isinstance(value, (dict, list)) or \
type(value) is list and any(not isinstance(item, dict) for item in value) or \
versioned and value and all(is_versioned_value(item) for item in value):
flattened[path + (key,)] = value
# Recurse into non-empty objects, and arrays of objects that aren't `wholeListMerge`.
elif value:
flatten(value, merge_rules, rule_overrides, flattened, path + (key,), new_rule_path, versioned)
return flattened
def _enumerate(obj, path, rule_path, rule):
# This tracks the identifiers of objects in an array, to warn about collisions.
identifiers = {}
for key, value in enumerate(obj):
new_key, default_key = _id_value(key, value, rule)
# Check whether the identifier is used by other objects in the array.
default_path = path + (default_key,)
if default_path not in identifiers:
identifiers[default_path] = key
elif identifiers[default_path] != key:
warnings.warn(DuplicateIdValueWarning(rule_path, default_key, 'Multiple objects have the `id` '
f"value {default_key!r} in the `{'.'.join(map(str, rule_path))}` array"))
yield new_key, value
def _id_value(key, value, rule):
# If it is an array of objects, get the `id` value to apply the identifier merge strategy.
# https://standard.open-contracting.org/latest/en/schema/merging/#identifier-merge
if 'id' in value:
id_value = value['id']
identifier = id_value
# If the object contained no top-level `id` value, set a unique value.
else:
id_value = None
identifier = str(uuid.uuid1(1)) # use 1 instead of MAC address
# Calculate the default key for the warning.
default_key = IdValue(identifier)
if rule == MergeStrategy.APPEND:
if 'id' in value:
new_key = IdValue(str(uuid.uuid1(1)))
else: # avoid creating an extra UUID
new_key = default_key
elif rule == MergeStrategy.MERGE_BY_POSITION:
new_key = IdValue(key)
else:
new_key = default_key
# Save the original value. (If the value is an integer, this avoids coercing it to a string.)
new_key.original_value = id_value
return new_key, default_key
def unflatten(flattened):
"""
Unflattens a flattened object into a JSON object.
"""
unflattened = {}
identifiers = {}
for key in flattened:
current_node = unflattened
for end, part in enumerate(key, 1):
# If this is a path to an item of an array.
# See https://standard.open-contracting.org/1.1/en/schema/merging/#identifier-merge
if type(part) is IdValue:
path = key[:end]
# If the `id` of an object in the array matches, change into it.
id_path = path + (part.identifier,)
if id_path in identifiers:
current_node = identifiers[id_path]
# Otherwise, append a new object, and change into it.
else:
new_node = IdDict()
new_node.identifier = part.identifier
# If the original object had an `id` value, set it.
if part.original_value is not None:
new_node['id'] = part.original_value
# Cache which identifiers appear in which arrays.
identifiers[path + (new_node.identifier,)] = new_node
current_node.append(new_node)
current_node = new_node
continue
if not isinstance(current_node, dict):
message = 'An earlier release had the literal {!r} for /{}, but the current release has an object with a {!r} key' # noqa: E501
raise InconsistentTypeError(message.format(current_node, '/'.join(key[:end - 1]), part))
# Otherwise, this is a path to a property of an object. If this is a path to a node we visited before,
# change into it. If it's an `id` field, it's already been set to its original value.
if part in current_node:
current_node = current_node[part]
continue
if end < len(key):
# If the path is to a new array, start a new array, and change into it.
if type(key[end]) is IdValue:
current_node[part] = []
# If the path is to a new object, start a new object, and change into it.
else:
current_node[part] = {}
current_node = current_node[part]
continue
# If this is a full path, copy the data, omitting null'ed fields.
if flattened[key] is not None:
current_node[part] = flattened[key]
return unflattened
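# Hypothetical round-trip sketch (not part of the original module), assuming
# empty merge rules and rule overrides as accepted by the signatures above.
if __name__ == '__main__':
    release = {
        'a': 'I am a',
        'b': ['A', 'list'],
        'c': [{'id': 1, 'cb': 'I am ca'}, {'id': 2, 'ca': 'I am cb'}],
    }
    flat = flatten(release, {}, {}, {})
    # Array positions appear in the keys as IdValue objects built from `id`.
    assert unflatten(flat) == release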
|
open-contracting/ocds-merge
|
ocdsmerge/flatten.py
|
Python
|
bsd-3-clause
| 8,273
|
#!/usr/bin/env python
from boomslang import Bar, ClusteredBars, Plot
import unittest
from ImageComparisonTestCase import ImageComparisonTestCase
class ClusteredBarsTest(ImageComparisonTestCase, unittest.TestCase):
def __init__(self, testCaseName):
super(ClusteredBarsTest,self).__init__(testCaseName)
self.imageName = "clusteredbars.png"
def constructImage(self):
bar1 = Bar()
bar1.xValues = range(5)
bar1.yValues = [2, 4, 6, 8, 10]
bar1.color = "red"
bar1.label = "Red Cluster"
bar2 = Bar()
bar2.xValues = range(5)
bar2.yValues = [3, 12, 4, 8, 14]
bar2.color = "blue"
bar2.label = "Blue Cluster"
bar3 = Bar()
bar3.xValues = range(5)
bar3.yValues = [1, 6, 9, 13, 20]
bar3.color = "green"
bar3.label = "Green Cluster"
clusteredBars = ClusteredBars()
clusteredBars.add(bar1)
clusteredBars.add(bar2)
clusteredBars.add(bar3)
clusteredBars.spacing = 0.5
clusteredBars.xTickLabels = ["A", "B", "C", "D", "E"]
plot = Plot()
plot.add(clusteredBars)
plot.hasLegend()
plot.save(self.imageName)
ImageComparisonTestCase.register(ClusteredBarsTest)
if __name__ == "__main__":
test = ClusteredBarsTest("testImageComparison")
test.constructImage()
|
alexras/boomslang
|
tests/test_clusteredbars.py
|
Python
|
bsd-3-clause
| 1,377
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
import unittest
from auto_nag import utils
from auto_nag.bzcleaner import BzCleaner
from auto_nag.scripts.regression_set_status_flags import RegressionSetStatusFlags
def mock_get_checked_versions():
return {
"release": 2,
"beta": 3,
"nightly": 4,
"central": 4,
"esr": 3,
"esr_previous": 2,
}
def mock_get_bugs(self, *args, **kwargs):
return {
"1111": {
"id": 1111,
"cf_status_firefox_esr2": "---",
"cf_status_firefox_esr3": "---",
"cf_status_firefox2": "---",
"cf_status_firefox3": "affected",
"cf_status_firefox4": "fixed",
"regressed_by": 111,
},
"2222": {
"id": 2222,
"cf_status_firefox_esr2": "---",
"cf_status_firefox_esr3": "---",
"cf_status_firefox2": "---",
"cf_status_firefox3": "---",
"cf_status_firefox4": "---",
"regressed_by": 222,
},
"3333": {
"id": 3333,
"cf_status_firefox_esr2": "---",
"cf_status_firefox_esr3": "---",
"cf_status_firefox2": "---",
"cf_status_firefox3": "affected",
"cf_status_firefox4": "fixed",
"regressed_by": 333,
},
}
def mock_get_flags_from_regressing_bugs(self, bugids):
assert sorted(bugids) == [111, 222, 333]
return {
111: {
"id": 111,
"cf_status_firefox_esr3": "fixed",
"cf_status_firefox3": "fixed",
},
222: {
"id": 222,
"cf_status_firefox1": "fixed",
},
333: {
"id": 333,
"cf_status_firefox_esr3": "fixed",
"cf_status_firefox3": "fixed",
"groups": ["core-security-release"],
},
}
class TestSetStatusFlags(unittest.TestCase):
def setUp(self):
self.orig_get_checked_versions = utils.get_checked_versions
self.orig_get_bugs = BzCleaner.get_bugs
self.orig_get_flags_from_regressing_bugs = (
RegressionSetStatusFlags.get_flags_from_regressing_bugs
)
utils.get_checked_versions = mock_get_checked_versions
BzCleaner.get_bugs = mock_get_bugs
RegressionSetStatusFlags.get_flags_from_regressing_bugs = (
mock_get_flags_from_regressing_bugs
)
def tearDown(self):
utils.get_checked_versions = self.orig_get_checked_versions
BzCleaner.get_bugs = self.orig_get_bugs
RegressionSetStatusFlags.get_flags_from_regressing_bugs = (
self.orig_get_flags_from_regressing_bugs
)
def test_status_changes(self):
r = RegressionSetStatusFlags()
bugs = r.get_bugs()
self.assertEqual(sorted(bugs), ["1111", "2222", "3333"])
self.assertEqual(list(r.status_changes), ["1111", "2222", "3333"])
self.assertEqual(
sorted(r.status_changes["1111"]),
[
"cf_status_firefox2",
"cf_status_firefox_esr2",
"cf_status_firefox_esr3",
"comment",
],
)
self.assertEqual(r.status_changes["1111"]["cf_status_firefox2"], "unaffected")
self.assertEqual(
r.status_changes["1111"]["cf_status_firefox_esr2"], "unaffected"
)
self.assertEqual(r.status_changes["1111"]["cf_status_firefox_esr3"], "affected")
self.assertFalse(r.status_changes["1111"]["comment"]["is_private"])
self.assertTrue(r.status_changes["3333"]["comment"]["is_private"])
|
mozilla/relman-auto-nag
|
auto_nag/tests/test_regression_set_status_flags.py
|
Python
|
bsd-3-clause
| 3,826
|
# Copyright 2017 Google LLC.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Data providers for deepvariant images.
tf.data.Dataset and data providers for standard DeepVariant datasets for
training and evaluating germline calling accuracy.
"""
from absl import logging
import tensorflow as tf
from google.protobuf import text_format
from third_party.nucleus.io import sharded_file_utils
from deepvariant import dv_constants
from deepvariant import tf_utils
from deepvariant.protos import deepvariant_pb2
# These are empirically determined to work well on TPU with our data sets,
# where lots of buffering and concurrency is necessary to keep the device
# busy.
# These are settable in the constructor.
_DEFAULT_INPUT_READ_THREADS = 32
_DEFAULT_SHUFFLE_BUFFER_ELEMENTS = 100
_DEFAULT_INITIAL_SHUFFLE_BUFFER_ELEMENTS = 1024
_DEFAULT_PREFETCH_BUFFER_BYTES = 16 * 1000 * 1000
class DeepVariantInput(object):
"""This class serves as an `input_fn` for the `tf.estimator` framework."""
# Calling this object like a function returns a stream of variadic tuples.
# Essentially it is a buffered io library, that handles concurrently
# reading and possibly shuffling input records from a set of files. It
# knows how to parse features we care about from tf.examples. It records
# some extra information about the source of the input, such as the name
# and number of classes.
def __init__(
self,
mode,
input_file_spec,
num_examples=None,
num_classes=dv_constants.NUM_CLASSES,
max_examples=None,
tensor_shape=None,
name=None,
use_tpu=False,
input_read_threads=_DEFAULT_INPUT_READ_THREADS,
shuffle_buffer_size=_DEFAULT_SHUFFLE_BUFFER_ELEMENTS,
initial_shuffle_buffer_size=_DEFAULT_INITIAL_SHUFFLE_BUFFER_ELEMENTS,
prefetch_dataset_buffer_size=_DEFAULT_PREFETCH_BUFFER_BYTES,
sloppy=True,
list_files_shuffle=True,
debugging_true_label_mode=False):
"""Create an DeepVariantInput object, usable as an `input_fn`.
Args:
mode: the mode string (from `tf.estimator.ModeKeys`).
input_file_spec: the input filename for a tfrecord[.gz] file containing
examples. Can contain sharding designators.
num_examples: the number of examples contained in the input file. Required
for setting learning rate schedule in train/eval only.
num_classes: The number of classes in the labels of this dataset.
Currently defaults to DEFAULT_NUM_CLASSES.
max_examples: The maximum number of examples to use. If None, all examples
will be used. If not None, the first n = min(max_examples, num_examples)
will be used. This works with training, and the n examples will repeat
over and over.
tensor_shape: None (which means we get the shape from the first example in
source), or list of int [height, width, channel] for testing.
name: string, name of the dataset.
use_tpu: use code paths tuned for TPU, in particular protobuf encoding.
Default False.
input_read_threads: number of threads for reading data. Default 32.
shuffle_buffer_size: size of the final shuffle buffer, in elements.
Default 100.
initial_shuffle_buffer_size: int; the size of the dataset.shuffle buffer
in elements. Default is 1024.
prefetch_dataset_buffer_size: int; the size of the TFRecordDataset buffer
in bytes. Default is 16 * 1000 * 1000.
sloppy: boolean, allow parallel_interleave to be sloppy. Default True.
list_files_shuffle: boolean, allow list_files to shuffle. Default True.
debugging_true_label_mode: boolean. If true, the input examples are
created with "training" mode. We'll parse the 'label' field even if the
`mode` is PREDICT.
Raises:
ValueError: if `num_examples` not provided, in a context requiring it.
"""
self.mode = mode
self.input_file_spec = input_file_spec
self.name = name
self.num_examples = num_examples
self.num_classes = num_classes
self.max_examples = max_examples
self.use_tpu = use_tpu
self.sloppy = sloppy
self.list_files_shuffle = list_files_shuffle
self.input_read_threads = input_read_threads
self.shuffle_buffer_size = shuffle_buffer_size
self.initial_shuffle_buffer_size = initial_shuffle_buffer_size
self.prefetch_dataset_buffer_size = prefetch_dataset_buffer_size
self.debugging_true_label_mode = debugging_true_label_mode
self.feature_extraction_spec = self.features_extraction_spec_for_mode(
mode in (tf.estimator.ModeKeys.TRAIN, tf.estimator.ModeKeys.EVAL) or
debugging_true_label_mode)
if num_examples is None and mode != tf.estimator.ModeKeys.PREDICT:
raise ValueError('num_examples argument required for DeepVariantInput '
'in TRAIN/EVAL modes.')
if max_examples is not None:
if max_examples <= 0:
raise ValueError(
'max_examples must be > 0 if not None. Got {}'.format(max_examples))
# We update our num_examples in the situation where num_examples is set
# (i.e., is not None) to the smaller of max_examples and num_examples.
if self.num_examples is not None:
self.num_examples = min(max_examples, self.num_examples)
if tensor_shape:
self.tensor_shape = tensor_shape
else:
self.tensor_shape = tf_utils.get_shape_from_examples_path(input_file_spec)
self.input_files = sharded_file_utils.glob_list_sharded_file_patterns(
self.input_file_spec)
def features_extraction_spec_for_mode(self, include_label_and_locus):
"""Returns a dict describing features from a TF.example."""
spec = {
'image/encoded': tf.io.FixedLenFeature((), tf.string),
'variant/encoded': tf.io.FixedLenFeature((), tf.string),
'alt_allele_indices/encoded': tf.io.FixedLenFeature((), tf.string),
'variant_type': tf.io.FixedLenFeature((), tf.int64),
'sequencing_type': tf.io.FixedLenFeature([], tf.int64),
}
if include_label_and_locus:
# N.B. int32 fails here on TPU.
spec['label'] = tf.io.FixedLenFeature((), tf.int64)
spec['locus'] = tf.io.FixedLenFeature((), tf.string)
return spec
def parse_tfexample(self, tf_example):
"""Parse a DeepVariant pileup tf.Example to features and labels.
This potentially stores parsed strings as fixed length tensors of integers,
as required by TPU. They have to be handled properly by consumers.
Args:
tf_example: a serialized tf.Example for a DeepVariant "pileup".
Returns:
If (mode is EVAL or TRAIN) or debugging_true_label_mode:
(features, label) ...
If mode is PREDICT,
features ...
"""
# redacted
with tf.compat.v1.name_scope('input'):
parsed = tf.io.parse_single_example(
serialized=tf_example, features=self.feature_extraction_spec)
image = parsed['image/encoded']
if self.tensor_shape:
# If the input is empty there won't be a tensor_shape.
image = tf.reshape(tf.io.decode_raw(image, tf.uint8), self.tensor_shape)
if self.use_tpu:
# Cast to int32 for loading onto the TPU
image = tf.cast(image, tf.int32)
variant = parsed['variant/encoded']
alt_allele_indices = parsed['alt_allele_indices/encoded']
if self.use_tpu:
# Passing a string to a TPU draws this error: TypeError: <dtype:
# 'string'> is not a supported TPU infeed type. Supported types are:
# [tf.float32, tf.int32, tf.complex64, tf.int64, tf.bool, tf.bfloat16]
# Thus, we must encode the string as a tensor of int.
variant = tf_utils.string_to_int_tensor(variant)
alt_allele_indices = tf_utils.string_to_int_tensor(alt_allele_indices)
features = {
'image': image,
'variant': variant,
'alt_allele_indices': alt_allele_indices,
'sequencing_type': parsed['sequencing_type'],
}
if (self.mode in (tf.estimator.ModeKeys.TRAIN, tf.estimator.ModeKeys.EVAL)
or self.debugging_true_label_mode):
if self.use_tpu:
features['locus'] = tf_utils.string_to_int_tensor(parsed['locus'])
else:
features['locus'] = parsed['locus']
# Add variant_type to our features if we are in TRAIN or EVAL mode.
features['variant_type'] = parsed['variant_type']
if self.mode in (tf.estimator.ModeKeys.TRAIN,
tf.estimator.ModeKeys.EVAL):
label = parsed['label']
return features, label
features['label'] = parsed['label']
# For PREDICT mode, label is not present, so return features only.
return features
def __call__(self, params):
"""Interface to get a data batch, fulfilling `input_fn` contract.
Args:
params: a dict containing an integer value for key 'batch_size'.
Returns:
the tuple (features, labels), where:
- features is a dict of Tensor-valued input features; keys populated
are:
'image'
'variant'
'alt_allele_indices'
and, if not PREDICT mode, also:
'locus'
Aside from 'image', these may be encoded specially for TPU.
- label is the Tensor-valued prediction label; in train/eval
mode the label value is populated from the data source; in
inference mode, the value is a constant empty Tensor value "()".
"""
# See https://cloud.google.com/tpu/docs/tutorials/inception-v3-advanced
# for some background on tuning this on TPU.
# TPU optimized implementation for prediction mode
if self.mode == tf.estimator.ModeKeys.PREDICT:
return self.prediction_input_fn(params)
# Optimized following:
# https://www.tensorflow.org/guide/performance/datasets
# using the information available from xprof.
def load_dataset(filename):
dataset = tf.data.TFRecordDataset(
filename,
buffer_size=self.prefetch_dataset_buffer_size,
compression_type=compression_type)
return dataset
batch_size = params['batch_size']
compression_type = tf_utils.compression_type_of_files(self.input_files)
# NOTE: The order of the file names returned can be non-deterministic,
# even if shuffle is false. See internal and the note in internal.
# We need the shuffle flag to be able to disable reordering in EVAL mode.
dataset = None
for pattern in self.input_file_spec.split(','):
one_dataset = tf.data.Dataset.list_files(
sharded_file_utils.normalize_to_sharded_file_pattern(pattern),
shuffle=self.mode == tf.estimator.ModeKeys.TRAIN)
dataset = dataset.concatenate(one_dataset) if dataset else one_dataset
# This shuffle applies to the set of files.
# redacted
if (self.mode == tf.estimator.ModeKeys.TRAIN and
self.initial_shuffle_buffer_size > 0):
dataset = dataset.shuffle(self.initial_shuffle_buffer_size)
# For both TRAIN and EVAL, use the following to speed up.
if self.sloppy:
options = tf.data.Options()
options.experimental_deterministic = False
dataset = dataset.with_options(options)
dataset = dataset.interleave(
load_dataset,
cycle_length=self.input_read_threads,
num_parallel_calls=tf.data.AUTOTUNE)
if self.max_examples is not None:
dataset = dataset.take(self.max_examples)
if self.mode == tf.estimator.ModeKeys.TRAIN:
dataset = dataset.repeat()
# This shuffle applies to the set of records.
if self.mode == tf.estimator.ModeKeys.TRAIN:
if self.shuffle_buffer_size > 0:
dataset = dataset.shuffle(self.shuffle_buffer_size)
dataset = dataset.map(
map_func=self.parse_tfexample, num_parallel_calls=tf.data.AUTOTUNE)
dataset = dataset.batch(batch_size=batch_size, drop_remainder=True)
dataset = dataset.prefetch(tf.data.AUTOTUNE)
return dataset
def prediction_input_fn(self, params):
"""Implementation of `input_fn` contract for prediction mode.
Args:
params: a dict containing an integer value for key 'batch_size'.
Returns:
the tuple (features, labels), where:
- features is a dict of Tensor-valued input features; keys populated
are:
'image'
'variant'
'alt_allele_indices'
Aside from 'image', these may be encoded specially for TPU.
"""
def load_dataset(filename):
dataset = tf.data.TFRecordDataset(
filename,
buffer_size=self.prefetch_dataset_buffer_size,
compression_type=compression_type)
return dataset
batch_size = params['batch_size']
compression_type = tf_utils.compression_type_of_files(self.input_files)
dataset = tf.data.Dataset.list_files(
sharded_file_utils.normalize_to_sharded_file_pattern(
self.input_file_spec),
shuffle=False,
)
logging.vlog(3,
'self.input_read_threads={}'.format(self.input_read_threads))
if self.sloppy:
options = tf.data.Options()
options.experimental_deterministic = False
dataset = dataset.with_options(options)
dataset = dataset.interleave(
load_dataset,
cycle_length=self.input_read_threads,
num_parallel_calls=tf.data.AUTOTUNE)
dataset = dataset.map(
map_func=self.parse_tfexample, num_parallel_calls=tf.data.AUTOTUNE)
dataset = dataset.batch(batch_size=batch_size)
dataset = dataset.prefetch(tf.data.AUTOTUNE)
return dataset
def __str__(self):
return ('DeepVariantInput(name={}, input_file_spec={}, num_examples={}, '
'mode={})').format(self.name, self.input_file_spec,
self.num_examples, self.mode)
# This is the entry point to get a DeepVariantInput when you start with
# a dataset configuration file name.
def get_input_fn_from_dataset(dataset_config_filename, mode, **kwargs):
"""Creates an input_fn from the dataset config file.
Args:
dataset_config_filename: str. Path to the dataset config pbtxt file.
mode: one of tf.estimator.ModeKeys.{TRAIN,EVAL,PREDICT}
**kwargs: Additional keyword arguments for DeepVariantInput.
Returns:
An input_fn from the specified split in the dataset_config file.
Raises:
ValueError: if the dataset config doesn't have the necessary information.
"""
# Get the metadata.
dataset_config = read_dataset_config(dataset_config_filename)
# Return a reader for the data.
return get_input_fn_from_filespec(
input_file_spec=dataset_config.tfrecord_path,
mode=mode,
num_examples=dataset_config.num_examples,
name=dataset_config.name,
**kwargs)
# This is the entry point to get a DeepVariantInput when you start with
# a tf.example file specification, and associated metadata.
def get_input_fn_from_filespec(input_file_spec, mode, **kwargs):
"""Create a DeepVariantInput function object from a file spec.
Args:
input_file_spec: the tf.example input file specification, possibly sharded.
mode: tf.estimator.ModeKeys.
**kwargs: Additional keyword arguments for DeepVariantInput.
Returns:
A DeepVariantInput object usable as an input_fn.
"""
return DeepVariantInput(mode=mode, input_file_spec=input_file_spec, **kwargs)
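# Hypothetical usage sketch (not part of the original module): the returned
# DeepVariantInput is callable with a params dict, per the `input_fn` contract
# documented above. The file path and batch size are invented placeholders.
#
#   input_fn = get_input_fn_from_filespec(
#       input_file_spec='/path/to/examples.tfrecord.gz',
#       mode=tf.estimator.ModeKeys.PREDICT)
#   dataset = input_fn({'batch_size': 32})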
# Return the stream of batched images from a dataset.
def get_batches(tf_dataset, model, batch_size):
"""Provides batches of pileup images from this dataset.
Creates a DeepVariantInput for tf_dataset. It instantiates an iterator
on the dataset, and returns the images, labels, encoded_variant
features in batches. This calls model.preprocess_images on the images
(but note that we will be moving that step into model_fn for the
Estimator api).
Args:
tf_dataset: a DeepVariantInput object
model: a model object
batch_size: int batch size
Returns:
(images, labels, encoded_variant)
Raises:
ValueError: if the dataset has the wrong mode.
"""
if tf_dataset.mode not in (tf.estimator.ModeKeys.TRAIN,
tf.estimator.ModeKeys.EVAL):
raise ValueError(
'tf_dataset.mode is {} but must be one of TRAIN or EVAL.'.format(
tf_dataset.mode))
params = dict(batch_size=batch_size)
features, labels = tf.compat.v1.data.make_one_shot_iterator(
tf_dataset(params)).get_next()
images = features['image']
encoded_variant = features['variant']
images = model.preprocess_images(images)
return images, labels, encoded_variant
# This reads a pbtxt file and returns the config proto.
def read_dataset_config(dataset_config_filename):
"""Returns a DeepVariantDatasetConfig proto read from the dataset config file.
Args:
dataset_config_filename: String. Path to the dataset config pbtxt file.
Returns:
A DeepVariantDatasetConfig proto from the dataset_config file.
Raises:
ValueError: if the dataset config doesn't have the necessary information.
"""
with tf.io.gfile.GFile(dataset_config_filename) as f:
dataset_config = text_format.Parse(
f.read(), deepvariant_pb2.DeepVariantDatasetConfig())
if not dataset_config.name:
raise ValueError('dataset_config needs to have a name')
if not dataset_config.tfrecord_path:
raise ValueError('The dataset in the config {} does not have a '
'tfrecord_path.'.format(dataset_config_filename))
# redacted
# of num_examples.
if not dataset_config.num_examples:
raise ValueError('The dataset in the config {} does not have a '
'num_examples.'.format(dataset_config_filename))
return dataset_config
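# Hypothetical example (not part of the original module): a minimal dataset
# config pbtxt accepted by read_dataset_config would set the three required
# fields checked above; the values here are invented.
#
#   name: "my_training_set"
#   tfrecord_path: "/path/to/examples.tfrecord"
#   num_examples: 1000000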
def write_dataset_config_to_pbtxt(dataset_config, dataset_config_filename):
"""Writes the dataset_config to a human-readable text format.
Args:
dataset_config: DeepVariantDatasetConfig. The config to be written out.
dataset_config_filename: String. Path to the output pbtxt file.
"""
with tf.io.gfile.GFile(dataset_config_filename, mode='w') as writer:
writer.write(text_format.MessageToString(dataset_config))
|
google/deepvariant
|
deepvariant/data_providers.py
|
Python
|
bsd-3-clause
| 19,622
|
'''run all examples to make sure we don't get an exception
Note:
If an example contains plt.show(), then all plot windows have to be closed
manually, at least in my setup.
uncomment plt.show() to show all plot windows
'''
stop_on_error = True
filelist = ['example_glsar.py', 'example_wls.py', 'example_gls.py',
'example_glm.py', 'example_ols_tftest.py', #'example_rpy.py',
'example_ols.py', 'example_ols_minimal.py', 'example_rlm.py',
'example_discrete.py', 'example_predict.py',
'example_ols_table.py',
'tut_ols.py', 'tut_ols_rlm.py', 'tut_ols_wls.py',
# time series
'tsa/ex_arma2.py','tsa/ex_dates.py']
if __name__ == "__main__":
#temporarily disable show
import matplotlib.pyplot as plt
plt_show = plt.show
def noop(*args):
pass
plt.show = noop
cont = raw_input("""Are you sure you want to run all of the examples?
This is done mainly to check that they are up to date.
(y/n) >>> """)
if 'y' in cont.lower():
for run_all_f in filelist:
try:
print "\n\nExecuting example file", run_all_f
print "-----------------------" + "-"*len(run_all_f)
execfile(run_all_f)
except:
#f might be overwritten in the executed file
print "**********************" + "*"*len(run_all_f)
print "ERROR in example file", run_all_f
print "**********************" + "*"*len(run_all_f)
if stop_on_error:
raise
#reenable show after closing windows
plt.close('all')
plt.show = plt_show
plt.show()
|
pprett/statsmodels
|
examples/run_all.py
|
Python
|
bsd-3-clause
| 1,704
|
from cumulusci.cli.cci import main
main()
|
SalesforceFoundation/CumulusCI
|
cumulusci/__main__.py
|
Python
|
bsd-3-clause
| 43
|
# Generated by Django 2.2.14 on 2020-07-10 08:53
from django.db import migrations, models
class Migration(migrations.Migration):
def set_default_to_low(apps, schema_editor):
system_settings = apps.get_model('dojo', 'system_settings')
try:
ss = system_settings.objects.all().first()
jira_sev_value = ss.jira_minimum_severity
if jira_sev_value is None:
ss.jira_minimum_severity = 'Low'
ss.save()
except Exception as e:
# probably a test run such as running unittest, no values in table
pass
dependencies = [
('dojo', '0046_endpoint_status'),
]
operations = [
migrations.AlterField(
model_name='system_settings',
name='jira_minimum_severity',
field=models.CharField(blank=True, choices=[('Critical', 'Critical'), ('High', 'High'), ('Medium', 'Medium'), ('Low', 'Low'), ('Info', 'Info')], default='Low', max_length=20, null=True),
),
migrations.RunPython(set_default_to_low)
]
|
rackerlabs/django-DefectDojo
|
dojo/db_migrations/0047_jira_minimum_severity_default.py
|
Python
|
bsd-3-clause
| 1,084
|
from django.conf import settings
from django.core.cache import cache
from django.test import TestCase
from ultracache import _thread_locals
from ultracache.utils import Ultracache
from ultracache.tests.models import DummyModel
class UtilsTestCase(TestCase):
if "django.contrib.sites" in settings.INSTALLED_APPS:
fixtures = ["sites.json"]
def setUp(self):
super(UtilsTestCase, self).setUp()
cache.clear()
def test_context_manager_like_thing(self):
one = DummyModel.objects.create(title="One", code="one")
two = DummyModel.objects.create(title="Two", code="two")
# Caching with object one
uc = Ultracache(3600, "a", "b")
self.failIf(uc)
uc.cache(one.title)
uc = Ultracache(3600, "a", "b")
self.failUnless(uc)
self.assertEqual(uc.cached, one.title)
one.title = "Onex"
one.save()
uc = Ultracache(3600, "a", "b")
self.failIf(uc)
# Caching with object two. Ensure object one doesn't bleed into this
# section.
uc = Ultracache(3600, "c", "d")
self.failIf(uc)
uc.cache(two.title)
uc = Ultracache(3600, "c", "d")
self.failUnless(uc)
self.assertEqual(uc.cached, two.title)
two.title = "Onez"
one.save()
uc = Ultracache(3600, "c", "d")
self.failUnless(uc)
two.title = "Twox"
two.save()
uc = Ultracache(3600, "c", "d")
self.failIf(uc)
|
praekelt/django-ultracache
|
ultracache/tests/test_utils.py
|
Python
|
bsd-3-clause
| 1,505
|
# Django settings for {{ project_name }} project.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@domain.com'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
'NAME': '', # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
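# Hypothetical example (not part of the template): a minimal sqlite3 setup
# would only need the engine and a file path, e.g.
# DATABASES = {
#     'default': {
#         'ENGINE': 'django.db.backends.sqlite3',
#         'NAME': 'dev.db',
#     }
# }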
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = ''
# Absolute path to the directory that holds static files.
# Example: "/home/media/media.lawrence.com/static/"
STATICFILES_ROOT = ''
# URL that handles the static files served from STATICFILES_ROOT.
# Example: "http://static.lawrence.com/", "http://example.com/static/"
STATICFILES_URL = '/static/'
# URL prefix for admin media -- CSS, JavaScript and images.
# Make sure to use a trailing slash.
# Examples: "http://foo.com/static/admin/", "/static/admin/".
ADMIN_MEDIA_PREFIX = '/static/admin/'
# A list of locations of additional static files
STATICFILES_DIRS = ()
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = ''
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
ROOT_URLCONF = '{{ project_name }}.urls'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Uncomment the next line to enable the admin:
# 'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request':{
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
|
alex/django-old
|
django/conf/project_template/settings.py
|
Python
|
bsd-3-clause
| 4,751
|
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command for describing routes."""
from googlecloudsdk.api_lib.compute import base_classes
class Describe(base_classes.GlobalDescriber):
"""Describe a route."""
@staticmethod
def Args(parser):
base_classes.GlobalDescriber.Args(parser, 'compute.routes')
base_classes.AddFieldsFlag(parser, 'routes')
@property
def service(self):
return self.compute.routes
@property
def resource_type(self):
return 'routes'
Describe.detailed_help = {
'brief': 'Describe a route',
'DESCRIPTION': """\
*{command}* displays all data associated with a Google Compute
Engine route in a project.
""",
}
|
flgiordano/netcash
|
+/google-cloud-sdk/lib/surface/compute/routes/describe.py
|
Python
|
bsd-3-clause
| 1,244
|
from pydeo.app.controllers.assets_controller import AssetsController
from pydeo.app.controllers.errors_controller import ErrorsController
from pydeo.app.controllers.files_controller import FilesController
from pydeo.app.controllers.index_controller import IndexController
from pydeo.app.controllers.movies_controller import MoviesController
from pydeo.app.controllers.music_controller import MusicController
from pydeo.app.controllers.series_controller import SeriesController
from pydeo.app.controllers.settings_controller import SettingsController
from pydeo.app.controllers.api.movies_api import MoviesAPIController
def setup_routing(app):
# static files
app.route('/img/<filename>', 'GET', AssetsController.img)
app.route('/js/<filename>', 'GET', AssetsController.js)
app.route('/js/lib/<filename>', 'GET', AssetsController.js_lib)
app.route('/css/<filename>', 'GET', AssetsController.css)
app.route('/css/lib/<filename>', 'GET', AssetsController.css_lib)
app.route('/css/lib/font/<filename>', 'GET', AssetsController.css_lib_font)
app.route('/css/fonts/<filename>', 'GET', AssetsController.css_fonts)
app.route('/swf/<filename>', 'GET', AssetsController.swf)
app.route('/favicon.ico', 'GET', AssetsController.favicon)
app.route('/favicon.png', 'GET', AssetsController.favicon)
app.route('/files/movies/<filename>', 'GET', FilesController.movies)
# errors
app.route('/error/404', 'GET', ErrorsController().error_404)
app.route('/error/500', 'GET', ErrorsController().error_500)
# home
app.route('/', 'GET', IndexController().index)
# music
app.route('/music', 'GET', MusicController().index)
# movies
app.route('/movies', 'GET', MoviesController().index)
app.route('/movies/<id>', 'GET', MoviesController().movie)
app.route('/movies/<id>/play', 'GET', MoviesController().play)
# series
app.route('/series', 'GET', SeriesController().index)
# settings
app.route('/settings', 'GET', SettingsController().index)
# REST API routes
# movies
app.route('/api/movies', 'GET', MoviesAPIController().movies)
app.route('/api/movies/fetch', 'GET', MoviesAPIController().movies_fetch)
app.route('/api/movies/reload', 'GET', MoviesAPIController().movies_reload)
app.route('/api/movies/title', 'GET', MoviesAPIController().movies_title)
app.route('/api/movies/<id>', 'GET', MoviesAPIController().movies_id)
|
Rolinh/pydeo
|
pydeo/config/routes.py
|
Python
|
bsd-3-clause
| 2,442
|
"""
redrock.rebin
=============
Tools for binning data.
"""
from __future__ import absolute_import, division, print_function
import numpy as np
import numba
def centers2edges(centers):
"""Convert bin centers to bin edges, guessing at what you probably meant
Args:
centers (array): bin centers,
Returns:
array: bin edges, length = len(centers) + 1
"""
centers = np.asarray(centers)
edges = np.zeros(len(centers)+1)
#- Interior edges are just points half way between bin centers
edges[1:-1] = (centers[0:-1] + centers[1:]) / 2.0
#- edge edges are extrapolation of interior bin sizes
edges[0] = centers[0] - (centers[1]-edges[1])
edges[-1] = centers[-1] + (centers[-1]-edges[-2])
return edges
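# Hypothetical usage sketch (not part of the original module): for evenly
# spaced centers the guessed edges fall half way between neighbours and are
# extrapolated by one half-bin at the ends, e.g.
#   centers2edges([1., 2., 3., 4.])  # -> array([0.5, 1.5, 2.5, 3.5, 4.5])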
# This code is purposely written in a very "C-like" way. The logic
# being that it may help numba optimization and also makes it easier
# if it ever needs to be ported to Cython. Actually Cython versions
# of this code have already been tested and shown to perform no better
# than numba on Intel haswell and KNL architectures.
@numba.jit
def _trapz_rebin(x, y, edges, results):
'''
Numba-friendly version of trapezoidal rebinning
See redrock.rebin.trapz_rebin() for input descriptions.
`results` is pre-allocated array of length len(edges)-1 to keep results
'''
nbin = len(edges) - 1
i = 0 #- index counter for output
j = 0 #- index counter for inputs
yedge = 0.0
area = 0.0
while i < nbin:
#- Seek next sample beyond bin edge
while x[j] <= edges[i]:
j += 1
#- What is the y value where the interpolation crossed the edge?
yedge = y[j-1] + (edges[i]-x[j-1]) * (y[j]-y[j-1]) / (x[j]-x[j-1])
#- Is this sample inside this bin?
if x[j] < edges[i+1]:
area = 0.5 * (y[j] + yedge) * (x[j] - edges[i])
results[i] += area
#- Continue with interior bins
while x[j+1] < edges[i+1]:
j += 1
area = 0.5 * (y[j] + y[j-1]) * (x[j] - x[j-1])
results[i] += area
#- Next sample will be outside this bin; handle upper edge
yedge = y[j] + (edges[i+1]-x[j]) * (y[j+1]-y[j]) / (x[j+1]-x[j])
area = 0.5 * (yedge + y[j]) * (edges[i+1] - x[j])
results[i] += area
#- Otherwise the samples span over this bin
else:
ylo = y[j] + (edges[i]-x[j]) * (y[j] - y[j-1]) / (x[j] - x[j-1])
yhi = y[j] + (edges[i+1]-x[j]) * (y[j] - y[j-1]) / (x[j] - x[j-1])
area = 0.5 * (ylo+yhi) * (edges[i+1]-edges[i])
results[i] += area
i += 1
for i in range(nbin):
results[i] /= edges[i+1] - edges[i]
return
def trapz_rebin(x, y, xnew=None, edges=None):
"""Rebin y(x) flux density using trapezoidal integration between bin edges
Notes:
y is interpreted as a density, as is the output, e.g.
>>> x = np.arange(10)
>>> y = np.ones(10)
>>> trapz_rebin(x, y, edges=[0,2,4,6,8]) #- density still 1, not 2
array([ 1., 1., 1., 1.])
Args:
x (array): input x values.
y (array): input y values.
edges (array): (optional) new bin edges.
Returns:
array: integrated results with len(results) = len(edges)-1
Raises:
ValueError: if edges are outside the range of x or if len(x) != len(y)
"""
if edges is None:
edges = centers2edges(xnew)
else:
edges = np.asarray(edges)
if edges[0] < x[0] or x[-1] < edges[-1]:
raise ValueError('edges must be within input x range')
result = np.zeros(len(edges)-1, dtype=np.float64)
_trapz_rebin(x, y, edges, result)
return result
def rebin_template(template, z, dwave):
"""Rebin a template to a set of wavelengths.
Given a template and a single redshift, rebin the template to a set of
wavelength arrays.
Args:
template (Template): the template object
z (float): the redshift
dwave (dict): the keys are the "wavehash" and the values
are a 1D array containing the wavelength grid.
Returns:
dict: The rebinned template for every basis function and wavelength
grid in dwave.
"""
nbasis = template.flux.shape[0] #- number of template basis vectors
result = { hs:np.array([ trapz_rebin((1.+z)*template.wave, template.flux[b], wave) for b in range(nbasis) ]).transpose() for hs, wave in dwave.items() }
return result
|
sbailey/redrock
|
py/redrock/rebin.py
|
Python
|
bsd-3-clause
| 4,560
|
import unittest
import sys
import py65.assembler
import py65.devices.mpu6502
class Common6502Tests:
"""Tests common to 6502-based microprocessors"""
# Reset
def test_reset_sets_registers_to_initial_states(self):
mpu = self._make_mpu()
mpu.reset()
self.assertEqual(0xFF, mpu.sp)
self.assertEqual(0, mpu.a)
self.assertEqual(0, mpu.x)
self.assertEqual(0, mpu.y)
self.assertEqual(mpu.BREAK | mpu.UNUSED, mpu.p)
# ADC Absolute
def test_adc_bcd_off_absolute_carry_clear_in_accumulator_zeroes(self):
mpu = self._make_mpu()
mpu.a = 0
# $0000 ADC $C000
self._write(mpu.memory, 0x0000, (0x6D, 0x00, 0xC0))
self.assertEqual(0x10000, len(mpu.memory))
mpu.memory[0xC000] = 0x00
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x00, mpu.a)
self.assertEqual(0, mpu.p & mpu.CARRY)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
def test_adc_bcd_off_absolute_carry_set_in_accumulator_zero(self):
mpu = self._make_mpu()
mpu.a = 0
mpu.p |= mpu.CARRY
# $0000 ADC $C000
self._write(mpu.memory, 0x0000, (0x6D, 0x00, 0xC0))
mpu.memory[0xC000] = 0x00
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x01, mpu.a)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
self.assertNotEqual(mpu.CARRY, mpu.p & mpu.CARRY)
def test_adc_bcd_off_absolute_carry_clear_in_no_carry_clear_out(self):
mpu = self._make_mpu()
mpu.a = 0x01
# $0000 ADC $C000
self._write(mpu.memory, 0x0000, (0x6D, 0x00, 0xC0))
mpu.memory[0xC000] = 0xFE
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0xFF, mpu.a)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.CARRY)
self.assertEqual(0, mpu.p & mpu.ZERO)
def test_adc_bcd_off_absolute_carry_clear_in_carry_set_out(self):
mpu = self._make_mpu()
mpu.a = 0x02
# $0000 ADC $C000
self._write(mpu.memory, 0x0000, (0x6D, 0x00, 0xC0))
mpu.memory[0xC000] = 0xFF
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x01, mpu.a)
self.assertEqual(mpu.CARRY, mpu.p & mpu.CARRY)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
def test_adc_bcd_off_absolute_overflow_clr_no_carry_01_plus_01(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.CARRY)
mpu.a = 0x01
# $0000 ADC $C000
self._write(mpu.memory, 0x0000, (0x6D, 0x00, 0xC0))
mpu.memory[0xC000] = 0x01
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x02, mpu.a)
self.assertEqual(0, mpu.p & mpu.OVERFLOW)
def test_adc_bcd_off_absolute_overflow_clr_no_carry_01_plus_ff(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.CARRY)
mpu.a = 0x01
# $0000 ADC $C000
self._write(mpu.memory, 0x0000, (0x6D, 0x00, 0xC0))
mpu.memory[0xC000] = 0xff
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x00, mpu.a)
self.assertEqual(0, mpu.p & mpu.OVERFLOW)
def test_adc_bcd_off_absolute_overflow_set_no_carry_7f_plus_01(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.CARRY)
mpu.a = 0x7f
# $0000 ADC $C000
self._write(mpu.memory, 0x0000, (0x6D, 0x00, 0xC0))
mpu.memory[0xC000] = 0x01
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x80, mpu.a)
self.assertEqual(mpu.OVERFLOW, mpu.p & mpu.OVERFLOW)
def test_adc_bcd_off_absolute_overflow_set_no_carry_80_plus_ff(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.CARRY)
mpu.a = 0x80
# $0000 ADC $C000
self._write(mpu.memory, 0x0000, (0x6D, 0x00, 0xC0))
mpu.memory[0xC000] = 0xff
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x7f, mpu.a)
self.assertEqual(mpu.OVERFLOW, mpu.p & mpu.OVERFLOW)
def test_adc_bcd_off_absolute_overflow_set_on_40_plus_40(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.OVERFLOW)
mpu.a = 0x40
# $0000 ADC $C000
self._write(mpu.memory, 0x0000, (0x6D, 0x00, 0xC0))
mpu.memory[0xC000] = 0x40
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x80, mpu.a)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
self.assertEqual(mpu.OVERFLOW, mpu.p & mpu.OVERFLOW)
self.assertEqual(0, mpu.p & mpu.ZERO)
# ADC Zero Page
def test_adc_bcd_off_zp_carry_clear_in_accumulator_zeroes(self):
mpu = self._make_mpu()
mpu.a = 0
# $0000 ADC $00B0
self._write(mpu.memory, 0x0000, (0x65, 0xB0))
mpu.memory[0x00B0] = 0x00
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x00, mpu.a)
self.assertEqual(0, mpu.p & mpu.CARRY)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
def test_adc_bcd_off_zp_carry_set_in_accumulator_zero(self):
mpu = self._make_mpu()
mpu.a = 0
mpu.p |= mpu.CARRY
# $0000 ADC $00B0
self._write(mpu.memory, 0x0000, (0x65, 0xB0))
mpu.memory[0x00B0] = 0x00
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x01, mpu.a)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
self.assertNotEqual(mpu.CARRY, mpu.p & mpu.CARRY)
def test_adc_bcd_off_zp_carry_clear_in_no_carry_clear_out(self):
mpu = self._make_mpu()
mpu.a = 0x01
# $0000 ADC $00B0
self._write(mpu.memory, 0x0000, (0x65, 0xB0))
mpu.memory[0x00B0] = 0xFE
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0xFF, mpu.a)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.CARRY)
self.assertEqual(0, mpu.p & mpu.ZERO)
def test_adc_bcd_off_zp_carry_clear_in_carry_set_out(self):
mpu = self._make_mpu()
mpu.a = 0x02
# $0000 ADC $00B0
self._write(mpu.memory, 0x0000, (0x65, 0xB0))
mpu.memory[0x00B0] = 0xFF
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x01, mpu.a)
self.assertEqual(mpu.CARRY, mpu.p & mpu.CARRY)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
def test_adc_bcd_off_zp_overflow_clr_no_carry_01_plus_01(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.CARRY)
mpu.a = 0x01
# $0000 ADC $00B0
self._write(mpu.memory, 0x0000, (0x65, 0xB0))
mpu.memory[0x00B0] = 0x01
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x02, mpu.a)
self.assertEqual(0, mpu.p & mpu.OVERFLOW)
def test_adc_bcd_off_zp_overflow_clr_no_carry_01_plus_ff(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.CARRY)
mpu.a = 0x01
# $0000 ADC $00B0
self._write(mpu.memory, 0x0000, (0x65, 0xB0))
mpu.memory[0x00B0] = 0xff
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x00, mpu.a)
self.assertEqual(0, mpu.p & mpu.OVERFLOW)
def test_adc_bcd_off_zp_overflow_set_no_carry_7f_plus_01(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.CARRY)
mpu.a = 0x7f
# $0000 ADC $00B0
self._write(mpu.memory, 0x0000, (0x65, 0xB0))
mpu.memory[0x00B0] = 0x01
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x80, mpu.a)
self.assertEqual(mpu.OVERFLOW, mpu.p & mpu.OVERFLOW)
def test_adc_bcd_off_zp_overflow_set_no_carry_80_plus_ff(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.CARRY)
mpu.a = 0x80
# $0000 ADC $00B0
self._write(mpu.memory, 0x0000, (0x65, 0xB0))
mpu.memory[0x00B0] = 0xff
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x7f, mpu.a)
self.assertEqual(mpu.OVERFLOW, mpu.p & mpu.OVERFLOW)
def test_adc_bcd_off_zp_overflow_set_on_40_plus_40(self):
mpu = self._make_mpu()
mpu.a = 0x40
mpu.p &= ~(mpu.OVERFLOW)
# $0000 ADC $00B0
self._write(mpu.memory, 0x0000, (0x65, 0xB0))
mpu.memory[0x00B0] = 0x40
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x80, mpu.a)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
self.assertEqual(mpu.OVERFLOW, mpu.p & mpu.OVERFLOW)
self.assertEqual(0, mpu.p & mpu.ZERO)
# ADC Immediate
def test_adc_bcd_off_immediate_carry_clear_in_accumulator_zeroes(self):
mpu = self._make_mpu()
mpu.a = 0
# $0000 ADC #$00
self._write(mpu.memory, 0x0000, (0x69, 0x00))
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x00, mpu.a)
self.assertEqual(0, mpu.p & mpu.CARRY)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
def test_adc_bcd_off_immediate_carry_set_in_accumulator_zero(self):
mpu = self._make_mpu()
mpu.a = 0
mpu.p |= mpu.CARRY
# $0000 ADC #$00
self._write(mpu.memory, 0x0000, (0x69, 0x00))
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x01, mpu.a)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
self.assertNotEqual(mpu.CARRY, mpu.p & mpu.CARRY)
def test_adc_bcd_off_immediate_carry_clear_in_no_carry_clear_out(self):
mpu = self._make_mpu()
mpu.a = 0x01
# $0000 ADC #$FE
self._write(mpu.memory, 0x0000, (0x69, 0xFE))
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0xFF, mpu.a)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.CARRY)
self.assertEqual(0, mpu.p & mpu.ZERO)
def test_adc_bcd_off_immediate_carry_clear_in_carry_set_out(self):
mpu = self._make_mpu()
mpu.a = 0x02
# $0000 ADC #$FF
self._write(mpu.memory, 0x0000, (0x69, 0xFF))
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x01, mpu.a)
self.assertEqual(mpu.CARRY, mpu.p & mpu.CARRY)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
def test_adc_bcd_off_immediate_overflow_clr_no_carry_01_plus_01(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.CARRY)
mpu.a = 0x01
# $0000 ADC #$01
self._write(mpu.memory, 0x000, (0x69, 0x01))
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x02, mpu.a)
self.assertEqual(0, mpu.p & mpu.OVERFLOW)
def test_adc_bcd_off_immediate_overflow_clr_no_carry_01_plus_ff(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.CARRY)
mpu.a = 0x01
# $0000 ADC #$FF
self._write(mpu.memory, 0x000, (0x69, 0xff))
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x00, mpu.a)
self.assertEqual(0, mpu.p & mpu.OVERFLOW)
def test_adc_bcd_off_immediate_overflow_set_no_carry_7f_plus_01(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.CARRY)
mpu.a = 0x7f
# $0000 ADC #$01
self._write(mpu.memory, 0x000, (0x69, 0x01))
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x80, mpu.a)
self.assertEqual(mpu.OVERFLOW, mpu.p & mpu.OVERFLOW)
def test_adc_bcd_off_immediate_overflow_set_no_carry_80_plus_ff(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.CARRY)
mpu.a = 0x80
# $0000 ADC #$FF
self._write(mpu.memory, 0x000, (0x69, 0xff))
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x7f, mpu.a)
self.assertEqual(mpu.OVERFLOW, mpu.p & mpu.OVERFLOW)
def test_adc_bcd_off_immediate_overflow_set_on_40_plus_40(self):
mpu = self._make_mpu()
mpu.a = 0x40
# $0000 ADC #$40
self._write(mpu.memory, 0x0000, (0x69, 0x40))
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x80, mpu.a)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
self.assertEqual(mpu.OVERFLOW, mpu.p & mpu.OVERFLOW)
self.assertEqual(0, mpu.p & mpu.ZERO)
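    # The tests below set the DECIMAL flag, so ADC treats both operands as
    # packed BCD. The accumulator and flag values asserted here match the
    # NMOS 6502 behaviour emulated by this MPU, including additions with
    # non-BCD digit values.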
def test_adc_bcd_on_immediate_79_plus_00_carry_set(self):
mpu = self._make_mpu()
mpu.p |= mpu.DECIMAL
mpu.p |= mpu.CARRY
mpu.a = 0x79
# $0000 ADC #$00
self._write(mpu.memory, 0x0000, (0x69, 0x00))
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x80, mpu.a)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
self.assertEqual(mpu.OVERFLOW, mpu.p & mpu.OVERFLOW)
self.assertEqual(0, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.CARRY)
def test_adc_bcd_on_immediate_6f_plus_00_carry_set(self):
mpu = self._make_mpu()
mpu.p |= mpu.DECIMAL
mpu.p |= mpu.CARRY
mpu.a = 0x6f
# $0000 ADC #$00
self._write(mpu.memory, 0x0000, (0x69, 0x00))
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x76, mpu.a)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.OVERFLOW)
self.assertEqual(0, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.CARRY)
def test_adc_bcd_on_immediate_9c_plus_9d(self):
mpu = self._make_mpu()
mpu.p |= mpu.DECIMAL
mpu.p &= ~(mpu.CARRY)
mpu.a = 0x9c
# $0000 ADC #$9d
# $0002 ADC #$9d
self._write(mpu.memory, 0x0000, (0x69, 0x9d))
self._write(mpu.memory, 0x0002, (0x69, 0x9d))
mpu.step()
self.assertEqual(0x9f, mpu.a)
self.assertEqual(mpu.CARRY, mpu.p & mpu.CARRY)
mpu.step()
self.assertEqual(0x0004, mpu.pc)
self.assertEqual(0x93, mpu.a)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
self.assertEqual(mpu.OVERFLOW, mpu.p & mpu.OVERFLOW)
self.assertEqual(0, mpu.p & mpu.ZERO)
self.assertEqual(mpu.CARRY, mpu.p & mpu.CARRY)
# ADC Absolute, X-Indexed
def test_adc_bcd_off_abs_x_carry_clear_in_accumulator_zeroes(self):
mpu = self._make_mpu()
mpu.a = 0x00
mpu.x = 0x03
# $0000 ADC $C000,X
self._write(mpu.memory, 0x0000, (0x7D, 0x00, 0xC0))
mpu.memory[0xC000 + mpu.x] = 0x00
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x00, mpu.a)
self.assertEqual(0, mpu.p & mpu.CARRY)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
def test_adc_bcd_off_abs_x_carry_set_in_accumulator_zero(self):
mpu = self._make_mpu()
mpu.a = 0
mpu.x = 0x03
mpu.p |= mpu.CARRY
# $0000 ADC $C000,X
self._write(mpu.memory, 0x0000, (0x7D, 0x00, 0xC0))
mpu.memory[0xC000 + mpu.x] = 0x00
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x01, mpu.a)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
self.assertNotEqual(mpu.CARRY, mpu.p & mpu.CARRY)
def test_adc_bcd_off_abs_x_carry_clear_in_no_carry_clear_out(self):
mpu = self._make_mpu()
mpu.a = 0x01
mpu.x = 0x03
# $0000 ADC $C000,X
self._write(mpu.memory, 0x0000, (0x7D, 0x00, 0xC0))
mpu.memory[0xC000 + mpu.x] = 0xFE
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0xFF, mpu.a)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.CARRY)
self.assertEqual(0, mpu.p & mpu.ZERO)
def test_adc_bcd_off_abs_x_carry_clear_in_carry_set_out(self):
mpu = self._make_mpu()
mpu.a = 0x02
mpu.x = 0x03
# $0000 ADC $C000,X
self._write(mpu.memory, 0x0000, (0x7D, 0x00, 0xC0))
mpu.memory[0xC000 + mpu.x] = 0xFF
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x01, mpu.a)
self.assertEqual(mpu.CARRY, mpu.p & mpu.CARRY)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
def test_adc_bcd_off_abs_x_overflow_clr_no_carry_01_plus_01(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.CARRY)
mpu.a = 0x01
# $0000 ADC $C000,X
self._write(mpu.memory, 0x0000, (0x7D, 0x00, 0xC0))
mpu.memory[0xC000 + mpu.x] = 0x01
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x02, mpu.a)
self.assertEqual(0, mpu.p & mpu.OVERFLOW)
def test_adc_bcd_off_abs_x_overflow_clr_no_carry_01_plus_ff(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.CARRY)
mpu.a = 0x01
# $0000 ADC $C000,X
self._write(mpu.memory, 0x0000, (0x7D, 0x00, 0xC0))
mpu.memory[0xC000 + mpu.x] = 0xff
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x00, mpu.a)
self.assertEqual(0, mpu.p & mpu.OVERFLOW)
def test_adc_bcd_off_abs_x_overflow_set_no_carry_7f_plus_01(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.CARRY)
mpu.a = 0x7f
# $0000 ADC $C000,X
self._write(mpu.memory, 0x0000, (0x7D, 0x00, 0xC0))
mpu.memory[0xC000 + mpu.x] = 0x01
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x80, mpu.a)
self.assertEqual(mpu.OVERFLOW, mpu.p & mpu.OVERFLOW)
def test_adc_bcd_off_abs_x_overflow_set_no_carry_80_plus_ff(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.CARRY)
mpu.a = 0x80
# $0000 ADC $C000,X
self._write(mpu.memory, 0x0000, (0x7D, 0x00, 0xC0))
mpu.memory[0xC000 + mpu.x] = 0xff
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x7f, mpu.a)
self.assertEqual(mpu.OVERFLOW, mpu.p & mpu.OVERFLOW)
def test_adc_bcd_off_abs_x_overflow_set_on_40_plus_40(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.OVERFLOW)
mpu.a = 0x40
mpu.x = 0x03
# $0000 ADC $C000,X
self._write(mpu.memory, 0x0000, (0x7D, 0x00, 0xC0))
mpu.memory[0xC000 + mpu.x] = 0x40
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x80, mpu.a)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
self.assertEqual(mpu.OVERFLOW, mpu.p & mpu.OVERFLOW)
self.assertEqual(0, mpu.p & mpu.ZERO)
# ADC Absolute, Y-Indexed
def test_adc_bcd_off_abs_y_carry_clear_in_accumulator_zeroes(self):
mpu = self._make_mpu()
mpu.a = 0x00
mpu.y = 0x03
# $0000 ADC $C000,Y
self._write(mpu.memory, 0x0000, (0x79, 0x00, 0xC0))
mpu.memory[0xC000 + mpu.y] = 0x00
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x00, mpu.a)
self.assertEqual(0, mpu.p & mpu.CARRY)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
def test_adc_bcd_off_abs_y_carry_set_in_accumulator_zero(self):
mpu = self._make_mpu()
mpu.a = 0
mpu.y = 0x03
mpu.p |= mpu.CARRY
# $0000 ADC $C000,Y
self._write(mpu.memory, 0x0000, (0x79, 0x00, 0xC0))
mpu.memory[0xC000 + mpu.y] = 0x00
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x01, mpu.a)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
self.assertNotEqual(mpu.CARRY, mpu.p & mpu.CARRY)
def test_adc_bcd_off_abs_y_carry_clear_in_no_carry_clear_out(self):
mpu = self._make_mpu()
mpu.a = 0x01
mpu.y = 0x03
# $0000 ADC $C000,Y
self._write(mpu.memory, 0x0000, (0x79, 0x00, 0xC0))
mpu.memory[0xC000 + mpu.y] = 0xFE
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0xFF, mpu.a)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.CARRY)
self.assertEqual(0, mpu.p & mpu.ZERO)
def test_adc_bcd_off_abs_y_carry_clear_in_carry_set_out(self):
mpu = self._make_mpu()
mpu.a = 0x02
mpu.y = 0x03
# $0000 ADC $C000,Y
self._write(mpu.memory, 0x0000, (0x79, 0x00, 0xC0))
mpu.memory[0xC000 + mpu.y] = 0xFF
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x01, mpu.a)
self.assertEqual(mpu.CARRY, mpu.p & mpu.CARRY)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
def test_adc_bcd_off_abs_y_overflow_clr_no_carry_01_plus_01(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.CARRY)
mpu.a = 0x01
# $0000 ADC $C000,Y
self._write(mpu.memory, 0x0000, (0x79, 0x00, 0xC0))
mpu.memory[0xC000 + mpu.y] = 0x01
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x02, mpu.a)
self.assertEqual(0, mpu.p & mpu.OVERFLOW)
def test_adc_bcd_off_abs_y_overflow_clr_no_carry_01_plus_ff(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.CARRY)
mpu.a = 0x01
# $0000 ADC $C000,Y
self._write(mpu.memory, 0x0000, (0x79, 0x00, 0xC0))
mpu.memory[0xC000 + mpu.y] = 0xFF
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x00, mpu.a)
self.assertEqual(0, mpu.p & mpu.OVERFLOW)
def test_adc_bcd_off_abs_y_overflow_set_no_carry_7f_plus_01(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.CARRY)
mpu.a = 0x7f
# $0000 ADC $C000,Y
self._write(mpu.memory, 0x0000, (0x79, 0x00, 0xC0))
mpu.memory[0xC000 + mpu.y] = 0x01
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x80, mpu.a)
self.assertEqual(mpu.OVERFLOW, mpu.p & mpu.OVERFLOW)
def test_adc_bcd_off_abs_y_overflow_set_no_carry_80_plus_ff(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.CARRY)
mpu.a = 0x80
# $0000 ADC $C000,Y
self._write(mpu.memory, 0x0000, (0x79, 0x00, 0xC0))
mpu.memory[0xC000 + mpu.y] = 0xFF
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x7f, mpu.a)
self.assertEqual(mpu.OVERFLOW, mpu.p & mpu.OVERFLOW)
def test_adc_bcd_off_abs_y_overflow_set_on_40_plus_40(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.OVERFLOW)
mpu.a = 0x40
mpu.y = 0x03
# $0000 ADC $C000,Y
self._write(mpu.memory, 0x0000, (0x79, 0x00, 0xC0))
mpu.memory[0xC000 + mpu.y] = 0x40
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x80, mpu.a)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
self.assertEqual(mpu.OVERFLOW, mpu.p & mpu.OVERFLOW)
self.assertEqual(0, mpu.p & mpu.ZERO)
# ADC Zero Page, X-Indexed
def test_adc_bcd_off_zp_x_carry_clear_in_accumulator_zeroes(self):
mpu = self._make_mpu()
mpu.a = 0x00
mpu.x = 0x03
# $0000 ADC $0010,X
self._write(mpu.memory, 0x0000, (0x75, 0x10))
mpu.memory[0x0010 + mpu.x] = 0x00
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x00, mpu.a)
self.assertEqual(0, mpu.p & mpu.CARRY)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
def test_adc_bcd_off_zp_x_carry_set_in_accumulator_zero(self):
mpu = self._make_mpu()
mpu.a = 0
mpu.x = 0x03
mpu.p |= mpu.CARRY
# $0000 ADC $0010,X
self._write(mpu.memory, 0x0000, (0x75, 0x10))
mpu.memory[0x0010 + mpu.x] = 0x00
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x01, mpu.a)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
self.assertNotEqual(mpu.CARRY, mpu.p & mpu.CARRY)
def test_adc_bcd_off_zp_x_carry_clear_in_no_carry_clear_out(self):
mpu = self._make_mpu()
mpu.a = 0x01
mpu.x = 0x03
# $0000 ADC $0010,X
self._write(mpu.memory, 0x0000, (0x75, 0x10))
mpu.memory[0x0010 + mpu.x] = 0xFE
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0xFF, mpu.a)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.CARRY)
self.assertEqual(0, mpu.p & mpu.ZERO)
def test_adc_bcd_off_zp_x_carry_clear_in_carry_set_out(self):
mpu = self._make_mpu()
mpu.a = 0x02
mpu.x = 0x03
# $0000 ADC $0010,X
self._write(mpu.memory, 0x0000, (0x75, 0x10))
mpu.memory[0x0010 + mpu.x] = 0xFF
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x01, mpu.a)
self.assertEqual(mpu.CARRY, mpu.p & mpu.CARRY)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
def test_adc_bcd_off_zp_x_overflow_clr_no_carry_01_plus_01(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.CARRY)
mpu.a = 0x01
mpu.x = 0x03
# $0000 ADC $0010,X
self._write(mpu.memory, 0x0000, (0x75, 0x10))
mpu.memory[0x0010 + mpu.x] = 0x01
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x02, mpu.a)
self.assertEqual(0, mpu.p & mpu.OVERFLOW)
def test_adc_bcd_off_zp_x_overflow_clr_no_carry_01_plus_ff(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.CARRY)
mpu.a = 0x01
mpu.x = 0x03
# $0000 ADC $0010,X
self._write(mpu.memory, 0x0000, (0x75, 0x10))
mpu.memory[0x0010 + mpu.x] = 0xFF
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x00, mpu.a)
self.assertEqual(0, mpu.p & mpu.OVERFLOW)
def test_adc_bcd_off_zp_x_overflow_set_no_carry_7f_plus_01(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.CARRY)
mpu.a = 0x7f
mpu.x = 0x03
# $0000 ADC $0010,X
self._write(mpu.memory, 0x0000, (0x75, 0x10))
mpu.memory[0x0010 + mpu.x] = 0x01
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x80, mpu.a)
self.assertEqual(mpu.OVERFLOW, mpu.p & mpu.OVERFLOW)
def test_adc_bcd_off_zp_x_overflow_set_no_carry_80_plus_ff(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.CARRY)
mpu.a = 0x80
mpu.x = 0x03
# $0000 ADC $0010,X
self._write(mpu.memory, 0x0000, (0x75, 0x10))
mpu.memory[0x0010 + mpu.x] = 0xff
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x7f, mpu.a)
self.assertEqual(mpu.OVERFLOW, mpu.p & mpu.OVERFLOW)
def test_adc_bcd_off_zp_x_overflow_set_on_40_plus_40(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.OVERFLOW)
mpu.a = 0x40
mpu.x = 0x03
# $0000 ADC $0010,X
self._write(mpu.memory, 0x0000, (0x75, 0x10))
mpu.memory[0x0010 + mpu.x] = 0x40
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x80, mpu.a)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
self.assertEqual(mpu.OVERFLOW, mpu.p & mpu.OVERFLOW)
self.assertEqual(0, mpu.p & mpu.ZERO)
# ADC Indirect, Indexed (X)
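    # ($0010,X) addressing: X is added to the zero page operand to locate a
    # two-byte pointer (here $0013/$0014 -> $ABCD); the value is then read
    # from that pointed-to address.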
def test_adc_bcd_off_ind_indexed_carry_clear_in_accumulator_zeroes(self):
mpu = self._make_mpu()
mpu.a = 0x00
mpu.x = 0x03
# $0000 ADC ($0010,X)
# $0013 Vector to $ABCD
self._write(mpu.memory, 0x0000, (0x61, 0x10))
self._write(mpu.memory, 0x0013, (0xCD, 0xAB))
mpu.memory[0xABCD] = 0x00
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x00, mpu.a)
self.assertEqual(0, mpu.p & mpu.CARRY)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
def test_adc_bcd_off_ind_indexed_carry_set_in_accumulator_zero(self):
mpu = self._make_mpu()
mpu.a = 0
mpu.x = 0x03
mpu.p |= mpu.CARRY
# $0000 ADC ($0010,X)
# $0013 Vector to $ABCD
self._write(mpu.memory, 0x0000, (0x61, 0x10))
self._write(mpu.memory, 0x0013, (0xCD, 0xAB))
mpu.memory[0xABCD] = 0x00
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x01, mpu.a)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
self.assertNotEqual(mpu.CARRY, mpu.p & mpu.CARRY)
def test_adc_bcd_off_ind_indexed_carry_clear_in_no_carry_clear_out(self):
mpu = self._make_mpu()
mpu.a = 0x01
mpu.x = 0x03
# $0000 ADC ($0010,X)
# $0013 Vector to $ABCD
self._write(mpu.memory, 0x0000, (0x61, 0x10))
self._write(mpu.memory, 0x0013, (0xCD, 0xAB))
mpu.memory[0xABCD] = 0xFE
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0xFF, mpu.a)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.CARRY)
self.assertEqual(0, mpu.p & mpu.ZERO)
def test_adc_bcd_off_ind_indexed_carry_clear_in_carry_set_out(self):
mpu = self._make_mpu()
mpu.a = 0x02
mpu.x = 0x03
# $0000 ADC ($0010,X)
# $0013 Vector to $ABCD
self._write(mpu.memory, 0x0000, (0x61, 0x10))
self._write(mpu.memory, 0x0013, (0xCD, 0xAB))
mpu.memory[0xABCD] = 0xFF
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x01, mpu.a)
self.assertEqual(mpu.CARRY, mpu.p & mpu.CARRY)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
def test_adc_bcd_off_ind_indexed_overflow_clr_no_carry_01_plus_01(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.CARRY)
mpu.a = 0x01
mpu.x = 0x03
# $0000 ADC ($0010,X)
# $0013 Vector to $ABCD
self._write(mpu.memory, 0x0000, (0x61, 0x10))
self._write(mpu.memory, 0x0013, (0xCD, 0xAB))
mpu.memory[0xABCD] = 0x01
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x02, mpu.a)
self.assertEqual(0, mpu.p & mpu.OVERFLOW)
def test_adc_bcd_off_ind_indexed_overflow_clr_no_carry_01_plus_ff(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.CARRY)
mpu.a = 0x01
mpu.x = 0x03
# $0000 ADC ($0010,X)
# $0013 Vector to $ABCD
self._write(mpu.memory, 0x0000, (0x61, 0x10))
self._write(mpu.memory, 0x0013, (0xCD, 0xAB))
mpu.memory[0xABCD] = 0xFF
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x00, mpu.a)
self.assertEqual(0, mpu.p & mpu.OVERFLOW)
def test_adc_bcd_off_ind_indexed_overflow_set_no_carry_7f_plus_01(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.CARRY)
mpu.a = 0x7f
mpu.x = 0x03
# $0000 ADC ($0010,X)
# $0013 Vector to $ABCD
self._write(mpu.memory, 0x0000, (0x61, 0x10))
self._write(mpu.memory, 0x0013, (0xCD, 0xAB))
mpu.memory[0xABCD] = 0x01
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x80, mpu.a)
self.assertEqual(mpu.OVERFLOW, mpu.p & mpu.OVERFLOW)
def test_adc_bcd_off_ind_indexed_overflow_set_no_carry_80_plus_ff(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.CARRY)
mpu.a = 0x80
mpu.x = 0x03
# $0000 ADC ($0010,X)
# $0013 Vector to $ABCD
self._write(mpu.memory, 0x0000, (0x61, 0x10))
self._write(mpu.memory, 0x0013, (0xCD, 0xAB))
mpu.memory[0xABCD] = 0xFF
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x7f, mpu.a)
self.assertEqual(mpu.OVERFLOW, mpu.p & mpu.OVERFLOW)
def test_adc_bcd_off_ind_indexed_overflow_set_on_40_plus_40(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.OVERFLOW)
mpu.a = 0x40
mpu.x = 0x03
# $0000 ADC ($0010,X)
# $0013 Vector to $ABCD
self._write(mpu.memory, 0x0000, (0x61, 0x10))
self._write(mpu.memory, 0x0013, (0xCD, 0xAB))
mpu.memory[0xABCD] = 0x40
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x80, mpu.a)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
self.assertEqual(mpu.OVERFLOW, mpu.p & mpu.OVERFLOW)
self.assertEqual(0, mpu.p & mpu.ZERO)
# ADC Indexed, Indirect (Y)
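    # ($0010),Y addressing: the two-byte pointer at zero page $0010/$0011
    # (here $ABCD) is fetched first and Y is added to it, so the value is
    # read from $ABCD + Y.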
def test_adc_bcd_off_indexed_ind_carry_clear_in_accumulator_zeroes(self):
mpu = self._make_mpu()
mpu.a = 0x00
mpu.y = 0x03
# $0000 ADC ($0010),Y
# $0010 Vector to $ABCD
self._write(mpu.memory, 0x0000, (0x71, 0x10))
self._write(mpu.memory, 0x0010, (0xCD, 0xAB))
mpu.memory[0xABCD + mpu.y] = 0x00
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x00, mpu.a)
self.assertEqual(0, mpu.p & mpu.CARRY)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
def test_adc_bcd_off_indexed_ind_carry_set_in_accumulator_zero(self):
mpu = self._make_mpu()
mpu.a = 0
mpu.y = 0x03
mpu.p |= mpu.CARRY
# $0000 ADC ($0010),Y
# $0010 Vector to $ABCD
self._write(mpu.memory, 0x0000, (0x71, 0x10))
self._write(mpu.memory, 0x0010, (0xCD, 0xAB))
mpu.memory[0xABCD + mpu.y] = 0x00
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x01, mpu.a)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
self.assertNotEqual(mpu.CARRY, mpu.p & mpu.CARRY)
def test_adc_bcd_off_indexed_ind_carry_clear_in_no_carry_clear_out(self):
mpu = self._make_mpu()
mpu.a = 0x01
mpu.y = 0x03
# $0000 ADC ($0010),Y
# $0010 Vector to $ABCD
self._write(mpu.memory, 0x0000, (0x71, 0x10))
self._write(mpu.memory, 0x0010, (0xCD, 0xAB))
mpu.memory[0xABCD + mpu.y] = 0xFE
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0xFF, mpu.a)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.CARRY)
self.assertEqual(0, mpu.p & mpu.ZERO)
def test_adc_bcd_off_indexed_ind_carry_clear_in_carry_set_out(self):
mpu = self._make_mpu()
mpu.a = 0x02
mpu.y = 0x03
# $0000 ADC ($0010),Y
# $0010 Vector to $ABCD
self._write(mpu.memory, 0x0000, (0x71, 0x10))
self._write(mpu.memory, 0x0010, (0xCD, 0xAB))
mpu.memory[0xABCD + mpu.y] = 0xFF
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x01, mpu.a)
self.assertEqual(mpu.CARRY, mpu.p & mpu.CARRY)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
def test_adc_bcd_off_indexed_ind_overflow_clr_no_carry_01_plus_01(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.CARRY)
mpu.a = 0x01
mpu.y = 0x03
        # $0000 ADC ($0010),Y
# $0010 Vector to $ABCD
self._write(mpu.memory, 0x0000, (0x71, 0x10))
self._write(mpu.memory, 0x0010, (0xCD, 0xAB))
mpu.memory[0xABCD + mpu.y] = 0x01
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x02, mpu.a)
self.assertEqual(0, mpu.p & mpu.OVERFLOW)
def test_adc_bcd_off_indexed_ind_overflow_clr_no_carry_01_plus_ff(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.CARRY)
mpu.a = 0x01
mpu.y = 0x03
# $0000 ADC ($0010),Y
# $0010 Vector to $ABCD
self._write(mpu.memory, 0x0000, (0x71, 0x10))
self._write(mpu.memory, 0x0010, (0xCD, 0xAB))
mpu.memory[0xABCD + mpu.y] = 0xFF
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x00, mpu.a)
self.assertEqual(0, mpu.p & mpu.OVERFLOW)
def test_adc_bcd_off_indexed_ind_overflow_set_no_carry_7f_plus_01(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.CARRY)
mpu.a = 0x7f
mpu.y = 0x03
# $0000 ADC ($0010),Y
# $0010 Vector to $ABCD
self._write(mpu.memory, 0x0000, (0x71, 0x10))
self._write(mpu.memory, 0x0010, (0xCD, 0xAB))
mpu.memory[0xABCD + mpu.y] = 0x01
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x80, mpu.a)
self.assertEqual(mpu.OVERFLOW, mpu.p & mpu.OVERFLOW)
def test_adc_bcd_off_indexed_ind_overflow_set_no_carry_80_plus_ff(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.CARRY)
mpu.a = 0x80
mpu.y = 0x03
        # $0000 ADC ($0010),Y
# $0010 Vector to $ABCD
self._write(mpu.memory, 0x0000, (0x71, 0x10))
self._write(mpu.memory, 0x0010, (0xCD, 0xAB))
mpu.memory[0xABCD + mpu.y] = 0xFF
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x7f, mpu.a)
self.assertEqual(mpu.OVERFLOW, mpu.p & mpu.OVERFLOW)
def test_adc_bcd_off_indexed_ind_overflow_set_on_40_plus_40(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.OVERFLOW)
mpu.a = 0x40
mpu.y = 0x03
# $0000 ADC ($0010),Y
# $0010 Vector to $ABCD
self._write(mpu.memory, 0x0000, (0x71, 0x10))
self._write(mpu.memory, 0x0010, (0xCD, 0xAB))
mpu.memory[0xABCD + mpu.y] = 0x40
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x80, mpu.a)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
self.assertEqual(mpu.OVERFLOW, mpu.p & mpu.OVERFLOW)
self.assertEqual(0, mpu.p & mpu.ZERO)
# AND (Absolute)
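    # AND combines the memory operand with the accumulator bitwise, leaving
    # the result in A and setting N and Z from it.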
def test_and_absolute_all_zeros_setting_zero_flag(self):
mpu = self._make_mpu()
mpu.a = 0xFF
# $0000 AND $ABCD
self._write(mpu.memory, 0x0000, (0x2D, 0xCD, 0xAB))
mpu.memory[0xABCD] = 0x00
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x00, mpu.a)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
def test_and_absolute_zeros_and_ones_setting_negative_flag(self):
mpu = self._make_mpu()
mpu.a = 0xFF
# $0000 AND $ABCD
self._write(mpu.memory, 0x0000, (0x2D, 0xCD, 0xAB))
mpu.memory[0xABCD] = 0xAA
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0xAA, mpu.a)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
    # AND (Zero Page)
def test_and_zp_all_zeros_setting_zero_flag(self):
mpu = self._make_mpu()
mpu.a = 0xFF
# $0000 AND $0010
self._write(mpu.memory, 0x0000, (0x25, 0x10))
mpu.memory[0x0010] = 0x00
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x00, mpu.a)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
def test_and_zp_zeros_and_ones_setting_negative_flag(self):
mpu = self._make_mpu()
mpu.a = 0xFF
# $0000 AND $0010
self._write(mpu.memory, 0x0000, (0x25, 0x10))
mpu.memory[0x0010] = 0xAA
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0xAA, mpu.a)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
# AND (Immediate)
def test_and_immediate_all_zeros_setting_zero_flag(self):
mpu = self._make_mpu()
mpu.a = 0xFF
# $0000 AND #$00
self._write(mpu.memory, 0x0000, (0x29, 0x00))
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x00, mpu.a)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
def test_and_immediate_zeros_and_ones_setting_negative_flag(self):
mpu = self._make_mpu()
mpu.a = 0xFF
# $0000 AND #$AA
self._write(mpu.memory, 0x0000, (0x29, 0xAA))
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0xAA, mpu.a)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
# AND (Absolute, X-Indexed)
def test_and_abs_x_all_zeros_setting_zero_flag(self):
mpu = self._make_mpu()
mpu.a = 0xFF
mpu.x = 0x03
# $0000 AND $ABCD,X
self._write(mpu.memory, 0x0000, (0x3d, 0xCD, 0xAB))
mpu.memory[0xABCD + mpu.x] = 0x00
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x00, mpu.a)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
def test_and_abs_x_zeros_and_ones_setting_negative_flag(self):
mpu = self._make_mpu()
mpu.a = 0xFF
mpu.x = 0x03
# $0000 AND $ABCD,X
self._write(mpu.memory, 0x0000, (0x3d, 0xCD, 0xAB))
mpu.memory[0xABCD + mpu.x] = 0xAA
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0xAA, mpu.a)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
# AND (Absolute, Y-Indexed)
def test_and_abs_y_all_zeros_setting_zero_flag(self):
mpu = self._make_mpu()
mpu.a = 0xFF
mpu.y = 0x03
        # $0000 AND $ABCD,Y
self._write(mpu.memory, 0x0000, (0x39, 0xCD, 0xAB))
mpu.memory[0xABCD + mpu.y] = 0x00
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x00, mpu.a)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
def test_and_abs_y_zeros_and_ones_setting_negative_flag(self):
mpu = self._make_mpu()
mpu.a = 0xFF
mpu.y = 0x03
        # $0000 AND $ABCD,Y
self._write(mpu.memory, 0x0000, (0x39, 0xCD, 0xAB))
mpu.memory[0xABCD + mpu.y] = 0xAA
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0xAA, mpu.a)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
# AND Indirect, Indexed (X)
def test_and_ind_indexed_x_all_zeros_setting_zero_flag(self):
mpu = self._make_mpu()
mpu.a = 0xFF
mpu.x = 0x03
# $0000 AND ($0010,X)
# $0013 Vector to $ABCD
self._write(mpu.memory, 0x0000, (0x21, 0x10))
self._write(mpu.memory, 0x0013, (0xCD, 0xAB))
mpu.memory[0xABCD] = 0x00
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x00, mpu.a)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
def test_and_ind_indexed_x_zeros_and_ones_setting_negative_flag(self):
mpu = self._make_mpu()
mpu.a = 0xFF
mpu.x = 0x03
# $0000 AND ($0010,X)
# $0013 Vector to $ABCD
self._write(mpu.memory, 0x0000, (0x21, 0x10))
self._write(mpu.memory, 0x0013, (0xCD, 0xAB))
mpu.memory[0xABCD] = 0xAA
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0xAA, mpu.a)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
# AND Indexed, Indirect (Y)
def test_and_indexed_ind_y_all_zeros_setting_zero_flag(self):
mpu = self._make_mpu()
mpu.a = 0xFF
mpu.y = 0x03
# $0000 AND ($0010),Y
# $0010 Vector to $ABCD
self._write(mpu.memory, 0x0000, (0x31, 0x10))
self._write(mpu.memory, 0x0010, (0xCD, 0xAB))
mpu.memory[0xABCD + mpu.y] = 0x00
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x00, mpu.a)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
def test_and_indexed_ind_y_zeros_and_ones_setting_negative_flag(self):
mpu = self._make_mpu()
mpu.a = 0xFF
mpu.y = 0x03
# $0000 AND ($0010),Y
# $0010 Vector to $ABCD
self._write(mpu.memory, 0x0000, (0x31, 0x10))
self._write(mpu.memory, 0x0010, (0xCD, 0xAB))
mpu.memory[0xABCD + mpu.y] = 0xAA
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0xAA, mpu.a)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
# AND Zero Page, X-Indexed
def test_and_zp_x_all_zeros_setting_zero_flag(self):
mpu = self._make_mpu()
mpu.a = 0xFF
mpu.x = 0x03
# $0000 AND $0010,X
self._write(mpu.memory, 0x0000, (0x35, 0x10))
mpu.memory[0x0010 + mpu.x] = 0x00
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x00, mpu.a)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
def test_and_zp_x_all_zeros_and_ones_setting_negative_flag(self):
mpu = self._make_mpu()
mpu.a = 0xFF
mpu.x = 0x03
# $0000 AND $0010,X
self._write(mpu.memory, 0x0000, (0x35, 0x10))
mpu.memory[0x0010 + mpu.x] = 0xAA
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0xAA, mpu.a)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
# ASL Accumulator
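    # ASL shifts its operand one bit to the left: bit 7 moves into the carry
    # flag, a zero enters bit 0, and N and Z are set from the shifted result.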
def test_asl_accumulator_sets_z_flag(self):
mpu = self._make_mpu()
mpu.a = 0x00
# $0000 ASL A
mpu.memory[0x0000] = 0x0A
mpu.step()
self.assertEqual(0x0001, mpu.pc)
self.assertEqual(0x00, mpu.a)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
def test_asl_accumulator_sets_n_flag(self):
mpu = self._make_mpu()
mpu.a = 0x40
# $0000 ASL A
mpu.memory[0x0000] = 0x0A
mpu.step()
self.assertEqual(0x0001, mpu.pc)
self.assertEqual(0x80, mpu.a)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
def test_asl_accumulator_shifts_out_zero(self):
mpu = self._make_mpu()
mpu.a = 0x7F
# $0000 ASL A
mpu.memory[0x0000] = 0x0A
mpu.step()
self.assertEqual(0x0001, mpu.pc)
self.assertEqual(0xFE, mpu.a)
self.assertEqual(0, mpu.p & mpu.CARRY)
def test_asl_accumulator_shifts_out_one(self):
mpu = self._make_mpu()
mpu.a = 0xFF
# $0000 ASL A
mpu.memory[0x0000] = 0x0A
mpu.step()
self.assertEqual(0x0001, mpu.pc)
self.assertEqual(0xFE, mpu.a)
self.assertEqual(mpu.CARRY, mpu.p & mpu.CARRY)
def test_asl_accumulator_80_sets_z_flag(self):
mpu = self._make_mpu()
mpu.a = 0x80
mpu.p &= ~(mpu.ZERO)
# $0000 ASL A
mpu.memory[0x0000] = 0x0A
mpu.step()
self.assertEqual(0x0001, mpu.pc)
self.assertEqual(0x00, mpu.a)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
# ASL Absolute
def test_asl_absolute_sets_z_flag(self):
mpu = self._make_mpu()
# $0000 ASL $ABCD
self._write(mpu.memory, 0x0000, (0x0E, 0xCD, 0xAB))
mpu.memory[0xABCD] = 0x00
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x00, mpu.memory[0xABCD])
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
def test_asl_absolute_sets_n_flag(self):
mpu = self._make_mpu()
# $0000 ASL $ABCD
self._write(mpu.memory, 0x0000, (0x0E, 0xCD, 0xAB))
mpu.memory[0xABCD] = 0x40
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x80, mpu.memory[0xABCD])
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
def test_asl_absolute_shifts_out_zero(self):
mpu = self._make_mpu()
mpu.a = 0xAA
# $0000 ASL $ABCD
self._write(mpu.memory, 0x0000, (0x0E, 0xCD, 0xAB))
mpu.memory[0xABCD] = 0x7F
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0xAA, mpu.a)
self.assertEqual(0xFE, mpu.memory[0xABCD])
self.assertEqual(0, mpu.p & mpu.CARRY)
def test_asl_absolute_shifts_out_one(self):
mpu = self._make_mpu()
mpu.a = 0xAA
# $0000 ASL $ABCD
self._write(mpu.memory, 0x0000, (0x0E, 0xCD, 0xAB))
mpu.memory[0xABCD] = 0xFF
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0xAA, mpu.a)
self.assertEqual(0xFE, mpu.memory[0xABCD])
self.assertEqual(mpu.CARRY, mpu.p & mpu.CARRY)
# ASL Zero Page
def test_asl_zp_sets_z_flag(self):
mpu = self._make_mpu()
# $0000 ASL $0010
self._write(mpu.memory, 0x0000, (0x06, 0x10))
mpu.memory[0x0010] = 0x00
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x00, mpu.memory[0x0010])
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
def test_asl_zp_sets_n_flag(self):
mpu = self._make_mpu()
# $0000 ASL $0010
self._write(mpu.memory, 0x0000, (0x06, 0x10))
mpu.memory[0x0010] = 0x40
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x80, mpu.memory[0x0010])
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
def test_asl_zp_shifts_out_zero(self):
mpu = self._make_mpu()
mpu.a = 0xAA
# $0000 ASL $0010
self._write(mpu.memory, 0x0000, (0x06, 0x10))
mpu.memory[0x0010] = 0x7F
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0xAA, mpu.a)
self.assertEqual(0xFE, mpu.memory[0x0010])
self.assertEqual(0, mpu.p & mpu.CARRY)
def test_asl_zp_shifts_out_one(self):
mpu = self._make_mpu()
mpu.a = 0xAA
# $0000 ASL $0010
self._write(mpu.memory, 0x0000, (0x06, 0x10))
mpu.memory[0x0010] = 0xFF
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0xAA, mpu.a)
self.assertEqual(0xFE, mpu.memory[0x0010])
self.assertEqual(mpu.CARRY, mpu.p & mpu.CARRY)
# ASL Absolute, X-Indexed
def test_asl_abs_x_indexed_sets_z_flag(self):
mpu = self._make_mpu()
mpu.x = 0x03
# $0000 ASL $ABCD,X
self._write(mpu.memory, 0x0000, (0x1E, 0xCD, 0xAB))
mpu.memory[0xABCD + mpu.x] = 0x00
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x00, mpu.memory[0xABCD + mpu.x])
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
def test_asl_abs_x_indexed_sets_n_flag(self):
mpu = self._make_mpu()
mpu.x = 0x03
# $0000 ASL $ABCD,X
self._write(mpu.memory, 0x0000, (0x1E, 0xCD, 0xAB))
mpu.memory[0xABCD + mpu.x] = 0x40
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x80, mpu.memory[0xABCD + mpu.x])
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
def test_asl_abs_x_indexed_shifts_out_zero(self):
mpu = self._make_mpu()
mpu.a = 0xAA
mpu.x = 0x03
# $0000 ASL $ABCD,X
self._write(mpu.memory, 0x0000, (0x1E, 0xCD, 0xAB))
mpu.memory[0xABCD + mpu.x] = 0x7F
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0xAA, mpu.a)
self.assertEqual(0xFE, mpu.memory[0xABCD + mpu.x])
self.assertEqual(0, mpu.p & mpu.CARRY)
def test_asl_abs_x_indexed_shifts_out_one(self):
mpu = self._make_mpu()
mpu.a = 0xAA
mpu.x = 0x03
# $0000 ASL $ABCD,X
self._write(mpu.memory, 0x0000, (0x1E, 0xCD, 0xAB))
mpu.memory[0xABCD + mpu.x] = 0xFF
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0xAA, mpu.a)
self.assertEqual(0xFE, mpu.memory[0xABCD + mpu.x])
self.assertEqual(mpu.CARRY, mpu.p & mpu.CARRY)
# ASL Zero Page, X-Indexed
def test_asl_zp_x_indexed_sets_z_flag(self):
mpu = self._make_mpu()
mpu.x = 0x03
# $0000 ASL $0010,X
self._write(mpu.memory, 0x0000, (0x16, 0x10))
mpu.memory[0x0010 + mpu.x] = 0x00
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x00, mpu.memory[0x0010 + mpu.x])
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
def test_asl_zp_x_indexed_sets_n_flag(self):
mpu = self._make_mpu()
mpu.x = 0x03
# $0000 ASL $0010,X
self._write(mpu.memory, 0x0000, (0x16, 0x10))
mpu.memory[0x0010 + mpu.x] = 0x40
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x80, mpu.memory[0x0010 + mpu.x])
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
def test_asl_zp_x_indexed_shifts_out_zero(self):
mpu = self._make_mpu()
mpu.x = 0x03
mpu.a = 0xAA
# $0000 ASL $0010,X
self._write(mpu.memory, 0x0000, (0x16, 0x10))
mpu.memory[0x0010 + mpu.x] = 0x7F
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0xAA, mpu.a)
self.assertEqual(0xFE, mpu.memory[0x0010 + mpu.x])
self.assertEqual(0, mpu.p & mpu.CARRY)
def test_asl_zp_x_indexed_shifts_out_one(self):
mpu = self._make_mpu()
mpu.x = 0x03
mpu.a = 0xAA
# $0000 ASL $0010,X
self._write(mpu.memory, 0x0000, (0x16, 0x10))
mpu.memory[0x0010 + mpu.x] = 0xFF
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0xAA, mpu.a)
self.assertEqual(0xFE, mpu.memory[0x0010 + mpu.x])
self.assertEqual(mpu.CARRY, mpu.p & mpu.CARRY)
# BCC
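    # Branch tests follow a common pattern: when taken, a branch adds its
    # signed 8-bit offset to the address of the next instruction, so +6 lands
    # 6 bytes past the two-byte branch and -6 (encoded as 0xFA) lands 6 bytes
    # before it.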
def test_bcc_carry_clear_branches_relative_forward(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.CARRY)
# $0000 BCC +6
self._write(mpu.memory, 0x0000, (0x90, 0x06))
mpu.step()
self.assertEqual(0x0002 + 0x06, mpu.pc)
def test_bcc_carry_clear_branches_relative_backward(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.CARRY)
mpu.pc = 0x0050
        rel = (0x06 ^ 0xFF) + 1  # two's complement of 6 (0xFA)
        # $0050 BCC -6
self._write(mpu.memory, 0x0050, (0x90, rel))
mpu.step()
        self.assertEqual(0x0052 + rel - 0x100, mpu.pc)
def test_bcc_carry_set_does_not_branch(self):
mpu = self._make_mpu()
mpu.p |= mpu.CARRY
# $0000 BCC +6
self._write(mpu.memory, 0x0000, (0x90, 0x06))
mpu.step()
self.assertEqual(0x0002, mpu.pc)
# BCS
def test_bcs_carry_set_branches_relative_forward(self):
mpu = self._make_mpu()
mpu.p |= mpu.CARRY
# $0000 BCS +6
self._write(mpu.memory, 0x0000, (0xB0, 0x06))
mpu.step()
self.assertEqual(0x0002 + 0x06, mpu.pc)
def test_bcs_carry_set_branches_relative_backward(self):
mpu = self._make_mpu()
mpu.p |= mpu.CARRY
mpu.pc = 0x0050
        rel = (0x06 ^ 0xFF) + 1  # two's complement of 6 (0xFA)
        # $0050 BCS -6
self._write(mpu.memory, 0x0050, (0xB0, rel))
mpu.step()
        self.assertEqual(0x0052 + rel - 0x100, mpu.pc)
def test_bcs_carry_clear_does_not_branch(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.CARRY)
# $0000 BCS +6
self._write(mpu.memory, 0x0000, (0xB0, 0x06))
mpu.step()
self.assertEqual(0x0002, mpu.pc)
# BEQ
def test_beq_zero_set_branches_relative_forward(self):
mpu = self._make_mpu()
mpu.p |= mpu.ZERO
# $0000 BEQ +6
self._write(mpu.memory, 0x0000, (0xF0, 0x06))
mpu.step()
self.assertEqual(0x0002 + 0x06, mpu.pc)
def test_beq_zero_set_branches_relative_backward(self):
mpu = self._make_mpu()
mpu.p |= mpu.ZERO
mpu.pc = 0x0050
        rel = (0x06 ^ 0xFF) + 1  # two's complement of 6 (0xFA)
        # $0050 BEQ -6
self._write(mpu.memory, 0x0050, (0xF0, rel))
mpu.step()
        self.assertEqual(0x0052 + rel - 0x100, mpu.pc)
def test_beq_zero_clear_does_not_branch(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.ZERO)
# $0000 BEQ +6
self._write(mpu.memory, 0x0000, (0xF0, 0x06))
mpu.step()
self.assertEqual(0x0002, mpu.pc)
# BIT (Absolute)
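    # BIT copies bit 7 of the memory operand into N and bit 6 into V, and
    # sets Z from (A AND memory); neither A nor the memory operand is
    # modified.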
def test_bit_abs_copies_bit_7_of_memory_to_n_flag_when_0(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.NEGATIVE)
# $0000 BIT $FEED
self._write(mpu.memory, 0x0000, (0x2C, 0xED, 0xFE))
mpu.memory[0xFEED] = 0xFF
mpu.a = 0xFF
mpu.step()
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
def test_bit_abs_copies_bit_7_of_memory_to_n_flag_when_1(self):
mpu = self._make_mpu()
mpu.p |= mpu.NEGATIVE
# $0000 BIT $FEED
self._write(mpu.memory, 0x0000, (0x2C, 0xED, 0xFE))
mpu.memory[0xFEED] = 0x00
mpu.a = 0xFF
mpu.step()
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
def test_bit_abs_copies_bit_6_of_memory_to_v_flag_when_0(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.OVERFLOW)
# $0000 BIT $FEED
self._write(mpu.memory, 0x0000, (0x2C, 0xED, 0xFE))
mpu.memory[0xFEED] = 0xFF
mpu.a = 0xFF
mpu.step()
self.assertEqual(mpu.OVERFLOW, mpu.p & mpu.OVERFLOW)
def test_bit_abs_copies_bit_6_of_memory_to_v_flag_when_1(self):
mpu = self._make_mpu()
mpu.p |= mpu.OVERFLOW
# $0000 BIT $FEED
self._write(mpu.memory, 0x0000, (0x2C, 0xED, 0xFE))
mpu.memory[0xFEED] = 0x00
mpu.a = 0xFF
mpu.step()
self.assertEqual(0, mpu.p & mpu.OVERFLOW)
def test_bit_abs_stores_result_of_and_in_z_preserves_a_when_1(self):
mpu = self._make_mpu()
mpu.p &= ~mpu.ZERO
# $0000 BIT $FEED
self._write(mpu.memory, 0x0000, (0x2C, 0xED, 0xFE))
mpu.memory[0xFEED] = 0x00
mpu.a = 0x01
mpu.step()
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
self.assertEqual(0x01, mpu.a)
self.assertEqual(0x00, mpu.memory[0xFEED])
def test_bit_abs_stores_result_of_and_when_nonzero_in_z_preserves_a(self):
mpu = self._make_mpu()
mpu.p |= mpu.ZERO
# $0000 BIT $FEED
self._write(mpu.memory, 0x0000, (0x2C, 0xED, 0xFE))
mpu.memory[0xFEED] = 0x01
mpu.a = 0x01
mpu.step()
self.assertEqual(0, mpu.p & mpu.ZERO) # result of AND is non-zero
self.assertEqual(0x01, mpu.a)
self.assertEqual(0x01, mpu.memory[0xFEED])
def test_bit_abs_stores_result_of_and_when_zero_in_z_preserves_a(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.ZERO)
# $0000 BIT $FEED
self._write(mpu.memory, 0x0000, (0x2C, 0xED, 0xFE))
mpu.memory[0xFEED] = 0x00
mpu.a = 0x01
mpu.step()
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO) # result of AND is zero
self.assertEqual(0x01, mpu.a)
self.assertEqual(0x00, mpu.memory[0xFEED])
# BIT (Zero Page)
def test_bit_zp_copies_bit_7_of_memory_to_n_flag_when_0(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.NEGATIVE)
# $0000 BIT $0010
self._write(mpu.memory, 0x0000, (0x24, 0x10))
mpu.memory[0x0010] = 0xFF
mpu.a = 0xFF
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(3, mpu.processorCycles)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
def test_bit_zp_copies_bit_7_of_memory_to_n_flag_when_1(self):
mpu = self._make_mpu()
mpu.p |= mpu.NEGATIVE
# $0000 BIT $0010
self._write(mpu.memory, 0x0000, (0x24, 0x10))
mpu.memory[0x0010] = 0x00
mpu.a = 0xFF
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(3, mpu.processorCycles)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
def test_bit_zp_copies_bit_6_of_memory_to_v_flag_when_0(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.OVERFLOW)
# $0000 BIT $0010
self._write(mpu.memory, 0x0000, (0x24, 0x10))
mpu.memory[0x0010] = 0xFF
mpu.a = 0xFF
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(3, mpu.processorCycles)
self.assertEqual(mpu.OVERFLOW, mpu.p & mpu.OVERFLOW)
def test_bit_zp_copies_bit_6_of_memory_to_v_flag_when_1(self):
mpu = self._make_mpu()
mpu.p |= mpu.OVERFLOW
# $0000 BIT $0010
self._write(mpu.memory, 0x0000, (0x24, 0x10))
mpu.memory[0x0010] = 0x00
mpu.a = 0xFF
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(3, mpu.processorCycles)
self.assertEqual(0, mpu.p & mpu.OVERFLOW)
def test_bit_zp_stores_result_of_and_in_z_preserves_a_when_1(self):
mpu = self._make_mpu()
mpu.p &= ~mpu.ZERO
# $0000 BIT $0010
self._write(mpu.memory, 0x0000, (0x24, 0x10))
mpu.memory[0x0010] = 0x00
mpu.a = 0x01
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(3, mpu.processorCycles)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
self.assertEqual(0x01, mpu.a)
self.assertEqual(0x00, mpu.memory[0x0010])
def test_bit_zp_stores_result_of_and_when_nonzero_in_z_preserves_a(self):
mpu = self._make_mpu()
mpu.p |= mpu.ZERO
# $0000 BIT $0010
self._write(mpu.memory, 0x0000, (0x24, 0x10))
mpu.memory[0x0010] = 0x01
mpu.a = 0x01
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(3, mpu.processorCycles)
self.assertEqual(0, mpu.p & mpu.ZERO) # result of AND is non-zero
self.assertEqual(0x01, mpu.a)
self.assertEqual(0x01, mpu.memory[0x0010])
def test_bit_zp_stores_result_of_and_when_zero_in_z_preserves_a(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.ZERO)
# $0000 BIT $0010
self._write(mpu.memory, 0x0000, (0x24, 0x10))
mpu.memory[0x0010] = 0x00
mpu.a = 0x01
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(3, mpu.processorCycles)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO) # result of AND is zero
self.assertEqual(0x01, mpu.a)
self.assertEqual(0x00, mpu.memory[0x0010])
# BMI
def test_bmi_negative_set_branches_relative_forward(self):
mpu = self._make_mpu()
mpu.p |= mpu.NEGATIVE
# $0000 BMI +06
self._write(mpu.memory, 0x0000, (0x30, 0x06))
mpu.step()
self.assertEqual(0x0002 + 0x06, mpu.pc)
def test_bmi_negative_set_branches_relative_backward(self):
mpu = self._make_mpu()
mpu.p |= mpu.NEGATIVE
mpu.pc = 0x0050
        # $0050 BMI -6
        rel = (0x06 ^ 0xFF) + 1  # two's complement of 6 (0xFA)
self._write(mpu.memory, 0x0050, (0x30, rel))
mpu.step()
        self.assertEqual(0x0052 + rel - 0x100, mpu.pc)
def test_bmi_negative_clear_does_not_branch(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.NEGATIVE)
        # $0000 BMI +6
self._write(mpu.memory, 0x0000, (0x30, 0x06))
mpu.step()
self.assertEqual(0x0002, mpu.pc)
# BNE
def test_bne_zero_clear_branches_relative_forward(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.ZERO)
# $0000 BNE +6
self._write(mpu.memory, 0x0000, (0xD0, 0x06))
mpu.step()
self.assertEqual(0x0002 + 0x06, mpu.pc)
def test_bne_zero_clear_branches_relative_backward(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.ZERO)
mpu.pc = 0x0050
# $0050 BNE -6
        rel = (0x06 ^ 0xFF) + 1  # two's complement of 6 (0xFA)
self._write(mpu.memory, 0x0050, (0xD0, rel))
mpu.step()
        self.assertEqual(0x0052 + rel - 0x100, mpu.pc)
def test_bne_zero_set_does_not_branch(self):
mpu = self._make_mpu()
mpu.p |= mpu.ZERO
# $0000 BNE +6
self._write(mpu.memory, 0x0000, (0xD0, 0x06))
mpu.step()
self.assertEqual(0x0002, mpu.pc)
# BPL
def test_bpl_negative_clear_branches_relative_forward(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.NEGATIVE)
# $0000 BPL +06
self._write(mpu.memory, 0x0000, (0x10, 0x06))
mpu.step()
self.assertEqual(0x0002 + 0x06, mpu.pc)
def test_bpl_negative_clear_branches_relative_backward(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.NEGATIVE)
mpu.pc = 0x0050
# $0050 BPL -6
        rel = (0x06 ^ 0xFF) + 1  # two's complement of 6 (0xFA)
self._write(mpu.memory, 0x0050, (0x10, rel))
mpu.step()
        self.assertEqual(0x0052 + rel - 0x100, mpu.pc)
def test_bpl_negative_set_does_not_branch(self):
mpu = self._make_mpu()
mpu.p |= mpu.NEGATIVE
# $0000 BPL +6
self._write(mpu.memory, 0x0000, (0x10, 0x06))
mpu.step()
self.assertEqual(0x0002, mpu.pc)
# BRK
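    # BRK pushes the address of BRK+2 (high byte first) and the status byte
    # with the BREAK and UNUSED bits set, then sets the INTERRUPT flag and
    # loads the program counter from the IRQ/BRK vector at $FFFE/$FFFF.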
def test_brk_pushes_pc_plus_2_and_status_then_sets_pc_to_irq_vector(self):
mpu = self._make_mpu()
mpu.p = mpu.UNUSED
self._write(mpu.memory, 0xFFFE, (0xCD, 0xAB))
# $C000 BRK
mpu.memory[0xC000] = 0x00
mpu.pc = 0xC000
mpu.step()
self.assertEqual(0xABCD, mpu.pc)
self.assertEqual(0xC0, mpu.memory[0x1FF]) # PCH
self.assertEqual(0x02, mpu.memory[0x1FE]) # PCL
self.assertEqual(mpu.BREAK | mpu.UNUSED, mpu.memory[0x1FD]) # Status
self.assertEqual(0xFC, mpu.sp)
self.assertEqual(mpu.BREAK | mpu.UNUSED | mpu.INTERRUPT, mpu.p)
# BVC
def test_bvc_overflow_clear_branches_relative_forward(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.OVERFLOW)
# $0000 BVC +6
self._write(mpu.memory, 0x0000, (0x50, 0x06))
mpu.step()
self.assertEqual(0x0002 + 0x06, mpu.pc)
def test_bvc_overflow_clear_branches_relative_backward(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.OVERFLOW)
mpu.pc = 0x0050
        rel = (0x06 ^ 0xFF) + 1  # two's complement of 6 (0xFA)
# $0050 BVC -6
self._write(mpu.memory, 0x0050, (0x50, rel))
mpu.step()
        self.assertEqual(0x0052 + rel - 0x100, mpu.pc)
def test_bvc_overflow_set_does_not_branch(self):
mpu = self._make_mpu()
mpu.p |= mpu.OVERFLOW
# $0000 BVC +6
self._write(mpu.memory, 0x0000, (0x50, 0x06))
mpu.step()
self.assertEqual(0x0002, mpu.pc)
# BVS
def test_bvs_overflow_set_branches_relative_forward(self):
mpu = self._make_mpu()
mpu.p |= mpu.OVERFLOW
# $0000 BVS +6
self._write(mpu.memory, 0x0000, (0x70, 0x06))
mpu.step()
self.assertEqual(0x0002 + 0x06, mpu.pc)
def test_bvs_overflow_set_branches_relative_backward(self):
mpu = self._make_mpu()
mpu.p |= mpu.OVERFLOW
mpu.pc = 0x0050
        rel = (0x06 ^ 0xFF) + 1  # two's complement of 6 (0xFA)
# $0050 BVS -6
self._write(mpu.memory, 0x0050, (0x70, rel))
mpu.step()
        self.assertEqual(0x0052 + rel - 0x100, mpu.pc)
def test_bvs_overflow_clear_does_not_branch(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.OVERFLOW)
# $0000 BVS +6
self._write(mpu.memory, 0x0000, (0x70, 0x06))
mpu.step()
self.assertEqual(0x0002, mpu.pc)
# CLC
def test_clc_clears_carry_flag(self):
mpu = self._make_mpu()
mpu.p |= mpu.CARRY
# $0000 CLC
mpu.memory[0x0000] = 0x18
mpu.step()
self.assertEqual(0x0001, mpu.pc)
self.assertEqual(0, mpu.p & mpu.CARRY)
# CLD
def test_cld_clears_decimal_flag(self):
mpu = self._make_mpu()
mpu.p |= mpu.DECIMAL
# $0000 CLD
mpu.memory[0x0000] = 0xD8
mpu.step()
self.assertEqual(0x0001, mpu.pc)
self.assertEqual(0, mpu.p & mpu.DECIMAL)
# CLI
def test_cli_clears_interrupt_mask_flag(self):
mpu = self._make_mpu()
mpu.p |= mpu.INTERRUPT
# $0000 CLI
mpu.memory[0x0000] = 0x58
mpu.step()
self.assertEqual(0x0001, mpu.pc)
self.assertEqual(0, mpu.p & mpu.INTERRUPT)
# CLV
def test_clv_clears_overflow_flag(self):
mpu = self._make_mpu()
mpu.p |= mpu.OVERFLOW
# $0000 CLV
mpu.memory[0x0000] = 0xB8
mpu.step()
self.assertEqual(0x0001, mpu.pc)
self.assertEqual(0, mpu.p & mpu.OVERFLOW)
# DEC Absolute
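    # DEC decrements the memory operand in place (the accumulator is not
    # involved), wrapping $00 to $FF, and sets N and Z from the new value.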
def test_dec_abs_decrements_memory(self):
mpu = self._make_mpu()
# $0000 DEC 0xABCD
self._write(mpu.memory, 0x0000, (0xCE, 0xCD, 0xAB))
mpu.memory[0xABCD] = 0x10
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x0F, mpu.memory[0xABCD])
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
def test_dec_abs_below_00_rolls_over_and_sets_negative_flag(self):
mpu = self._make_mpu()
# $0000 DEC 0xABCD
self._write(mpu.memory, 0x0000, (0xCE, 0xCD, 0xAB))
mpu.memory[0xABCD] = 0x00
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0xFF, mpu.memory[0xABCD])
self.assertEqual(0, mpu.p & mpu.ZERO)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
def test_dec_abs_sets_zero_flag_when_decrementing_to_zero(self):
mpu = self._make_mpu()
# $0000 DEC 0xABCD
self._write(mpu.memory, 0x0000, (0xCE, 0xCD, 0xAB))
mpu.memory[0xABCD] = 0x01
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x00, mpu.memory[0xABCD])
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
# DEC Zero Page
def test_dec_zp_decrements_memory(self):
mpu = self._make_mpu()
# $0000 DEC 0x0010
self._write(mpu.memory, 0x0000, (0xC6, 0x10))
mpu.memory[0x0010] = 0x10
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x0F, mpu.memory[0x0010])
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
def test_dec_zp_below_00_rolls_over_and_sets_negative_flag(self):
mpu = self._make_mpu()
# $0000 DEC 0x0010
self._write(mpu.memory, 0x0000, (0xC6, 0x10))
mpu.memory[0x0010] = 0x00
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0xFF, mpu.memory[0x0010])
self.assertEqual(0, mpu.p & mpu.ZERO)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
def test_dec_zp_sets_zero_flag_when_decrementing_to_zero(self):
mpu = self._make_mpu()
# $0000 DEC 0x0010
self._write(mpu.memory, 0x0000, (0xC6, 0x10))
mpu.memory[0x0010] = 0x01
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x00, mpu.memory[0x0010])
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
# DEC Absolute, X-Indexed
def test_dec_abs_x_decrements_memory(self):
mpu = self._make_mpu()
# $0000 DEC 0xABCD,X
self._write(mpu.memory, 0x0000, (0xDE, 0xCD, 0xAB))
mpu.x = 0x03
mpu.memory[0xABCD + mpu.x] = 0x10
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x0F, mpu.memory[0xABCD + mpu.x])
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
def test_dec_abs_x_below_00_rolls_over_and_sets_negative_flag(self):
mpu = self._make_mpu()
# $0000 DEC 0xABCD,X
self._write(mpu.memory, 0x0000, (0xDE, 0xCD, 0xAB))
mpu.memory[0xABCD + mpu.x] = 0x00
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0xFF, mpu.memory[0xABCD + mpu.x])
self.assertEqual(0, mpu.p & mpu.ZERO)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
def test_dec_abs_x_sets_zero_flag_when_decrementing_to_zero(self):
mpu = self._make_mpu()
# $0000 DEC 0xABCD,X
self._write(mpu.memory, 0x0000, (0xDE, 0xCD, 0xAB))
mpu.memory[0xABCD + mpu.x] = 0x01
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x00, mpu.memory[0xABCD + mpu.x])
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
# DEC Zero Page, X-Indexed
def test_dec_zp_x_decrements_memory(self):
mpu = self._make_mpu()
# $0000 DEC 0x0010,X
self._write(mpu.memory, 0x0000, (0xD6, 0x10))
mpu.x = 0x03
mpu.memory[0x0010 + mpu.x] = 0x10
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x0F, mpu.memory[0x0010 + mpu.x])
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
def test_dec_zp_x_below_00_rolls_over_and_sets_negative_flag(self):
mpu = self._make_mpu()
# $0000 DEC 0x0010,X
self._write(mpu.memory, 0x0000, (0xD6, 0x10))
mpu.x = 0x03
mpu.memory[0x0010 + mpu.x] = 0x00
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0xFF, mpu.memory[0x0010 + mpu.x])
self.assertEqual(0, mpu.p & mpu.ZERO)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
def test_dec_zp_x_sets_zero_flag_when_decrementing_to_zero(self):
mpu = self._make_mpu()
# $0000 DEC 0x0010,X
self._write(mpu.memory, 0x0000, (0xD6, 0x10))
mpu.x = 0x03
mpu.memory[0x0010 + mpu.x] = 0x01
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x00, mpu.memory[0x0010 + mpu.x])
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
# DEX
def test_dex_decrements_x(self):
mpu = self._make_mpu()
mpu.x = 0x10
# $0000 DEX
mpu.memory[0x0000] = 0xCA
mpu.step()
self.assertEqual(0x0001, mpu.pc)
self.assertEqual(0x0F, mpu.x)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
def test_dex_below_00_rolls_over_and_sets_negative_flag(self):
mpu = self._make_mpu()
mpu.x = 0x00
# $0000 DEX
mpu.memory[0x0000] = 0xCA
mpu.step()
self.assertEqual(0x0001, mpu.pc)
self.assertEqual(0xFF, mpu.x)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
def test_dex_sets_zero_flag_when_decrementing_to_zero(self):
mpu = self._make_mpu()
mpu.x = 0x01
# $0000 DEX
mpu.memory[0x0000] = 0xCA
mpu.step()
self.assertEqual(0x0001, mpu.pc)
self.assertEqual(0x00, mpu.x)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
# DEY
def test_dey_decrements_y(self):
mpu = self._make_mpu()
mpu.y = 0x10
# $0000 DEY
mpu.memory[0x0000] = 0x88
mpu.step()
self.assertEqual(0x0001, mpu.pc)
self.assertEqual(0x0F, mpu.y)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
def test_dey_below_00_rolls_over_and_sets_negative_flag(self):
mpu = self._make_mpu()
mpu.y = 0x00
# $0000 DEY
mpu.memory[0x0000] = 0x88
mpu.step()
self.assertEqual(0x0001, mpu.pc)
self.assertEqual(0xFF, mpu.y)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
def test_dey_sets_zero_flag_when_decrementing_to_zero(self):
mpu = self._make_mpu()
mpu.y = 0x01
# $0000 DEY
mpu.memory[0x0000] = 0x88
mpu.step()
self.assertEqual(0x0001, mpu.pc)
self.assertEqual(0x00, mpu.y)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
# EOR Absolute
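    # EOR exclusive-ORs the memory operand with the accumulator, leaving the
    # result in A and setting N and Z; the memory operand is unchanged.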
def test_eor_absolute_flips_bits_over_setting_z_flag(self):
mpu = self._make_mpu()
mpu.a = 0xFF
self._write(mpu.memory, 0x0000, (0x4D, 0xCD, 0xAB))
mpu.memory[0xABCD] = 0xFF
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x00, mpu.a)
self.assertEqual(0xFF, mpu.memory[0xABCD])
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
def test_eor_absolute_flips_bits_over_setting_n_flag(self):
mpu = self._make_mpu()
mpu.a = 0x00
self._write(mpu.memory, 0x0000, (0x4D, 0xCD, 0xAB))
mpu.memory[0xABCD] = 0xFF
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0xFF, mpu.a)
self.assertEqual(0xFF, mpu.memory[0xABCD])
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
# EOR Zero Page
def test_eor_zp_flips_bits_over_setting_z_flag(self):
mpu = self._make_mpu()
mpu.a = 0xFF
self._write(mpu.memory, 0x0000, (0x45, 0x10))
mpu.memory[0x0010] = 0xFF
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x00, mpu.a)
self.assertEqual(0xFF, mpu.memory[0x0010])
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
def test_eor_zp_flips_bits_over_setting_n_flag(self):
mpu = self._make_mpu()
mpu.a = 0x00
self._write(mpu.memory, 0x0000, (0x45, 0x10))
mpu.memory[0x0010] = 0xFF
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0xFF, mpu.a)
self.assertEqual(0xFF, mpu.memory[0x0010])
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
# EOR Immediate
def test_eor_immediate_flips_bits_over_setting_z_flag(self):
mpu = self._make_mpu()
mpu.a = 0xFF
self._write(mpu.memory, 0x0000, (0x49, 0xFF))
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x00, mpu.a)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
def test_eor_immediate_flips_bits_over_setting_n_flag(self):
mpu = self._make_mpu()
mpu.a = 0x00
self._write(mpu.memory, 0x0000, (0x49, 0xFF))
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0xFF, mpu.a)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
# EOR Absolute, X-Indexed
def test_eor_abs_x_indexed_flips_bits_over_setting_z_flag(self):
mpu = self._make_mpu()
mpu.a = 0xFF
mpu.x = 0x03
self._write(mpu.memory, 0x0000, (0x5D, 0xCD, 0xAB))
mpu.memory[0xABCD + mpu.x] = 0xFF
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x00, mpu.a)
self.assertEqual(0xFF, mpu.memory[0xABCD + mpu.x])
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
def test_eor_abs_x_indexed_flips_bits_over_setting_n_flag(self):
mpu = self._make_mpu()
mpu.a = 0x00
mpu.x = 0x03
self._write(mpu.memory, 0x0000, (0x5D, 0xCD, 0xAB))
mpu.memory[0xABCD + mpu.x] = 0xFF
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0xFF, mpu.a)
self.assertEqual(0xFF, mpu.memory[0xABCD + mpu.x])
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
# EOR Absolute, Y-Indexed
def test_eor_abs_y_indexed_flips_bits_over_setting_z_flag(self):
mpu = self._make_mpu()
mpu.a = 0xFF
mpu.y = 0x03
self._write(mpu.memory, 0x0000, (0x59, 0xCD, 0xAB))
mpu.memory[0xABCD + mpu.y] = 0xFF
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x00, mpu.a)
self.assertEqual(0xFF, mpu.memory[0xABCD + mpu.y])
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
def test_eor_abs_y_indexed_flips_bits_over_setting_n_flag(self):
mpu = self._make_mpu()
mpu.a = 0x00
mpu.y = 0x03
self._write(mpu.memory, 0x0000, (0x59, 0xCD, 0xAB))
mpu.memory[0xABCD + mpu.y] = 0xFF
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0xFF, mpu.a)
self.assertEqual(0xFF, mpu.memory[0xABCD + mpu.y])
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
# EOR Indirect, Indexed (X)
def test_eor_ind_indexed_x_flips_bits_over_setting_z_flag(self):
mpu = self._make_mpu()
mpu.a = 0xFF
mpu.x = 0x03
self._write(mpu.memory, 0x0000, (0x41, 0x10)) # => EOR ($0010,X)
self._write(mpu.memory, 0x0013, (0xCD, 0xAB)) # => Vector to $ABCD
mpu.memory[0xABCD] = 0xFF
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x00, mpu.a)
self.assertEqual(0xFF, mpu.memory[0xABCD])
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
def test_eor_ind_indexed_x_flips_bits_over_setting_n_flag(self):
mpu = self._make_mpu()
mpu.a = 0x00
mpu.x = 0x03
self._write(mpu.memory, 0x0000, (0x41, 0x10)) # => EOR ($0010,X)
self._write(mpu.memory, 0x0013, (0xCD, 0xAB)) # => Vector to $ABCD
mpu.memory[0xABCD] = 0xFF
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0xFF, mpu.a)
self.assertEqual(0xFF, mpu.memory[0xABCD])
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
# EOR Indexed, Indirect (Y)
def test_eor_indexed_ind_y_flips_bits_over_setting_z_flag(self):
mpu = self._make_mpu()
mpu.a = 0xFF
mpu.y = 0x03
self._write(mpu.memory, 0x0000, (0x51, 0x10)) # => EOR ($0010),Y
self._write(mpu.memory, 0x0010, (0xCD, 0xAB)) # => Vector to $ABCD
mpu.memory[0xABCD + mpu.y] = 0xFF
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x00, mpu.a)
self.assertEqual(0xFF, mpu.memory[0xABCD + mpu.y])
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
def test_eor_indexed_ind_y_flips_bits_over_setting_n_flag(self):
mpu = self._make_mpu()
mpu.a = 0x00
mpu.y = 0x03
self._write(mpu.memory, 0x0000, (0x51, 0x10)) # => EOR ($0010),Y
self._write(mpu.memory, 0x0010, (0xCD, 0xAB)) # => Vector to $ABCD
mpu.memory[0xABCD + mpu.y] = 0xFF
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0xFF, mpu.a)
self.assertEqual(0xFF, mpu.memory[0xABCD + mpu.y])
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
# EOR Zero Page, X-Indexed
def test_eor_zp_x_indexed_flips_bits_over_setting_z_flag(self):
mpu = self._make_mpu()
mpu.a = 0xFF
mpu.x = 0x03
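        # $0000 EOR $0010,X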
self._write(mpu.memory, 0x0000, (0x55, 0x10))
mpu.memory[0x0010 + mpu.x] = 0xFF
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x00, mpu.a)
self.assertEqual(0xFF, mpu.memory[0x0010 + mpu.x])
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
def test_eor_zp_x_indexed_flips_bits_over_setting_n_flag(self):
mpu = self._make_mpu()
mpu.a = 0x00
mpu.x = 0x03
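        # $0000 EOR $0010,X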
self._write(mpu.memory, 0x0000, (0x55, 0x10))
mpu.memory[0x0010 + mpu.x] = 0xFF
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0xFF, mpu.a)
self.assertEqual(0xFF, mpu.memory[0x0010 + mpu.x])
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
# INC Absolute
def test_inc_abs_increments_memory(self):
mpu = self._make_mpu()
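        # $0000 INC $ABCD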
self._write(mpu.memory, 0x0000, (0xEE, 0xCD, 0xAB))
mpu.memory[0xABCD] = 0x09
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x0A, mpu.memory[0xABCD])
self.assertEqual(0, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
def test_inc_abs_increments_memory_rolls_over_and_sets_zero_flag(self):
mpu = self._make_mpu()
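        # $0000 INC $ABCD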
self._write(mpu.memory, 0x0000, (0xEE, 0xCD, 0xAB))
mpu.memory[0xABCD] = 0xFF
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x00, mpu.memory[0xABCD])
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
def test_inc_abs_sets_negative_flag_when_incrementing_above_7F(self):
mpu = self._make_mpu()
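        # $0000 INC $ABCD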
self._write(mpu.memory, 0x0000, (0xEE, 0xCD, 0xAB))
mpu.memory[0xABCD] = 0x7F
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x80, mpu.memory[0xABCD])
self.assertEqual(0, mpu.p & mpu.ZERO)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
# INC Zero Page
def test_inc_zp_increments_memory(self):
mpu = self._make_mpu()
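        # $0000 INC $0010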
self._write(mpu.memory, 0x0000, (0xE6, 0x10))
mpu.memory[0x0010] = 0x09
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x0A, mpu.memory[0x0010])
self.assertEqual(0, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
def test_inc_zp_increments_memory_rolls_over_and_sets_zero_flag(self):
mpu = self._make_mpu()
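        # $0000 INC $0010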
self._write(mpu.memory, 0x0000, (0xE6, 0x10))
mpu.memory[0x0010] = 0xFF
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x00, mpu.memory[0x0010])
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
def test_inc_zp_sets_negative_flag_when_incrementing_above_7F(self):
mpu = self._make_mpu()
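        # $0000 INC $0010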
self._write(mpu.memory, 0x0000, (0xE6, 0x10))
mpu.memory[0x0010] = 0x7F
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x80, mpu.memory[0x0010])
self.assertEqual(0, mpu.p & mpu.ZERO)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
# INC Absolute, X-Indexed
def test_inc_abs_x_increments_memory(self):
mpu = self._make_mpu()
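        # $0000 INC $ABCD,X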
self._write(mpu.memory, 0x0000, (0xFE, 0xCD, 0xAB))
mpu.x = 0x03
mpu.memory[0xABCD + mpu.x] = 0x09
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x0A, mpu.memory[0xABCD + mpu.x])
self.assertEqual(0, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
def test_inc_abs_x_increments_memory_rolls_over_and_sets_zero_flag(self):
mpu = self._make_mpu()
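        # $0000 INC $ABCD,X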
self._write(mpu.memory, 0x0000, (0xFE, 0xCD, 0xAB))
mpu.x = 0x03
mpu.memory[0xABCD + mpu.x] = 0xFF
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x00, mpu.memory[0xABCD + mpu.x])
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
def test_inc_abs_x_sets_negative_flag_when_incrementing_above_7F(self):
mpu = self._make_mpu()
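        # $0000 INC $ABCD,X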
self._write(mpu.memory, 0x0000, (0xFE, 0xCD, 0xAB))
mpu.x = 0x03
mpu.memory[0xABCD + mpu.x] = 0x7F
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x80, mpu.memory[0xABCD + mpu.x])
self.assertEqual(0, mpu.p & mpu.ZERO)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
# INC Zero Page, X-Indexed
def test_inc_zp_x_increments_memory(self):
mpu = self._make_mpu()
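        # $0000 INC $0010,X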
self._write(mpu.memory, 0x0000, (0xF6, 0x10))
mpu.x = 0x03
mpu.memory[0x0010 + mpu.x] = 0x09
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x0A, mpu.memory[0x0010 + mpu.x])
self.assertEqual(0, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
def test_inc_zp_x_increments_memory_rolls_over_and_sets_zero_flag(self):
mpu = self._make_mpu()
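        # $0000 INC $0010,X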
self._write(mpu.memory, 0x0000, (0xF6, 0x10))
        mpu.x = 0x03
        mpu.memory[0x0010 + mpu.x] = 0xFF
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x00, mpu.memory[0x0010 + mpu.x])
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
def test_inc_zp_x_sets_negative_flag_when_incrementing_above_7F(self):
mpu = self._make_mpu()
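        # $0000 INC $0010,X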
self._write(mpu.memory, 0x0000, (0xF6, 0x10))
        mpu.x = 0x03
        mpu.memory[0x0010 + mpu.x] = 0x7F
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x80, mpu.memory[0x0010 + mpu.x])
self.assertEqual(0, mpu.p & mpu.ZERO)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
# INX
def test_inx_increments_x(self):
mpu = self._make_mpu()
mpu.x = 0x09
mpu.memory[0x0000] = 0xE8 # => INX
mpu.step()
self.assertEqual(0x0001, mpu.pc)
self.assertEqual(0x0A, mpu.x)
self.assertEqual(0, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
def test_inx_above_FF_rolls_over_and_sets_zero_flag(self):
mpu = self._make_mpu()
mpu.x = 0xFF
mpu.memory[0x0000] = 0xE8 # => INX
mpu.step()
self.assertEqual(0x0001, mpu.pc)
self.assertEqual(0x00, mpu.x)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
def test_inx_sets_negative_flag_when_incrementing_above_7F(self):
mpu = self._make_mpu()
mpu.x = 0x7f
mpu.memory[0x0000] = 0xE8 # => INX
mpu.step()
self.assertEqual(0x0001, mpu.pc)
self.assertEqual(0x80, mpu.x)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
# INY
def test_iny_increments_y(self):
mpu = self._make_mpu()
mpu.y = 0x09
mpu.memory[0x0000] = 0xC8 # => INY
mpu.step()
self.assertEqual(0x0001, mpu.pc)
self.assertEqual(0x0A, mpu.y)
self.assertEqual(0, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
def test_iny_above_FF_rolls_over_and_sets_zero_flag(self):
mpu = self._make_mpu()
mpu.y = 0xFF
mpu.memory[0x0000] = 0xC8 # => INY
mpu.step()
self.assertEqual(0x0001, mpu.pc)
self.assertEqual(0x00, mpu.y)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
def test_iny_sets_negative_flag_when_incrementing_above_7F(self):
mpu = self._make_mpu()
mpu.y = 0x7f
mpu.memory[0x0000] = 0xC8 # => INY
mpu.step()
self.assertEqual(0x0001, mpu.pc)
self.assertEqual(0x80, mpu.y)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
# JMP Absolute
def test_jmp_abs_jumps_to_absolute_address(self):
mpu = self._make_mpu()
# $0000 JMP $ABCD
self._write(mpu.memory, 0x0000, (0x4C, 0xCD, 0xAB))
mpu.step()
self.assertEqual(0xABCD, mpu.pc)
# JMP Indirect
def test_jmp_ind_jumps_to_indirect_address(self):
mpu = self._make_mpu()
# $0000 JMP ($ABCD)
self._write(mpu.memory, 0x0000, (0x6C, 0x00, 0x02))
self._write(mpu.memory, 0x0200, (0xCD, 0xAB))
mpu.step()
self.assertEqual(0xABCD, mpu.pc)
# JSR
def test_jsr_pushes_pc_plus_2_and_sets_pc(self):
mpu = self._make_mpu()
# $C000 JSR $FFD2
self._write(mpu.memory, 0xC000, (0x20, 0xD2, 0xFF))
mpu.pc = 0xC000
mpu.step()
self.assertEqual(0xFFD2, mpu.pc)
self.assertEqual(0xFD, mpu.sp)
self.assertEqual(0xC0, mpu.memory[0x01FF]) # PCH
self.assertEqual(0x02, mpu.memory[0x01FE]) # PCL+2
# LDA Absolute
def test_lda_absolute_loads_a_sets_n_flag(self):
mpu = self._make_mpu()
mpu.a = 0x00
# $0000 LDA $ABCD
self._write(mpu.memory, 0x0000, (0xAD, 0xCD, 0xAB))
mpu.memory[0xABCD] = 0x80
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x80, mpu.a)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
def test_lda_absolute_loads_a_sets_z_flag(self):
mpu = self._make_mpu()
mpu.a = 0xFF
# $0000 LDA $ABCD
self._write(mpu.memory, 0x0000, (0xAD, 0xCD, 0xAB))
mpu.memory[0xABCD] = 0x00
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x00, mpu.a)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
# LDA Zero Page
def test_lda_zp_loads_a_sets_n_flag(self):
mpu = self._make_mpu()
mpu.a = 0x00
# $0000 LDA $0010
self._write(mpu.memory, 0x0000, (0xA5, 0x10))
mpu.memory[0x0010] = 0x80
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x80, mpu.a)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
def test_lda_zp_loads_a_sets_z_flag(self):
mpu = self._make_mpu()
mpu.a = 0xFF
# $0000 LDA $0010
self._write(mpu.memory, 0x0000, (0xA5, 0x10))
mpu.memory[0x0010] = 0x00
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x00, mpu.a)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
# LDA Immediate
def test_lda_immediate_loads_a_sets_n_flag(self):
mpu = self._make_mpu()
mpu.a = 0x00
# $0000 LDA #$80
self._write(mpu.memory, 0x0000, (0xA9, 0x80))
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x80, mpu.a)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
def test_lda_immediate_loads_a_sets_z_flag(self):
mpu = self._make_mpu()
mpu.a = 0xFF
# $0000 LDA #$00
self._write(mpu.memory, 0x0000, (0xA9, 0x00))
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x00, mpu.a)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
# LDA Absolute, X-Indexed
def test_lda_abs_x_indexed_loads_a_sets_n_flag(self):
mpu = self._make_mpu()
mpu.a = 0x00
mpu.x = 0x03
# $0000 LDA $ABCD,X
self._write(mpu.memory, 0x0000, (0xBD, 0xCD, 0xAB))
mpu.memory[0xABCD + mpu.x] = 0x80
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x80, mpu.a)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
def test_lda_abs_x_indexed_loads_a_sets_z_flag(self):
mpu = self._make_mpu()
mpu.a = 0xFF
mpu.x = 0x03
# $0000 LDA $ABCD,X
self._write(mpu.memory, 0x0000, (0xBD, 0xCD, 0xAB))
mpu.memory[0xABCD + mpu.x] = 0x00
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x00, mpu.a)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
def test_lda_abs_x_indexed_does_not_page_wrap(self):
mpu = self._make_mpu()
mpu.a = 0
mpu.x = 0xFF
# $0000 LDA $0080,X
self._write(mpu.memory, 0x0000, (0xBD, 0x80, 0x00))
mpu.memory[0x0080 + mpu.x] = 0x42
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x42, mpu.a)
# LDA Absolute, Y-Indexed
def test_lda_abs_y_indexed_loads_a_sets_n_flag(self):
mpu = self._make_mpu()
mpu.a = 0x00
mpu.y = 0x03
# $0000 LDA $ABCD,Y
self._write(mpu.memory, 0x0000, (0xB9, 0xCD, 0xAB))
mpu.memory[0xABCD + mpu.y] = 0x80
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x80, mpu.a)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
def test_lda_abs_y_indexed_loads_a_sets_z_flag(self):
mpu = self._make_mpu()
mpu.a = 0xFF
mpu.y = 0x03
# $0000 LDA $ABCD,Y
self._write(mpu.memory, 0x0000, (0xB9, 0xCD, 0xAB))
mpu.memory[0xABCD + mpu.y] = 0x00
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x00, mpu.a)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
def test_lda_abs_y_indexed_does_not_page_wrap(self):
mpu = self._make_mpu()
mpu.a = 0
mpu.y = 0xFF
        # $0000 LDA $0080,Y
self._write(mpu.memory, 0x0000, (0xB9, 0x80, 0x00))
mpu.memory[0x0080 + mpu.y] = 0x42
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x42, mpu.a)
# LDA Indirect, Indexed (X)
def test_lda_ind_indexed_x_loads_a_sets_n_flag(self):
mpu = self._make_mpu()
mpu.a = 0x00
mpu.x = 0x03
# $0000 LDA ($0010,X)
# $0013 Vector to $ABCD
self._write(mpu.memory, 0x0000, (0xA1, 0x10))
self._write(mpu.memory, 0x0013, (0xCD, 0xAB))
mpu.memory[0xABCD] = 0x80
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x80, mpu.a)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
def test_lda_ind_indexed_x_loads_a_sets_z_flag(self):
mpu = self._make_mpu()
mpu.a = 0x00
mpu.x = 0x03
# $0000 LDA ($0010,X)
# $0013 Vector to $ABCD
self._write(mpu.memory, 0x0000, (0xA1, 0x10))
self._write(mpu.memory, 0x0013, (0xCD, 0xAB))
mpu.memory[0xABCD] = 0x00
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x00, mpu.a)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
# LDA Indexed, Indirect (Y)
def test_lda_indexed_ind_y_loads_a_sets_n_flag(self):
mpu = self._make_mpu()
mpu.a = 0x00
mpu.y = 0x03
# $0000 LDA ($0010),Y
# $0010 Vector to $ABCD
self._write(mpu.memory, 0x0000, (0xB1, 0x10))
self._write(mpu.memory, 0x0010, (0xCD, 0xAB))
mpu.memory[0xABCD + mpu.y] = 0x80
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x80, mpu.a)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
def test_lda_indexed_ind_y_loads_a_sets_z_flag(self):
mpu = self._make_mpu()
mpu.a = 0x00
mpu.y = 0x03
# $0000 LDA ($0010),Y
# $0010 Vector to $ABCD
self._write(mpu.memory, 0x0000, (0xB1, 0x10))
self._write(mpu.memory, 0x0010, (0xCD, 0xAB))
mpu.memory[0xABCD + mpu.y] = 0x00
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x00, mpu.a)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
# LDA Zero Page, X-Indexed
def test_lda_zp_x_indexed_loads_a_sets_n_flag(self):
mpu = self._make_mpu()
mpu.a = 0x00
mpu.x = 0x03
# $0000 LDA $10,X
self._write(mpu.memory, 0x0000, (0xB5, 0x10))
mpu.memory[0x0010 + mpu.x] = 0x80
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x80, mpu.a)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
def test_lda_zp_x_indexed_loads_a_sets_z_flag(self):
mpu = self._make_mpu()
mpu.a = 0xFF
mpu.x = 0x03
# $0000 LDA $10,X
self._write(mpu.memory, 0x0000, (0xB5, 0x10))
mpu.memory[0x0010 + mpu.x] = 0x00
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x00, mpu.a)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
# LDX Absolute
def test_ldx_absolute_loads_x_sets_n_flag(self):
mpu = self._make_mpu()
mpu.x = 0x00
# $0000 LDX $ABCD
self._write(mpu.memory, 0x0000, (0xAE, 0xCD, 0xAB))
mpu.memory[0xABCD] = 0x80
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x80, mpu.x)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
def test_ldx_absolute_loads_x_sets_z_flag(self):
mpu = self._make_mpu()
mpu.x = 0xFF
# $0000 LDX $ABCD
self._write(mpu.memory, 0x0000, (0xAE, 0xCD, 0xAB))
mpu.memory[0xABCD] = 0x00
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x00, mpu.x)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
# LDX Zero Page
def test_ldx_zp_loads_x_sets_n_flag(self):
mpu = self._make_mpu()
mpu.x = 0x00
# $0000 LDX $0010
self._write(mpu.memory, 0x0000, (0xA6, 0x10))
mpu.memory[0x0010] = 0x80
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x80, mpu.x)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
def test_ldx_zp_loads_x_sets_z_flag(self):
mpu = self._make_mpu()
mpu.x = 0xFF
# $0000 LDX $0010
self._write(mpu.memory, 0x0000, (0xA6, 0x10))
mpu.memory[0x0010] = 0x00
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x00, mpu.x)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
# LDX Immediate
def test_ldx_immediate_loads_x_sets_n_flag(self):
mpu = self._make_mpu()
mpu.x = 0x00
# $0000 LDX #$80
self._write(mpu.memory, 0x0000, (0xA2, 0x80))
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x80, mpu.x)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
def test_ldx_immediate_loads_x_sets_z_flag(self):
mpu = self._make_mpu()
mpu.x = 0xFF
# $0000 LDX #$00
self._write(mpu.memory, 0x0000, (0xA2, 0x00))
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x00, mpu.x)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
# LDX Absolute, Y-Indexed
def test_ldx_abs_y_indexed_loads_x_sets_n_flag(self):
mpu = self._make_mpu()
mpu.x = 0x00
mpu.y = 0x03
# $0000 LDX $ABCD,Y
self._write(mpu.memory, 0x0000, (0xBE, 0xCD, 0xAB))
mpu.memory[0xABCD + mpu.y] = 0x80
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x80, mpu.x)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
def test_ldx_abs_y_indexed_loads_x_sets_z_flag(self):
mpu = self._make_mpu()
mpu.x = 0xFF
mpu.y = 0x03
# $0000 LDX $ABCD,Y
self._write(mpu.memory, 0x0000, (0xBE, 0xCD, 0xAB))
mpu.memory[0xABCD + mpu.y] = 0x00
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x00, mpu.x)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
# LDX Zero Page, Y-Indexed
def test_ldx_zp_y_indexed_loads_x_sets_n_flag(self):
mpu = self._make_mpu()
mpu.x = 0x00
mpu.y = 0x03
# $0000 LDX $0010,Y
self._write(mpu.memory, 0x0000, (0xB6, 0x10))
mpu.memory[0x0010 + mpu.y] = 0x80
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x80, mpu.x)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
def test_ldx_zp_y_indexed_loads_x_sets_z_flag(self):
mpu = self._make_mpu()
mpu.x = 0xFF
mpu.y = 0x03
# $0000 LDX $0010,Y
self._write(mpu.memory, 0x0000, (0xB6, 0x10))
mpu.memory[0x0010 + mpu.y] = 0x00
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x00, mpu.x)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
# LDY Absolute
def test_ldy_absolute_loads_y_sets_n_flag(self):
mpu = self._make_mpu()
mpu.y = 0x00
# $0000 LDY $ABCD
self._write(mpu.memory, 0x0000, (0xAC, 0xCD, 0xAB))
mpu.memory[0xABCD] = 0x80
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x80, mpu.y)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
def test_ldy_absolute_loads_y_sets_z_flag(self):
mpu = self._make_mpu()
mpu.y = 0xFF
# $0000 LDY $ABCD
self._write(mpu.memory, 0x0000, (0xAC, 0xCD, 0xAB))
mpu.memory[0xABCD] = 0x00
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x00, mpu.y)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
# LDY Zero Page
def test_ldy_zp_loads_y_sets_n_flag(self):
mpu = self._make_mpu()
mpu.y = 0x00
# $0000 LDY $0010
self._write(mpu.memory, 0x0000, (0xA4, 0x10))
mpu.memory[0x0010] = 0x80
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x80, mpu.y)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
def test_ldy_zp_loads_y_sets_z_flag(self):
mpu = self._make_mpu()
mpu.y = 0xFF
# $0000 LDY $0010
self._write(mpu.memory, 0x0000, (0xA4, 0x10))
mpu.memory[0x0010] = 0x00
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x00, mpu.y)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
# LDY Immediate
def test_ldy_immediate_loads_y_sets_n_flag(self):
mpu = self._make_mpu()
mpu.y = 0x00
# $0000 LDY #$80
self._write(mpu.memory, 0x0000, (0xA0, 0x80))
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x80, mpu.y)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
def test_ldy_immediate_loads_y_sets_z_flag(self):
mpu = self._make_mpu()
mpu.y = 0xFF
# $0000 LDY #$00
self._write(mpu.memory, 0x0000, (0xA0, 0x00))
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x00, mpu.y)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
# LDY Absolute, X-Indexed
    def test_ldy_abs_x_indexed_loads_y_sets_n_flag(self):
mpu = self._make_mpu()
mpu.y = 0x00
mpu.x = 0x03
# $0000 LDY $ABCD,X
self._write(mpu.memory, 0x0000, (0xBC, 0xCD, 0xAB))
mpu.memory[0xABCD + mpu.x] = 0x80
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x80, mpu.y)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
    def test_ldy_abs_x_indexed_loads_y_sets_z_flag(self):
mpu = self._make_mpu()
mpu.y = 0xFF
mpu.x = 0x03
# $0000 LDY $ABCD,X
self._write(mpu.memory, 0x0000, (0xBC, 0xCD, 0xAB))
mpu.memory[0xABCD + mpu.x] = 0x00
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x00, mpu.y)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
# LDY Zero Page, X-Indexed
    def test_ldy_zp_x_indexed_loads_y_sets_n_flag(self):
mpu = self._make_mpu()
mpu.y = 0x00
mpu.x = 0x03
# $0000 LDY $0010,X
self._write(mpu.memory, 0x0000, (0xB4, 0x10))
mpu.memory[0x0010 + mpu.x] = 0x80
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x80, mpu.y)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
    def test_ldy_zp_x_indexed_loads_y_sets_z_flag(self):
mpu = self._make_mpu()
mpu.y = 0xFF
mpu.x = 0x03
# $0000 LDY $0010,X
self._write(mpu.memory, 0x0000, (0xB4, 0x10))
mpu.memory[0x0010 + mpu.x] = 0x00
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x00, mpu.y)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
# LSR Accumulator
def test_lsr_accumulator_rotates_in_zero_not_carry(self):
mpu = self._make_mpu()
mpu.p |= mpu.CARRY
# $0000 LSR A
        mpu.memory[0x0000] = 0x4A
mpu.a = 0x00
mpu.step()
self.assertEqual(0x0001, mpu.pc)
self.assertEqual(0x00, mpu.a)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.CARRY)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
def test_lsr_accumulator_sets_carry_and_zero_flags_after_rotation(self):
mpu = self._make_mpu()
mpu.p &= ~mpu.CARRY
# $0000 LSR A
        mpu.memory[0x0000] = 0x4A
mpu.a = 0x01
mpu.step()
self.assertEqual(0x0001, mpu.pc)
self.assertEqual(0x00, mpu.a)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
self.assertEqual(mpu.CARRY, mpu.p & mpu.CARRY)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
def test_lsr_accumulator_rotates_bits_right(self):
mpu = self._make_mpu()
mpu.p |= mpu.CARRY
# $0000 LSR A
        mpu.memory[0x0000] = 0x4A
mpu.a = 0x04
mpu.step()
self.assertEqual(0x0001, mpu.pc)
self.assertEqual(0x02, mpu.a)
self.assertEqual(0, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.CARRY)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
# LSR Absolute
def test_lsr_absolute_rotates_in_zero_not_carry(self):
mpu = self._make_mpu()
mpu.p |= mpu.CARRY
# $0000 LSR $ABCD
self._write(mpu.memory, 0x0000, (0x4E, 0xCD, 0xAB))
mpu.memory[0xABCD] = 0x00
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x00, mpu.memory[0xABCD])
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.CARRY)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
def test_lsr_absolute_sets_carry_and_zero_flags_after_rotation(self):
mpu = self._make_mpu()
mpu.p &= ~mpu.CARRY
# $0000 LSR $ABCD
self._write(mpu.memory, 0x0000, (0x4E, 0xCD, 0xAB))
mpu.memory[0xABCD] = 0x01
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x00, mpu.memory[0xABCD])
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
self.assertEqual(mpu.CARRY, mpu.p & mpu.CARRY)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
def test_lsr_absolute_rotates_bits_right(self):
mpu = self._make_mpu()
mpu.p |= mpu.CARRY
# $0000 LSR $ABCD
self._write(mpu.memory, 0x0000, (0x4E, 0xCD, 0xAB))
mpu.memory[0xABCD] = 0x04
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x02, mpu.memory[0xABCD])
self.assertEqual(0, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.CARRY)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
# LSR Zero Page
def test_lsr_zp_rotates_in_zero_not_carry(self):
mpu = self._make_mpu()
mpu.p |= mpu.CARRY
# $0000 LSR $0010
self._write(mpu.memory, 0x0000, (0x46, 0x10))
mpu.memory[0x0010] = 0x00
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x00, mpu.memory[0x0010])
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.CARRY)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
def test_lsr_zp_sets_carry_and_zero_flags_after_rotation(self):
mpu = self._make_mpu()
mpu.p &= ~mpu.CARRY
# $0000 LSR $0010
self._write(mpu.memory, 0x0000, (0x46, 0x10))
mpu.memory[0x0010] = 0x01
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x00, mpu.memory[0x0010])
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
self.assertEqual(mpu.CARRY, mpu.p & mpu.CARRY)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
def test_lsr_zp_rotates_bits_right(self):
mpu = self._make_mpu()
mpu.p |= mpu.CARRY
# $0000 LSR $0010
self._write(mpu.memory, 0x0000, (0x46, 0x10))
mpu.memory[0x0010] = 0x04
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x02, mpu.memory[0x0010])
self.assertEqual(0, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.CARRY)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
# LSR Absolute, X-Indexed
def test_lsr_abs_x_indexed_rotates_in_zero_not_carry(self):
mpu = self._make_mpu()
mpu.p |= mpu.CARRY
mpu.x = 0x03
# $0000 LSR $ABCD,X
self._write(mpu.memory, 0x0000, (0x5E, 0xCD, 0xAB))
mpu.memory[0xABCD + mpu.x] = 0x00
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x00, mpu.memory[0xABCD + mpu.x])
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.CARRY)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
def test_lsr_abs_x_indexed_sets_c_and_z_flags_after_rotation(self):
mpu = self._make_mpu()
mpu.p &= ~mpu.CARRY
mpu.x = 0x03
# $0000 LSR $ABCD,X
self._write(mpu.memory, 0x0000, (0x5E, 0xCD, 0xAB))
mpu.memory[0xABCD + mpu.x] = 0x01
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x00, mpu.memory[0xABCD + mpu.x])
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
self.assertEqual(mpu.CARRY, mpu.p & mpu.CARRY)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
def test_lsr_abs_x_indexed_rotates_bits_right(self):
mpu = self._make_mpu()
        mpu.p |= mpu.CARRY
        mpu.x = 0x03
# $0000 LSR $ABCD,X
self._write(mpu.memory, 0x0000, (0x5E, 0xCD, 0xAB))
mpu.memory[0xABCD + mpu.x] = 0x04
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x02, mpu.memory[0xABCD + mpu.x])
self.assertEqual(0, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.CARRY)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
# LSR Zero Page, X-Indexed
def test_lsr_zp_x_indexed_rotates_in_zero_not_carry(self):
mpu = self._make_mpu()
mpu.p |= mpu.CARRY
mpu.x = 0x03
# $0000 LSR $0010,X
self._write(mpu.memory, 0x0000, (0x56, 0x10))
mpu.memory[0x0010 + mpu.x] = 0x00
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x00, mpu.memory[0x0010 + mpu.x])
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.CARRY)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
def test_lsr_zp_x_indexed_sets_carry_and_zero_flags_after_rotation(self):
mpu = self._make_mpu()
mpu.p &= ~mpu.CARRY
mpu.x = 0x03
# $0000 LSR $0010,X
self._write(mpu.memory, 0x0000, (0x56, 0x10))
mpu.memory[0x0010 + mpu.x] = 0x01
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x00, mpu.memory[0x0010 + mpu.x])
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
self.assertEqual(mpu.CARRY, mpu.p & mpu.CARRY)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
def test_lsr_zp_x_indexed_rotates_bits_right(self):
mpu = self._make_mpu()
mpu.p |= mpu.CARRY
mpu.x = 0x03
# $0000 LSR $0010,X
self._write(mpu.memory, 0x0000, (0x56, 0x10))
mpu.memory[0x0010 + mpu.x] = 0x04
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x02, mpu.memory[0x0010 + mpu.x])
self.assertEqual(0, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.CARRY)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
# NOP
def test_nop_does_nothing(self):
mpu = self._make_mpu()
# $0000 NOP
mpu.memory[0x0000] = 0xEA
mpu.step()
self.assertEqual(0x0001, mpu.pc)
# ORA Absolute
def test_ora_absolute_zeroes_or_zeros_sets_z_flag(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.ZERO)
mpu.a = 0x00
# $0000 ORA $ABCD
self._write(mpu.memory, 0x0000, (0x0D, 0xCD, 0xAB))
mpu.memory[0xABCD] = 0x00
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x00, mpu.a)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
def test_ora_absolute_turns_bits_on_sets_n_flag(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.NEGATIVE)
mpu.a = 0x03
# $0000 ORA $ABCD
self._write(mpu.memory, 0x0000, (0x0D, 0xCD, 0xAB))
mpu.memory[0xABCD] = 0x82
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x83, mpu.a)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
# ORA Zero Page
def test_ora_zp_zeroes_or_zeros_sets_z_flag(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.ZERO)
mpu.a = 0x00
# $0000 ORA $0010
self._write(mpu.memory, 0x0000, (0x05, 0x10))
mpu.memory[0x0010] = 0x00
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x00, mpu.a)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
def test_ora_zp_turns_bits_on_sets_n_flag(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.NEGATIVE)
mpu.a = 0x03
# $0000 ORA $0010
self._write(mpu.memory, 0x0000, (0x05, 0x10))
mpu.memory[0x0010] = 0x82
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x83, mpu.a)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
# ORA Immediate
def test_ora_immediate_zeroes_or_zeros_sets_z_flag(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.ZERO)
mpu.a = 0x00
# $0000 ORA #$00
self._write(mpu.memory, 0x0000, (0x09, 0x00))
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x00, mpu.a)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
def test_ora_immediate_turns_bits_on_sets_n_flag(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.NEGATIVE)
mpu.a = 0x03
# $0000 ORA #$82
self._write(mpu.memory, 0x0000, (0x09, 0x82))
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x83, mpu.a)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
# ORA Absolute, X
def test_ora_abs_x_indexed_zeroes_or_zeros_sets_z_flag(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.ZERO)
mpu.a = 0x00
mpu.x = 0x03
# $0000 ORA $ABCD,X
self._write(mpu.memory, 0x0000, (0x1D, 0xCD, 0xAB))
mpu.memory[0xABCD + mpu.x] = 0x00
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x00, mpu.a)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
def test_ora_abs_x_indexed_turns_bits_on_sets_n_flag(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.NEGATIVE)
mpu.a = 0x03
mpu.x = 0x03
# $0000 ORA $ABCD,X
self._write(mpu.memory, 0x0000, (0x1D, 0xCD, 0xAB))
mpu.memory[0xABCD + mpu.x] = 0x82
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x83, mpu.a)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
# ORA Absolute, Y
def test_ora_abs_y_indexed_zeroes_or_zeros_sets_z_flag(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.ZERO)
mpu.a = 0x00
mpu.y = 0x03
# $0000 ORA $ABCD,Y
self._write(mpu.memory, 0x0000, (0x19, 0xCD, 0xAB))
mpu.memory[0xABCD + mpu.y] = 0x00
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x00, mpu.a)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
def test_ora_abs_y_indexed_turns_bits_on_sets_n_flag(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.NEGATIVE)
mpu.a = 0x03
mpu.y = 0x03
# $0000 ORA $ABCD,Y
self._write(mpu.memory, 0x0000, (0x19, 0xCD, 0xAB))
mpu.memory[0xABCD + mpu.y] = 0x82
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x83, mpu.a)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
# ORA Indirect, Indexed (X)
def test_ora_ind_indexed_x_zeroes_or_zeros_sets_z_flag(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.ZERO)
mpu.a = 0x00
mpu.x = 0x03
# $0000 ORA ($0010,X)
# $0013 Vector to $ABCD
self._write(mpu.memory, 0x0000, (0x01, 0x10))
self._write(mpu.memory, 0x0013, (0xCD, 0xAB))
mpu.memory[0xABCD] = 0x00
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x00, mpu.a)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
def test_ora_ind_indexed_x_turns_bits_on_sets_n_flag(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.NEGATIVE)
mpu.a = 0x03
mpu.x = 0x03
# $0000 ORA ($0010,X)
# $0013 Vector to $ABCD
self._write(mpu.memory, 0x0000, (0x01, 0x10))
self._write(mpu.memory, 0x0013, (0xCD, 0xAB))
mpu.memory[0xABCD] = 0x82
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x83, mpu.a)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
# ORA Indexed, Indirect (Y)
def test_ora_indexed_ind_y_zeroes_or_zeros_sets_z_flag(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.ZERO)
mpu.a = 0x00
mpu.y = 0x03
# $0000 ORA ($0010),Y
# $0010 Vector to $ABCD
self._write(mpu.memory, 0x0000, (0x11, 0x10))
self._write(mpu.memory, 0x0010, (0xCD, 0xAB))
mpu.memory[0xABCD + mpu.y] = 0x00
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x00, mpu.a)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
def test_ora_indexed_ind_y_turns_bits_on_sets_n_flag(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.NEGATIVE)
mpu.a = 0x03
mpu.y = 0x03
# $0000 ORA ($0010),Y
# $0010 Vector to $ABCD
self._write(mpu.memory, 0x0000, (0x11, 0x10))
self._write(mpu.memory, 0x0010, (0xCD, 0xAB))
mpu.memory[0xABCD + mpu.y] = 0x82
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x83, mpu.a)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
# ORA Zero Page, X
def test_ora_zp_x_indexed_zeroes_or_zeros_sets_z_flag(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.ZERO)
mpu.a = 0x00
mpu.x = 0x03
# $0000 ORA $0010,X
self._write(mpu.memory, 0x0000, (0x15, 0x10))
mpu.memory[0x0010 + mpu.x] = 0x00
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x00, mpu.a)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
def test_ora_zp_x_indexed_turns_bits_on_sets_n_flag(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.NEGATIVE)
mpu.a = 0x03
mpu.x = 0x03
# $0000 ORA $0010,X
self._write(mpu.memory, 0x0000, (0x15, 0x10))
mpu.memory[0x0010 + mpu.x] = 0x82
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x83, mpu.a)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
# PHA
def test_pha_pushes_a_and_updates_sp(self):
mpu = self._make_mpu()
mpu.a = 0xAB
# $0000 PHA
mpu.memory[0x0000] = 0x48
mpu.step()
self.assertEqual(0x0001, mpu.pc)
self.assertEqual(0xAB, mpu.a)
self.assertEqual(0xAB, mpu.memory[0x01FF])
self.assertEqual(0xFE, mpu.sp)
# PHP
def test_php_pushes_processor_status_and_updates_sp(self):
for flags in range(0x100):
mpu = self._make_mpu()
mpu.p = flags | mpu.BREAK | mpu.UNUSED
# $0000 PHP
mpu.memory[0x0000] = 0x08
mpu.step()
self.assertEqual(0x0001, mpu.pc)
self.assertEqual((flags | mpu.BREAK | mpu.UNUSED),
                         mpu.memory[0x01FF])
self.assertEqual(0xFE, mpu.sp)
# PLA
def test_pla_pulls_top_byte_from_stack_into_a_and_updates_sp(self):
mpu = self._make_mpu()
# $0000 PLA
mpu.memory[0x0000] = 0x68
mpu.memory[0x01FF] = 0xAB
mpu.sp = 0xFE
mpu.step()
self.assertEqual(0x0001, mpu.pc)
self.assertEqual(0xAB, mpu.a)
self.assertEqual(0xFF, mpu.sp)
# PLP
def test_plp_pulls_top_byte_from_stack_into_flags_and_updates_sp(self):
mpu = self._make_mpu()
# $0000 PLP
mpu.memory[0x0000] = 0x28
mpu.memory[0x01FF] = 0xBA # must have BREAK and UNUSED set
mpu.sp = 0xFE
mpu.step()
self.assertEqual(0x0001, mpu.pc)
self.assertEqual(0xBA, mpu.p)
self.assertEqual(0xFF, mpu.sp)
# ROL Accumulator
def test_rol_accumulator_zero_and_carry_zero_sets_z_flag(self):
mpu = self._make_mpu()
mpu.a = 0x00
mpu.p &= ~(mpu.CARRY)
# $0000 ROL A
mpu.memory[0x0000] = 0x2A
mpu.step()
self.assertEqual(0x0001, mpu.pc)
self.assertEqual(0x00, mpu.a)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
def test_rol_accumulator_80_and_carry_zero_sets_z_flag(self):
mpu = self._make_mpu()
mpu.a = 0x80
mpu.p &= ~(mpu.CARRY)
mpu.p &= ~(mpu.ZERO)
# $0000 ROL A
mpu.memory[0x0000] = 0x2A
mpu.step()
self.assertEqual(0x0001, mpu.pc)
self.assertEqual(0x00, mpu.a)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
def test_rol_accumulator_zero_and_carry_one_clears_z_flag(self):
mpu = self._make_mpu()
mpu.a = 0x00
mpu.p |= mpu.CARRY
# $0000 ROL A
mpu.memory[0x0000] = 0x2A
mpu.step()
self.assertEqual(0x0001, mpu.pc)
self.assertEqual(0x01, mpu.a)
self.assertEqual(0, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
def test_rol_accumulator_sets_n_flag(self):
mpu = self._make_mpu()
mpu.a = 0x40
mpu.p |= mpu.CARRY
# $0000 ROL A
mpu.memory[0x0000] = 0x2A
mpu.step()
self.assertEqual(0x0001, mpu.pc)
self.assertEqual(0x81, mpu.a)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
def test_rol_accumulator_shifts_out_zero(self):
mpu = self._make_mpu()
mpu.a = 0x7F
mpu.p &= ~(mpu.CARRY)
# $0000 ROL A
mpu.memory[0x0000] = 0x2A
mpu.step()
self.assertEqual(0x0001, mpu.pc)
self.assertEqual(0xFE, mpu.a)
self.assertEqual(0, mpu.p & mpu.CARRY)
def test_rol_accumulator_shifts_out_one(self):
mpu = self._make_mpu()
mpu.a = 0xFF
mpu.p &= ~(mpu.CARRY)
# $0000 ROL A
mpu.memory[0x0000] = 0x2A
mpu.step()
self.assertEqual(0x0001, mpu.pc)
self.assertEqual(0xFE, mpu.a)
self.assertEqual(mpu.CARRY, mpu.p & mpu.CARRY)
# ROL Absolute
def test_rol_absolute_zero_and_carry_zero_sets_z_flag(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.CARRY)
# $0000 ROL $ABCD
self._write(mpu.memory, 0x0000, (0x2E, 0xCD, 0xAB))
mpu.memory[0xABCD] = 0x00
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x00, mpu.memory[0xABCD])
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
def test_rol_absolute_80_and_carry_zero_sets_z_flag(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.CARRY)
mpu.p &= ~(mpu.ZERO)
# $0000 ROL $ABCD
self._write(mpu.memory, 0x0000, (0x2E, 0xCD, 0xAB))
mpu.memory[0xABCD] = 0x80
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x00, mpu.memory[0xABCD])
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
def test_rol_absolute_zero_and_carry_one_clears_z_flag(self):
mpu = self._make_mpu()
mpu.a = 0x00
mpu.p |= mpu.CARRY
# $0000 ROL $ABCD
self._write(mpu.memory, 0x0000, (0x2E, 0xCD, 0xAB))
mpu.memory[0xABCD] = 0x00
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x01, mpu.memory[0xABCD])
self.assertEqual(0, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
def test_rol_absolute_sets_n_flag(self):
mpu = self._make_mpu()
mpu.p |= mpu.CARRY
# $0000 ROL $ABCD
self._write(mpu.memory, 0x0000, (0x2E, 0xCD, 0xAB))
mpu.memory[0xABCD] = 0x40
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x81, mpu.memory[0xABCD])
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
def test_rol_absolute_shifts_out_zero(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.CARRY)
# $0000 ROL $ABCD
self._write(mpu.memory, 0x0000, (0x2E, 0xCD, 0xAB))
mpu.memory[0xABCD] = 0x7F
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0xFE, mpu.memory[0xABCD])
self.assertEqual(0, mpu.p & mpu.CARRY)
def test_rol_absolute_shifts_out_one(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.CARRY)
# $0000 ROL $ABCD
self._write(mpu.memory, 0x0000, (0x2E, 0xCD, 0xAB))
mpu.memory[0xABCD] = 0xFF
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0xFE, mpu.memory[0xABCD])
self.assertEqual(mpu.CARRY, mpu.p & mpu.CARRY)
# ROL Zero Page
def test_rol_zp_zero_and_carry_zero_sets_z_flag(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.CARRY)
# $0000 ROL $0010
self._write(mpu.memory, 0x0000, (0x26, 0x10))
mpu.memory[0x0010] = 0x00
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x00, mpu.memory[0x0010])
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
def test_rol_zp_80_and_carry_zero_sets_z_flag(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.CARRY)
mpu.p &= ~(mpu.ZERO)
# $0000 ROL $0010
self._write(mpu.memory, 0x0000, (0x26, 0x10))
mpu.memory[0x0010] = 0x80
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x00, mpu.memory[0x0010])
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
def test_rol_zp_zero_and_carry_one_clears_z_flag(self):
mpu = self._make_mpu()
mpu.a = 0x00
mpu.p |= mpu.CARRY
# $0000 ROL $0010
self._write(mpu.memory, 0x0000, (0x26, 0x10))
mpu.memory[0x0010] = 0x00
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x01, mpu.memory[0x0010])
self.assertEqual(0, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
def test_rol_zp_sets_n_flag(self):
mpu = self._make_mpu()
mpu.p |= mpu.CARRY
# $0000 ROL $0010
self._write(mpu.memory, 0x0000, (0x26, 0x10))
mpu.memory[0x0010] = 0x40
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x81, mpu.memory[0x0010])
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
def test_rol_zp_shifts_out_zero(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.CARRY)
# $0000 ROL $0010
self._write(mpu.memory, 0x0000, (0x26, 0x10))
mpu.memory[0x0010] = 0x7F
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0xFE, mpu.memory[0x0010])
self.assertEqual(0, mpu.p & mpu.CARRY)
def test_rol_zp_shifts_out_one(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.CARRY)
# $0000 ROL $0010
self._write(mpu.memory, 0x0000, (0x26, 0x10))
mpu.memory[0x0010] = 0xFF
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0xFE, mpu.memory[0x0010])
self.assertEqual(mpu.CARRY, mpu.p & mpu.CARRY)
# ROL Absolute, X-Indexed
def test_rol_abs_x_indexed_zero_and_carry_zero_sets_z_flag(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.CARRY)
mpu.x = 0x03
# $0000 ROL $ABCD,X
self._write(mpu.memory, 0x0000, (0x3E, 0xCD, 0xAB))
mpu.memory[0xABCD + mpu.x] = 0x00
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x00, mpu.memory[0xABCD + mpu.x])
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
def test_rol_abs_x_indexed_80_and_carry_zero_sets_z_flag(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.CARRY)
mpu.p &= ~(mpu.ZERO)
mpu.x = 0x03
# $0000 ROL $ABCD,X
self._write(mpu.memory, 0x0000, (0x3E, 0xCD, 0xAB))
mpu.memory[0xABCD + mpu.x] = 0x80
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x00, mpu.memory[0xABCD + mpu.x])
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
def test_rol_abs_x_indexed_zero_and_carry_one_clears_z_flag(self):
mpu = self._make_mpu()
mpu.x = 0x03
mpu.p |= mpu.CARRY
# $0000 ROL $ABCD,X
self._write(mpu.memory, 0x0000, (0x3E, 0xCD, 0xAB))
mpu.memory[0xABCD + mpu.x] = 0x00
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x01, mpu.memory[0xABCD + mpu.x])
self.assertEqual(0, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
def test_rol_abs_x_indexed_sets_n_flag(self):
mpu = self._make_mpu()
mpu.x = 0x03
mpu.p |= mpu.CARRY
# $0000 ROL $ABCD,X
self._write(mpu.memory, 0x0000, (0x3E, 0xCD, 0xAB))
mpu.memory[0xABCD + mpu.x] = 0x40
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x81, mpu.memory[0xABCD + mpu.x])
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
def test_rol_abs_x_indexed_shifts_out_zero(self):
mpu = self._make_mpu()
mpu.x = 0x03
mpu.p &= ~(mpu.CARRY)
# $0000 ROL $ABCD,X
self._write(mpu.memory, 0x0000, (0x3E, 0xCD, 0xAB))
mpu.memory[0xABCD + mpu.x] = 0x7F
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0xFE, mpu.memory[0xABCD + mpu.x])
self.assertEqual(0, mpu.p & mpu.CARRY)
def test_rol_abs_x_indexed_shifts_out_one(self):
mpu = self._make_mpu()
mpu.x = 0x03
mpu.p &= ~(mpu.CARRY)
# $0000 ROL $ABCD,X
self._write(mpu.memory, 0x0000, (0x3E, 0xCD, 0xAB))
mpu.memory[0xABCD + mpu.x] = 0xFF
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0xFE, mpu.memory[0xABCD + mpu.x])
self.assertEqual(mpu.CARRY, mpu.p & mpu.CARRY)
# ROL Zero Page, X-Indexed
def test_rol_zp_x_indexed_zero_and_carry_zero_sets_z_flag(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.CARRY)
mpu.x = 0x03
self._write(mpu.memory, 0x0000, (0x36, 0x10))
# $0000 ROL $0010,X
mpu.memory[0x0010 + mpu.x] = 0x00
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x00, mpu.memory[0x0010 + mpu.x])
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
def test_rol_zp_x_indexed_80_and_carry_zero_sets_z_flag(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.CARRY)
mpu.p &= ~(mpu.ZERO)
mpu.x = 0x03
self._write(mpu.memory, 0x0000, (0x36, 0x10))
# $0000 ROL $0010,X
mpu.memory[0x0010 + mpu.x] = 0x80
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x00, mpu.memory[0x0010 + mpu.x])
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
def test_rol_zp_x_indexed_zero_and_carry_one_clears_z_flag(self):
mpu = self._make_mpu()
mpu.x = 0x03
mpu.p |= mpu.CARRY
self._write(mpu.memory, 0x0000, (0x36, 0x10))
# $0000 ROL $0010,X
mpu.memory[0x0010 + mpu.x] = 0x00
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x01, mpu.memory[0x0010 + mpu.x])
self.assertEqual(0, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
def test_rol_zp_x_indexed_sets_n_flag(self):
mpu = self._make_mpu()
mpu.x = 0x03
mpu.p |= mpu.CARRY
# $0000 ROL $0010,X
self._write(mpu.memory, 0x0000, (0x36, 0x10))
mpu.memory[0x0010 + mpu.x] = 0x40
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x81, mpu.memory[0x0010 + mpu.x])
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
def test_rol_zp_x_indexed_shifts_out_zero(self):
mpu = self._make_mpu()
mpu.x = 0x03
mpu.p &= ~(mpu.CARRY)
# $0000 ROL $0010,X
self._write(mpu.memory, 0x0000, (0x36, 0x10))
mpu.memory[0x0010 + mpu.x] = 0x7F
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0xFE, mpu.memory[0x0010 + mpu.x])
self.assertEqual(0, mpu.p & mpu.CARRY)
def test_rol_zp_x_indexed_shifts_out_one(self):
mpu = self._make_mpu()
mpu.x = 0x03
mpu.p &= ~(mpu.CARRY)
# $0000 ROL $0010,X
self._write(mpu.memory, 0x0000, (0x36, 0x10))
mpu.memory[0x0010 + mpu.x] = 0xFF
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0xFE, mpu.memory[0x0010 + mpu.x])
self.assertEqual(mpu.CARRY, mpu.p & mpu.CARRY)
# ROR Accumulator
def test_ror_accumulator_zero_and_carry_zero_sets_z_flag(self):
mpu = self._make_mpu()
mpu.a = 0x00
mpu.p &= ~(mpu.CARRY)
# $0000 ROR A
mpu.memory[0x0000] = 0x6A
mpu.step()
self.assertEqual(0x0001, mpu.pc)
self.assertEqual(0x00, mpu.a)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
def test_ror_accumulator_zero_and_carry_one_rotates_in_sets_n_flags(self):
mpu = self._make_mpu()
mpu.a = 0x00
mpu.p |= mpu.CARRY
# $0000 ROR A
mpu.memory[0x0000] = 0x6A
mpu.step()
self.assertEqual(0x0001, mpu.pc)
self.assertEqual(0x80, mpu.a)
self.assertEqual(0, mpu.p & mpu.ZERO)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
def test_ror_accumulator_shifts_out_zero(self):
mpu = self._make_mpu()
mpu.a = 0x02
mpu.p |= mpu.CARRY
# $0000 ROR A
mpu.memory[0x0000] = 0x6A
mpu.step()
self.assertEqual(0x0001, mpu.pc)
self.assertEqual(0x81, mpu.a)
self.assertEqual(0, mpu.p & mpu.CARRY)
def test_ror_accumulator_shifts_out_one(self):
mpu = self._make_mpu()
mpu.a = 0x03
mpu.p |= mpu.CARRY
# $0000 ROR A
mpu.memory[0x0000] = 0x6A
mpu.step()
self.assertEqual(0x0001, mpu.pc)
self.assertEqual(0x81, mpu.a)
self.assertEqual(mpu.CARRY, mpu.p & mpu.CARRY)
# ROR Absolute
def test_ror_absolute_zero_and_carry_zero_sets_z_flag(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.CARRY)
# $0000 ROR $ABCD
self._write(mpu.memory, 0x0000, (0x6E, 0xCD, 0xAB))
mpu.memory[0xABCD] = 0x00
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x00, mpu.memory[0xABCD])
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
def test_ror_absolute_zero_and_carry_one_rotates_in_sets_n_flags(self):
mpu = self._make_mpu()
mpu.p |= mpu.CARRY
# $0000 ROR $ABCD
self._write(mpu.memory, 0x0000, (0x6E, 0xCD, 0xAB))
mpu.memory[0xABCD] = 0x00
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x80, mpu.memory[0xABCD])
self.assertEqual(0, mpu.p & mpu.ZERO)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
def test_ror_absolute_shifts_out_zero(self):
mpu = self._make_mpu()
mpu.p |= mpu.CARRY
# $0000 ROR $ABCD
self._write(mpu.memory, 0x0000, (0x6E, 0xCD, 0xAB))
mpu.memory[0xABCD] = 0x02
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x81, mpu.memory[0xABCD])
self.assertEqual(0, mpu.p & mpu.CARRY)
def test_ror_absolute_shifts_out_one(self):
mpu = self._make_mpu()
mpu.p |= mpu.CARRY
# $0000 ROR $ABCD
self._write(mpu.memory, 0x0000, (0x6E, 0xCD, 0xAB))
mpu.memory[0xABCD] = 0x03
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x81, mpu.memory[0xABCD])
self.assertEqual(mpu.CARRY, mpu.p & mpu.CARRY)
# ROR Zero Page
def test_ror_zp_zero_and_carry_zero_sets_z_flag(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.CARRY)
# $0000 ROR $0010
self._write(mpu.memory, 0x0000, (0x66, 0x10))
mpu.memory[0x0010] = 0x00
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x00, mpu.memory[0x0010])
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
def test_ror_zp_zero_and_carry_one_rotates_in_sets_n_flags(self):
mpu = self._make_mpu()
mpu.p |= mpu.CARRY
# $0000 ROR $0010
self._write(mpu.memory, 0x0000, (0x66, 0x10))
mpu.memory[0x0010] = 0x00
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x80, mpu.memory[0x0010])
self.assertEqual(0, mpu.p & mpu.ZERO)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
    def test_ror_zp_shifts_out_zero(self):
mpu = self._make_mpu()
mpu.p |= mpu.CARRY
# $0000 ROR $0010
self._write(mpu.memory, 0x0000, (0x66, 0x10))
mpu.memory[0x0010] = 0x02
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x81, mpu.memory[0x0010])
self.assertEqual(0, mpu.p & mpu.CARRY)
def test_ror_zp_shifts_out_one(self):
mpu = self._make_mpu()
mpu.p |= mpu.CARRY
# $0000 ROR $0010
self._write(mpu.memory, 0x0000, (0x66, 0x10))
mpu.memory[0x0010] = 0x03
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x81, mpu.memory[0x0010])
self.assertEqual(mpu.CARRY, mpu.p & mpu.CARRY)
# ROR Absolute, X-Indexed
def test_ror_abs_x_indexed_zero_and_carry_zero_sets_z_flag(self):
mpu = self._make_mpu()
mpu.x = 0x03
mpu.p &= ~(mpu.CARRY)
# $0000 ROR $ABCD,X
self._write(mpu.memory, 0x0000, (0x7E, 0xCD, 0xAB))
mpu.memory[0xABCD + mpu.x] = 0x00
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x00, mpu.memory[0xABCD + mpu.x])
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
def test_ror_abs_x_indexed_z_and_c_1_rotates_in_sets_n_flags(self):
mpu = self._make_mpu()
mpu.x = 0x03
mpu.p |= mpu.CARRY
# $0000 ROR $ABCD,X
self._write(mpu.memory, 0x0000, (0x7E, 0xCD, 0xAB))
mpu.memory[0xABCD + mpu.x] = 0x00
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x80, mpu.memory[0xABCD + mpu.x])
self.assertEqual(0, mpu.p & mpu.ZERO)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
def test_ror_abs_x_indexed_shifts_out_zero(self):
mpu = self._make_mpu()
mpu.x = 0x03
mpu.p |= mpu.CARRY
# $0000 ROR $ABCD,X
self._write(mpu.memory, 0x0000, (0x7E, 0xCD, 0xAB))
mpu.memory[0xABCD + mpu.x] = 0x02
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x81, mpu.memory[0xABCD + mpu.x])
self.assertEqual(0, mpu.p & mpu.CARRY)
def test_ror_abs_x_indexed_shifts_out_one(self):
mpu = self._make_mpu()
mpu.x = 0x03
mpu.p |= mpu.CARRY
# $0000 ROR $ABCD,X
self._write(mpu.memory, 0x0000, (0x7E, 0xCD, 0xAB))
mpu.memory[0xABCD + mpu.x] = 0x03
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x81, mpu.memory[0xABCD + mpu.x])
self.assertEqual(mpu.CARRY, mpu.p & mpu.CARRY)
# ROR Zero Page, X-Indexed
def test_ror_zp_x_indexed_zero_and_carry_zero_sets_z_flag(self):
mpu = self._make_mpu()
mpu.x = 0x03
mpu.p &= ~(mpu.CARRY)
# $0000 ROR $0010,X
self._write(mpu.memory, 0x0000, (0x76, 0x10))
mpu.memory[0x0010 + mpu.x] = 0x00
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x00, mpu.memory[0x0010 + mpu.x])
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
def test_ror_zp_x_indexed_zero_and_carry_one_rotates_in_sets_n_flags(self):
mpu = self._make_mpu()
mpu.x = 0x03
mpu.p |= mpu.CARRY
# $0000 ROR $0010,X
self._write(mpu.memory, 0x0000, (0x76, 0x10))
mpu.memory[0x0010 + mpu.x] = 0x00
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x80, mpu.memory[0x0010 + mpu.x])
self.assertEqual(0, mpu.p & mpu.ZERO)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
    def test_ror_zp_x_indexed_shifts_out_zero(self):
mpu = self._make_mpu()
mpu.x = 0x03
mpu.p |= mpu.CARRY
# $0000 ROR $0010,X
self._write(mpu.memory, 0x0000, (0x76, 0x10))
mpu.memory[0x0010 + mpu.x] = 0x02
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x81, mpu.memory[0x0010 + mpu.x])
self.assertEqual(0, mpu.p & mpu.CARRY)
def test_ror_zp_x_indexed_shifts_out_one(self):
mpu = self._make_mpu()
mpu.x = 0x03
mpu.p |= mpu.CARRY
# $0000 ROR $0010,X
self._write(mpu.memory, 0x0000, (0x76, 0x10))
mpu.memory[0x0010 + mpu.x] = 0x03
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x81, mpu.memory[0x0010 + mpu.x])
self.assertEqual(mpu.CARRY, mpu.p & mpu.CARRY)
# RTI
def test_rti_restores_status_and_pc_and_updates_sp(self):
mpu = self._make_mpu()
# $0000 RTI
mpu.memory[0x0000] = 0x40
self._write(mpu.memory, 0x01FD, (0xFC, 0x03, 0xC0)) # Status, PCL, PCH
mpu.sp = 0xFC
mpu.step()
self.assertEqual(0xC003, mpu.pc)
self.assertEqual(0xFC, mpu.p)
self.assertEqual(0xFF, mpu.sp)
def test_rti_forces_break_and_unused_flags_high(self):
mpu = self._make_mpu()
# $0000 RTI
mpu.memory[0x0000] = 0x40
self._write(mpu.memory, 0x01FD, (0x00, 0x03, 0xC0)) # Status, PCL, PCH
mpu.sp = 0xFC
mpu.step()
self.assertEqual(mpu.BREAK, mpu.p & mpu.BREAK)
self.assertEqual(mpu.UNUSED, mpu.p & mpu.UNUSED)
# RTS
def test_rts_restores_pc_and_increments_then_updates_sp(self):
mpu = self._make_mpu()
# $0000 RTS
mpu.memory[0x0000] = 0x60
self._write(mpu.memory, 0x01FE, (0x03, 0xC0)) # PCL, PCH
mpu.pc = 0x0000
mpu.sp = 0xFD
mpu.step()
self.assertEqual(0xC004, mpu.pc)
self.assertEqual(0xFF, mpu.sp)
def test_rts_wraps_around_top_of_memory(self):
mpu = self._make_mpu()
# $1000 RTS
mpu.memory[0x1000] = 0x60
self._write(mpu.memory, 0x01FE, (0xFF, 0xFF)) # PCL, PCH
mpu.pc = 0x1000
mpu.sp = 0xFD
mpu.step()
self.assertEqual(0x0000, mpu.pc)
self.assertEqual(0xFF, mpu.sp)
# SBC Absolute
def test_sbc_abs_all_zeros_and_no_borrow_is_zero(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.DECIMAL)
mpu.p |= mpu.CARRY # borrow = 0
mpu.a = 0x00
# $0000 SBC $ABCD
self._write(mpu.memory, 0x0000, (0xED, 0xCD, 0xAB))
mpu.memory[0xABCD] = 0x00
mpu.step()
self.assertEqual(0x00, mpu.a)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
        self.assertEqual(mpu.CARRY, mpu.p & mpu.CARRY)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
def test_sbc_abs_downto_zero_no_borrow_sets_z_clears_n(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.DECIMAL)
mpu.p |= mpu.CARRY # borrow = 0
mpu.a = 0x01
# $0000 SBC $ABCD
self._write(mpu.memory, 0x0000, (0xED, 0xCD, 0xAB))
mpu.memory[0xABCD] = 0x01
mpu.step()
self.assertEqual(0x00, mpu.a)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
        self.assertEqual(mpu.CARRY, mpu.p & mpu.CARRY)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
def test_sbc_abs_downto_zero_with_borrow_sets_z_clears_n(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.DECIMAL)
mpu.p &= ~(mpu.CARRY) # borrow = 1
mpu.a = 0x01
# $0000 SBC $ABCD
self._write(mpu.memory, 0x0000, (0xED, 0xCD, 0xAB))
mpu.memory[0xABCD] = 0x00
mpu.step()
self.assertEqual(0x00, mpu.a)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
        self.assertEqual(mpu.CARRY, mpu.p & mpu.CARRY)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
def test_sbc_abs_downto_four_with_borrow_clears_z_n(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.DECIMAL)
mpu.p &= ~(mpu.CARRY) # borrow = 1
mpu.a = 0x07
# $0000 SBC $ABCD
self._write(mpu.memory, 0x0000, (0xED, 0xCD, 0xAB))
mpu.memory[0xABCD] = 0x02
mpu.step()
self.assertEqual(0x04, mpu.a)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
        self.assertEqual(mpu.CARRY, mpu.p & mpu.CARRY)
# SBC Zero Page
def test_sbc_zp_all_zeros_and_no_borrow_is_zero(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.DECIMAL)
mpu.p |= mpu.CARRY # borrow = 0
mpu.a = 0x00
# $0000 SBC $10
self._write(mpu.memory, 0x0000, (0xE5, 0x10))
mpu.memory[0x0010] = 0x00
mpu.step()
self.assertEqual(0x00, mpu.a)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
        self.assertEqual(mpu.CARRY, mpu.p & mpu.CARRY)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
def test_sbc_zp_downto_zero_no_borrow_sets_z_clears_n(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.DECIMAL)
mpu.p |= mpu.CARRY # borrow = 0
mpu.a = 0x01
# $0000 SBC $10
self._write(mpu.memory, 0x0000, (0xE5, 0x10))
mpu.memory[0x0010] = 0x01
mpu.step()
self.assertEqual(0x00, mpu.a)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
self.assertEqual(mpu.CARRY, mpu.p & mpu.CARRY)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
def test_sbc_zp_downto_zero_with_borrow_sets_z_clears_n(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.DECIMAL)
mpu.p &= ~(mpu.CARRY) # borrow = 1
mpu.a = 0x01
# $0000 SBC $10
self._write(mpu.memory, 0x0000, (0xE5, 0x10))
mpu.memory[0x0010] = 0x00
mpu.step()
self.assertEqual(0x00, mpu.a)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
self.assertEqual(mpu.CARRY, mpu.p & mpu.CARRY)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
def test_sbc_zp_downto_four_with_borrow_clears_z_n(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.DECIMAL)
mpu.p &= ~(mpu.CARRY) # borrow = 1
mpu.a = 0x07
# $0000 SBC $10
self._write(mpu.memory, 0x0000, (0xE5, 0x10))
mpu.memory[0x0010] = 0x02
mpu.step()
self.assertEqual(0x04, mpu.a)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
self.assertEqual(mpu.CARRY, mpu.p & mpu.CARRY)
# SBC Immediate
def test_sbc_imm_all_zeros_and_no_borrow_is_zero(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.DECIMAL)
mpu.p |= mpu.CARRY # borrow = 0
mpu.a = 0x00
# $0000 SBC #$00
self._write(mpu.memory, 0x0000, (0xE9, 0x00))
mpu.step()
self.assertEqual(0x00, mpu.a)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
self.assertEqual(mpu.CARRY, mpu.p & mpu.CARRY)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
def test_sbc_imm_downto_zero_no_borrow_sets_z_clears_n(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.DECIMAL)
mpu.p |= mpu.CARRY # borrow = 0
mpu.a = 0x01
# $0000 SBC #$01
self._write(mpu.memory, 0x0000, (0xE9, 0x01))
mpu.step()
self.assertEqual(0x00, mpu.a)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
self.assertEqual(mpu.CARRY, mpu.p & mpu.CARRY)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
def test_sbc_imm_downto_zero_with_borrow_sets_z_clears_n(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.DECIMAL)
mpu.p &= ~(mpu.CARRY) # borrow = 1
mpu.a = 0x01
# $0000 SBC #$00
self._write(mpu.memory, 0x0000, (0xE9, 0x00))
mpu.step()
self.assertEqual(0x00, mpu.a)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
self.assertEqual(mpu.CARRY, mpu.p & mpu.CARRY)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
def test_sbc_imm_downto_four_with_borrow_clears_z_n(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.DECIMAL)
mpu.p &= ~(mpu.CARRY) # borrow = 1
mpu.a = 0x07
# $0000 SBC #$02
self._write(mpu.memory, 0x0000, (0xE9, 0x02))
mpu.step()
self.assertEqual(0x04, mpu.a)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
self.assertEqual(mpu.CARRY, mpu.p & mpu.CARRY)
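# The BCD tests below run SBC with the decimal flag set: the result is
# adjusted per nibble, so $00 - $01 wraps to $99 with the carry (no-borrow)
# flag cleared. Several operands use nibbles above 9 to pin down the NMOS
# chip's behaviour for non-BCD inputs.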
def test_sbc_bcd_on_immediate_0a_minus_00_carry_set(self):
mpu = self._make_mpu()
mpu.p |= mpu.DECIMAL
mpu.p |= mpu.CARRY
mpu.a = 0x0a
# $0000 SBC #$00
self._write(mpu.memory, 0x0000, (0xe9, 0x00))
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x0a, mpu.a)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.OVERFLOW)
self.assertEqual(0, mpu.p & mpu.ZERO)
self.assertEqual(mpu.CARRY, mpu.p & mpu.CARRY)
def test_sbc_bcd_on_immediate_9a_minus_00_carry_set(self):
mpu = self._make_mpu()
mpu.p |= mpu.DECIMAL
mpu.p |= mpu.CARRY
mpu.a = 0x9a
# $0000 SBC #$00
self._write(mpu.memory, 0x0000, (0xe9, 0x00))
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x9a, mpu.a)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.OVERFLOW)
self.assertEqual(0, mpu.p & mpu.ZERO)
self.assertEqual(mpu.CARRY, mpu.p & mpu.CARRY)
def test_sbc_bcd_on_immediate_00_minus_01_carry_set(self):
mpu = self._make_mpu()
mpu.p |= mpu.DECIMAL
mpu.p |= mpu.OVERFLOW
mpu.p |= mpu.ZERO
mpu.p |= mpu.CARRY
mpu.a = 0x00
# $0000 SBC #$01
self._write(mpu.memory, 0x0000, (0xe9, 0x01))
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x99, mpu.a)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.OVERFLOW)
self.assertEqual(0, mpu.p & mpu.ZERO)
self.assertEqual(0, mpu.p & mpu.CARRY)
def test_sbc_bcd_on_immediate_20_minus_0a_carry_unset(self):
mpu = self._make_mpu()
mpu.p |= mpu.DECIMAL
mpu.a = 0x20
# $0000 SBC #$0A
self._write(mpu.memory, 0x0000, (0xe9, 0x0a))
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x1f, mpu.a)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.OVERFLOW)
self.assertEqual(0, mpu.p & mpu.ZERO)
self.assertEqual(mpu.CARRY, mpu.p & mpu.CARRY)
# SBC Absolute, X-Indexed
def test_sbc_abs_x_all_zeros_and_no_borrow_is_zero(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.DECIMAL)
mpu.p |= mpu.CARRY # borrow = 0
mpu.a = 0x00
# $0000 SBC $FEE0,X
self._write(mpu.memory, 0x0000, (0xFD, 0xE0, 0xFE))
mpu.x = 0x0D
mpu.memory[0xFEED] = 0x00
mpu.step()
self.assertEqual(0x00, mpu.a)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
self.assertEqual(mpu.CARRY, mpu.p & mpu.CARRY)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
def test_sbc_abs_x_downto_zero_no_borrow_sets_z_clears_n(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.DECIMAL)
mpu.p |= mpu.CARRY # borrow = 0
mpu.a = 0x01
# $0000 SBC $FEE0,X
self._write(mpu.memory, 0x0000, (0xFD, 0xE0, 0xFE))
mpu.x = 0x0D
mpu.memory[0xFEED] = 0x01
mpu.step()
self.assertEqual(0x00, mpu.a)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
self.assertEqual(mpu.CARRY, mpu.p & mpu.CARRY)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
def test_sbc_abs_x_downto_zero_with_borrow_sets_z_clears_n(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.DECIMAL)
mpu.p &= ~(mpu.CARRY) # borrow = 1
mpu.a = 0x01
# $0000 SBC $FEE0,X
self._write(mpu.memory, 0x0000, (0xFD, 0xE0, 0xFE))
mpu.x = 0x0D
mpu.memory[0xFEED] = 0x00
mpu.step()
self.assertEqual(0x00, mpu.a)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
self.assertEqual(mpu.CARRY, mpu.p & mpu.CARRY)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
def test_sbc_abs_x_downto_four_with_borrow_clears_z_n(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.DECIMAL)
mpu.p &= ~(mpu.CARRY) # borrow = 1
mpu.a = 0x07
# $0000 SBC $FEE0,X
self._write(mpu.memory, 0x0000, (0xFD, 0xE0, 0xFE))
mpu.x = 0x0D
mpu.memory[0xFEED] = 0x02
mpu.step()
self.assertEqual(0x04, mpu.a)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
self.assertEqual(mpu.CARRY, mpu.p & mpu.CARRY)
# SBC Absolute, Y-Indexed
def test_sbc_abs_y_all_zeros_and_no_borrow_is_zero(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.DECIMAL)
mpu.p |= mpu.CARRY # borrow = 0
mpu.a = 0x00
# $0000 SBC $FEE0,Y
self._write(mpu.memory, 0x0000, (0xF9, 0xE0, 0xFE))
mpu.y = 0x0D
mpu.memory[0xFEED] = 0x00
mpu.step()
self.assertEqual(0x00, mpu.a)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
self.assertEqual(mpu.CARRY, mpu.p & mpu.CARRY)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
def test_sbc_abs_y_downto_zero_no_borrow_sets_z_clears_n(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.DECIMAL)
mpu.p |= mpu.CARRY # borrow = 0
mpu.a = 0x01
# $0000 SBC $FEE0,Y
self._write(mpu.memory, 0x0000, (0xF9, 0xE0, 0xFE))
mpu.y = 0x0D
mpu.memory[0xFEED] = 0x01
mpu.step()
self.assertEqual(0x00, mpu.a)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
self.assertEqual(mpu.CARRY, mpu.p & mpu.CARRY)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
def test_sbc_abs_y_downto_zero_with_borrow_sets_z_clears_n(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.DECIMAL)
mpu.p &= ~(mpu.CARRY) # borrow = 1
mpu.a = 0x01
# $0000 SBC $FEE0,Y
self._write(mpu.memory, 0x0000, (0xF9, 0xE0, 0xFE))
mpu.y = 0x0D
mpu.memory[0xFEED] = 0x00
mpu.step()
self.assertEqual(0x00, mpu.a)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
self.assertEqual(mpu.CARRY, mpu.p & mpu.CARRY)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
def test_sbc_abs_y_downto_four_with_borrow_clears_z_n(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.DECIMAL)
mpu.p &= ~(mpu.CARRY) # borrow = 1
mpu.a = 0x07
# $0000 SBC $FEE0,Y
self._write(mpu.memory, 0x0000, (0xF9, 0xE0, 0xFE))
mpu.y = 0x0D
mpu.memory[0xFEED] = 0x02
mpu.step()
self.assertEqual(0x04, mpu.a)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
self.assertEqual(mpu.CARRY, mpu.p & mpu.CARRY)
# SBC Indirect, Indexed (X)
def test_sbc_ind_x_all_zeros_and_no_borrow_is_zero(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.DECIMAL)
mpu.p |= mpu.CARRY # borrow = 0
mpu.a = 0x00
# $0000 SBC ($10,X)
# $0013 Vector to $FEED
self._write(mpu.memory, 0x0000, (0xE1, 0x10))
self._write(mpu.memory, 0x0013, (0xED, 0xFE))
mpu.x = 0x03
mpu.memory[0xFEED] = 0x00
mpu.step()
self.assertEqual(0x00, mpu.a)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
self.assertEqual(mpu.CARRY, mpu.p & mpu.CARRY)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
def test_sbc_ind_x_downto_zero_no_borrow_sets_z_clears_n(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.DECIMAL)
mpu.p |= mpu.CARRY # borrow = 0
mpu.a = 0x01
# $0000 SBC ($10,X)
# $0013 Vector to $FEED
self._write(mpu.memory, 0x0000, (0xE1, 0x10))
self._write(mpu.memory, 0x0013, (0xED, 0xFE))
mpu.x = 0x03
mpu.memory[0xFEED] = 0x01
mpu.step()
self.assertEqual(0x00, mpu.a)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
self.assertEqual(mpu.CARRY, mpu.p & mpu.CARRY)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
def test_sbc_ind_x_downto_zero_with_borrow_sets_z_clears_n(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.DECIMAL)
mpu.p &= ~(mpu.CARRY) # borrow = 1
mpu.a = 0x01
# $0000 SBC ($10,X)
# $0013 Vector to $FEED
self._write(mpu.memory, 0x0000, (0xE1, 0x10))
self._write(mpu.memory, 0x0013, (0xED, 0xFE))
mpu.x = 0x03
mpu.memory[0xFEED] = 0x00
mpu.step()
self.assertEqual(0x00, mpu.a)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
self.assertEqual(mpu.CARRY, mpu.p & mpu.CARRY)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
def test_sbc_ind_x_downto_four_with_borrow_clears_z_n(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.DECIMAL)
mpu.p &= ~(mpu.CARRY) # borrow = 1
mpu.a = 0x07
# $0000 SBC ($10,X)
# $0013 Vector to $FEED
self._write(mpu.memory, 0x0000, (0xE1, 0x10))
self._write(mpu.memory, 0x0013, (0xED, 0xFE))
mpu.x = 0x03
mpu.memory[0xFEED] = 0x02
mpu.step()
self.assertEqual(0x04, mpu.a)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
self.assertEqual(mpu.CARRY, mpu.p & mpu.CARRY)
# SBC Indexed, Indirect (Y)
def test_sbc_ind_y_all_zeros_and_no_borrow_is_zero(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.DECIMAL)
mpu.p |= mpu.CARRY # borrow = 0
mpu.a = 0x00
mpu.y = 0x03
# $0000 SBC ($10),Y
# $0010 Vector to $FEED
self._write(mpu.memory, 0x0000, (0xF1, 0x10))
self._write(mpu.memory, 0x0010, (0xED, 0xFE))
mpu.memory[0xFEED + mpu.y] = 0x00
mpu.step()
self.assertEqual(0x00, mpu.a)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
self.assertEqual(mpu.CARRY, mpu.p & mpu.CARRY)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
def test_sbc_ind_y_downto_zero_no_borrow_sets_z_clears_n(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.DECIMAL)
mpu.p |= mpu.CARRY # borrow = 0
mpu.a = 0x01
# $0000 SBC ($10),Y
# $0010 Vector to $FEED
self._write(mpu.memory, 0x0000, (0xF1, 0x10))
self._write(mpu.memory, 0x0010, (0xED, 0xFE))
mpu.memory[0xFEED + mpu.y] = 0x01
mpu.step()
self.assertEqual(0x00, mpu.a)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
self.assertEqual(mpu.CARRY, mpu.p & mpu.CARRY)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
def test_sbc_ind_y_downto_zero_with_borrow_sets_z_clears_n(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.DECIMAL)
mpu.p &= ~(mpu.CARRY) # borrow = 1
mpu.a = 0x01
# $0000 SBC ($10),Y
# $0010 Vector to $FEED
self._write(mpu.memory, 0x0000, (0xF1, 0x10))
self._write(mpu.memory, 0x0010, (0xED, 0xFE))
mpu.memory[0xFEED + mpu.y] = 0x00
mpu.step()
self.assertEqual(0x00, mpu.a)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
self.assertEqual(mpu.CARRY, mpu.p & mpu.CARRY)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
def test_sbc_ind_y_downto_four_with_borrow_clears_z_n(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.DECIMAL)
mpu.p &= ~(mpu.CARRY) # borrow = 1
mpu.a = 0x07
# $0000 SBC ($10),Y
# $0010 Vector to $FEED
self._write(mpu.memory, 0x0000, (0xF1, 0x10))
self._write(mpu.memory, 0x0010, (0xED, 0xFE))
mpu.memory[0xFEED + mpu.y] = 0x02
mpu.step()
self.assertEqual(0x04, mpu.a)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
self.assertEqual(mpu.CARRY, mpu.p & mpu.CARRY)
# SBC Zero Page, X-Indexed
def test_sbc_zp_x_all_zeros_and_no_borrow_is_zero(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.DECIMAL)
mpu.p |= mpu.CARRY # borrow = 0
mpu.a = 0x00
# $0000 SBC $10,X
self._write(mpu.memory, 0x0000, (0xF5, 0x10))
mpu.x = 0x0D
mpu.memory[0x001D] = 0x00
mpu.step()
self.assertEqual(0x00, mpu.a)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
self.assertEqual(mpu.CARRY, mpu.p & mpu.CARRY)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
def test_sbc_zp_x_downto_zero_no_borrow_sets_z_clears_n(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.DECIMAL)
mpu.p |= mpu.CARRY # borrow = 0
mpu.a = 0x01
# $0000 SBC $10,X
self._write(mpu.memory, 0x0000, (0xF5, 0x10))
mpu.x = 0x0D
mpu.memory[0x001D] = 0x01
mpu.step()
self.assertEqual(0x00, mpu.a)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
self.assertEqual(mpu.CARRY, mpu.p & mpu.CARRY)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
def test_sbc_zp_x_downto_zero_with_borrow_sets_z_clears_n(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.DECIMAL)
mpu.p &= ~(mpu.CARRY) # borrow = 1
mpu.a = 0x01
# $0000 SBC $10,X
self._write(mpu.memory, 0x0000, (0xF5, 0x10))
mpu.x = 0x0D
mpu.memory[0x001D] = 0x00
mpu.step()
self.assertEqual(0x00, mpu.a)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
self.assertEqual(mpu.CARRY, mpu.p & mpu.CARRY)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
def test_sbc_zp_x_downto_four_with_borrow_clears_z_n(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.DECIMAL)
mpu.p &= ~(mpu.CARRY) # borrow = 1
mpu.a = 0x07
# $0000 SBC $10,X
self._write(mpu.memory, 0x0000, (0xF5, 0x10))
mpu.x = 0x0D
mpu.memory[0x001D] = 0x02
mpu.step()
self.assertEqual(0x04, mpu.a)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
self.assertEqual(0, mpu.p & mpu.ZERO)
self.assertEqual(mpu.CARRY, mpu.p & mpu.CARRY)
# SEC
def test_sec_sets_carry_flag(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.CARRY)
# $0000 SEC
mpu.memory[0x0000] = 0x38
mpu.step()
self.assertEqual(0x0001, mpu.pc)
self.assertEqual(mpu.CARRY, mpu.p & mpu.CARRY)
# SED
def test_sed_sets_decimal_mode_flag(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.DECIMAL)
# $0000 SED
mpu.memory[0x0000] = 0xF8
mpu.step()
self.assertEqual(0x0001, mpu.pc)
self.assertEqual(mpu.DECIMAL, mpu.p & mpu.DECIMAL)
# SEI
def test_sei_sets_interrupt_disable_flag(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.INTERRUPT)
# $0000 SEI
mpu.memory[0x0000] = 0x78
mpu.step()
self.assertEqual(0x0001, mpu.pc)
self.assertEqual(mpu.INTERRUPT, mpu.p & mpu.INTERRUPT)
# STA Absolute
def test_sta_absolute_stores_a_leaves_a_and_n_flag_unchanged(self):
mpu = self._make_mpu()
mpu.p = flags = 0xFF & ~(mpu.NEGATIVE)
mpu.a = 0xFF
# $0000 STA $ABCD
self._write(mpu.memory, 0x0000, (0x8D, 0xCD, 0xAB))
mpu.memory[0xABCD] = 0x00
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0xFF, mpu.memory[0xABCD])
self.assertEqual(0xFF, mpu.a)
self.assertEqual(flags, mpu.p)
def test_sta_absolute_stores_a_leaves_a_and_z_flag_unchanged(self):
mpu = self._make_mpu()
mpu.p = flags = 0xFF & ~(mpu.ZERO)
mpu.a = 0x00
# $0000 STA $ABCD
self._write(mpu.memory, 0x0000, (0x8D, 0xCD, 0xAB))
mpu.memory[0xABCD] = 0xFF
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x00, mpu.memory[0xABCD])
self.assertEqual(0x00, mpu.a)
self.assertEqual(flags, mpu.p)
# STA Zero Page
def test_sta_zp_stores_a_leaves_a_and_n_flag_unchanged(self):
mpu = self._make_mpu()
mpu.p = flags = 0xFF & ~(mpu.NEGATIVE)
mpu.a = 0xFF
# $0000 STA $0010
self._write(mpu.memory, 0x0000, (0x85, 0x10))
mpu.memory[0x0010] = 0x00
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0xFF, mpu.memory[0x0010])
self.assertEqual(0xFF, mpu.a)
self.assertEqual(flags, mpu.p)
def test_sta_zp_stores_a_leaves_a_and_z_flag_unchanged(self):
mpu = self._make_mpu()
mpu.p = flags = 0xFF & ~(mpu.ZERO)
mpu.a = 0x00
# $0000 STA $0010
self._write(mpu.memory, 0x0000, (0x85, 0x10))
mpu.memory[0x0010] = 0xFF
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x00, mpu.memory[0x0010])
self.assertEqual(0x00, mpu.a)
self.assertEqual(flags, mpu.p)
# STA Absolute, X-Indexed
def test_sta_abs_x_indexed_stores_a_leaves_a_and_n_flag_unchanged(self):
mpu = self._make_mpu()
mpu.p = flags = 0xFF & ~(mpu.NEGATIVE)
mpu.a = 0xFF
mpu.x = 0x03
# $0000 STA $ABCD,X
self._write(mpu.memory, 0x0000, (0x9D, 0xCD, 0xAB))
mpu.memory[0xABCD + mpu.x] = 0x00
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0xFF, mpu.memory[0xABCD + mpu.x])
self.assertEqual(0xFF, mpu.a)
self.assertEqual(flags, mpu.p)
def test_sta_abs_x_indexed_stores_a_leaves_a_and_z_flag_unchanged(self):
mpu = self._make_mpu()
mpu.p = flags = 0xFF & ~(mpu.ZERO)
mpu.a = 0x00
mpu.x = 0x03
# $0000 STA $ABCD,X
self._write(mpu.memory, 0x0000, (0x9D, 0xCD, 0xAB))
mpu.memory[0xABCD + mpu.x] = 0xFF
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x00, mpu.memory[0xABCD + mpu.x])
self.assertEqual(0x00, mpu.a)
self.assertEqual(flags, mpu.p)
# STA Absolute, Y-Indexed
def test_sta_abs_y_indexed_stores_a_leaves_a_and_n_flag_unchanged(self):
mpu = self._make_mpu()
mpu.p = flags = 0xFF & ~(mpu.NEGATIVE)
mpu.a = 0xFF
mpu.y = 0x03
# $0000 STA $ABCD,Y
self._write(mpu.memory, 0x0000, (0x99, 0xCD, 0xAB))
mpu.memory[0xABCD + mpu.y] = 0x00
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0xFF, mpu.memory[0xABCD + mpu.y])
self.assertEqual(0xFF, mpu.a)
self.assertEqual(flags, mpu.p)
def test_sta_abs_y_indexed_stores_a_leaves_a_and_z_flag_unchanged(self):
mpu = self._make_mpu()
mpu.p = flags = 0xFF & ~(mpu.ZERO)
mpu.a = 0x00
mpu.y = 0x03
# $0000 STA $ABCD,Y
self._write(mpu.memory, 0x0000, (0x99, 0xCD, 0xAB))
mpu.memory[0xABCD + mpu.y] = 0xFF
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x00, mpu.memory[0xABCD + mpu.y])
self.assertEqual(0x00, mpu.a)
self.assertEqual(flags, mpu.p)
# STA Indirect, Indexed (X)
def test_sta_ind_indexed_x_stores_a_leaves_a_and_n_flag_unchanged(self):
mpu = self._make_mpu()
mpu.p = flags = 0xFF & ~(mpu.NEGATIVE)
mpu.a = 0xFF
mpu.x = 0x03
# $0000 STA ($0010,X)
# $0013 Vector to $FEED
self._write(mpu.memory, 0x0000, (0x81, 0x10))
self._write(mpu.memory, 0x0013, (0xED, 0xFE))
mpu.memory[0xFEED] = 0x00
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0xFF, mpu.memory[0xFEED])
self.assertEqual(0xFF, mpu.a)
self.assertEqual(flags, mpu.p)
def test_sta_ind_indexed_x_stores_a_leaves_a_and_z_flag_unchanged(self):
mpu = self._make_mpu()
mpu.p = flags = 0xFF & ~(mpu.ZERO)
mpu.a = 0x00
mpu.x = 0x03
# $0000 STA ($0010,X)
# $0013 Vector to $FEED
self._write(mpu.memory, 0x0000, (0x81, 0x10))
self._write(mpu.memory, 0x0013, (0xED, 0xFE))
mpu.memory[0xFEED] = 0xFF
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x00, mpu.memory[0xFEED])
self.assertEqual(0x00, mpu.a)
self.assertEqual(flags, mpu.p)
# STA Indexed, Indirect (Y)
def test_sta_indexed_ind_y_stores_a_leaves_a_and_n_flag_unchanged(self):
mpu = self._make_mpu()
mpu.p = flags = 0xFF & ~(mpu.NEGATIVE)
mpu.a = 0xFF
mpu.y = 0x03
# $0000 STA ($0010),Y
# $0010 Vector to $FEED
self._write(mpu.memory, 0x0000, (0x91, 0x10))
self._write(mpu.memory, 0x0010, (0xED, 0xFE))
mpu.memory[0xFEED + mpu.y] = 0x00
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0xFF, mpu.memory[0xFEED + mpu.y])
self.assertEqual(0xFF, mpu.a)
self.assertEqual(flags, mpu.p)
def test_sta_indexed_ind_y_stores_a_leaves_a_and_z_flag_unchanged(self):
mpu = self._make_mpu()
mpu.p = flags = 0xFF & ~(mpu.ZERO)
mpu.a = 0x00
mpu.y = 0x03
# $0000 STA ($0010),Y
# $0010 Vector to $FEED
self._write(mpu.memory, 0x0000, (0x91, 0x10))
self._write(mpu.memory, 0x0010, (0xED, 0xFE))
mpu.memory[0xFEED + mpu.y] = 0xFF
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x00, mpu.memory[0xFEED + mpu.y])
self.assertEqual(0x00, mpu.a)
self.assertEqual(flags, mpu.p)
# STA Zero Page, X-Indexed
def test_sta_zp_x_indexed_stores_a_leaves_a_and_n_flag_unchanged(self):
mpu = self._make_mpu()
mpu.p = flags = 0xFF & ~(mpu.NEGATIVE)
mpu.a = 0xFF
mpu.x = 0x03
# $0000 STA $0010,X
self._write(mpu.memory, 0x0000, (0x95, 0x10))
mpu.memory[0x0010 + mpu.x] = 0x00
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0xFF, mpu.memory[0x0010 + mpu.x])
self.assertEqual(0xFF, mpu.a)
self.assertEqual(flags, mpu.p)
def test_sta_zp_x_indexed_stores_a_leaves_a_and_z_flag_unchanged(self):
mpu = self._make_mpu()
mpu.p = flags = 0xFF & ~(mpu.ZERO)
mpu.a = 0x00
mpu.x = 0x03
# $0000 STA $0010,X
self._write(mpu.memory, 0x0000, (0x95, 0x10))
mpu.memory[0x0010 + mpu.x] = 0xFF
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x00, mpu.memory[0x0010 + mpu.x])
self.assertEqual(0x00, mpu.a)
self.assertEqual(flags, mpu.p)
# STX Absolute
def test_stx_absolute_stores_x_leaves_x_and_n_flag_unchanged(self):
mpu = self._make_mpu()
mpu.p = flags = 0xFF & ~(mpu.NEGATIVE)
mpu.x = 0xFF
# $0000 STX $ABCD
self._write(mpu.memory, 0x0000, (0x8E, 0xCD, 0xAB))
mpu.memory[0xABCD] = 0x00
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0xFF, mpu.memory[0xABCD])
self.assertEqual(0xFF, mpu.x)
self.assertEqual(flags, mpu.p)
def test_stx_absolute_stores_x_leaves_x_and_z_flag_unchanged(self):
mpu = self._make_mpu()
mpu.p = flags = 0xFF & ~(mpu.ZERO)
mpu.x = 0x00
# $0000 STX $ABCD
self._write(mpu.memory, 0x0000, (0x8E, 0xCD, 0xAB))
mpu.memory[0xABCD] = 0xFF
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x00, mpu.memory[0xABCD])
self.assertEqual(0x00, mpu.x)
self.assertEqual(flags, mpu.p)
# STX Zero Page
def test_stx_zp_stores_x_leaves_x_and_n_flag_unchanged(self):
mpu = self._make_mpu()
mpu.p = flags = 0xFF & ~(mpu.NEGATIVE)
mpu.x = 0xFF
# $0000 STX $0010
self._write(mpu.memory, 0x0000, (0x86, 0x10))
mpu.memory[0x0010] = 0x00
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0xFF, mpu.memory[0x0010])
self.assertEqual(0xFF, mpu.x)
self.assertEqual(flags, mpu.p)
def test_stx_zp_stores_x_leaves_x_and_z_flag_unchanged(self):
mpu = self._make_mpu()
mpu.p = flags = 0xFF & ~(mpu.ZERO)
mpu.x = 0x00
# $0000 STX $0010
self._write(mpu.memory, 0x0000, (0x86, 0x10))
mpu.memory[0x0010] = 0xFF
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x00, mpu.memory[0x0010])
self.assertEqual(0x00, mpu.x)
self.assertEqual(flags, mpu.p)
# STX Zero Page, Y-Indexed
def test_stx_zp_y_indexed_stores_x_leaves_x_and_n_flag_unchanged(self):
mpu = self._make_mpu()
mpu.p = flags = 0xFF & ~(mpu.NEGATIVE)
mpu.x = 0xFF
mpu.y = 0x03
# $0000 STX $0010,Y
self._write(mpu.memory, 0x0000, (0x96, 0x10))
mpu.memory[0x0010 + mpu.y] = 0x00
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0xFF, mpu.memory[0x0010 + mpu.y])
self.assertEqual(0xFF, mpu.x)
self.assertEqual(flags, mpu.p)
def test_stx_zp_y_indexed_stores_x_leaves_x_and_z_flag_unchanged(self):
mpu = self._make_mpu()
mpu.p = flags = 0xFF & ~(mpu.ZERO)
mpu.x = 0x00
mpu.y = 0x03
# $0000 STX $0010,Y
self._write(mpu.memory, 0x0000, (0x96, 0x10))
mpu.memory[0x0010 + mpu.y] = 0xFF
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x00, mpu.memory[0x0010 + mpu.y])
self.assertEqual(0x00, mpu.x)
self.assertEqual(flags, mpu.p)
# STY Absolute
def test_sty_absolute_stores_y_leaves_y_and_n_flag_unchanged(self):
mpu = self._make_mpu()
mpu.p = flags = 0xFF & ~(mpu.NEGATIVE)
mpu.y = 0xFF
# $0000 STY $ABCD
self._write(mpu.memory, 0x0000, (0x8C, 0xCD, 0xAB))
mpu.memory[0xABCD] = 0x00
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0xFF, mpu.memory[0xABCD])
self.assertEqual(0xFF, mpu.y)
self.assertEqual(flags, mpu.p)
def test_sty_absolute_stores_y_leaves_y_and_z_flag_unchanged(self):
mpu = self._make_mpu()
mpu.p = flags = 0xFF & ~(mpu.ZERO)
mpu.y = 0x00
# $0000 STY $ABCD
self._write(mpu.memory, 0x0000, (0x8C, 0xCD, 0xAB))
mpu.memory[0xABCD] = 0xFF
mpu.step()
self.assertEqual(0x0003, mpu.pc)
self.assertEqual(0x00, mpu.memory[0xABCD])
self.assertEqual(0x00, mpu.y)
self.assertEqual(flags, mpu.p)
# STY Zero Page
def test_sty_zp_stores_y_leaves_y_and_n_flag_unchanged(self):
mpu = self._make_mpu()
mpu.p = flags = 0xFF & ~(mpu.NEGATIVE)
mpu.y = 0xFF
# $0000 STY $0010
self._write(mpu.memory, 0x0000, (0x84, 0x10))
mpu.memory[0x0010] = 0x00
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0xFF, mpu.memory[0x0010])
self.assertEqual(0xFF, mpu.y)
self.assertEqual(flags, mpu.p)
def test_sty_zp_stores_y_leaves_y_and_z_flag_unchanged(self):
mpu = self._make_mpu()
mpu.p = flags = 0xFF & ~(mpu.ZERO)
mpu.y = 0x00
# $0000 STY $0010
self._write(mpu.memory, 0x0000, (0x84, 0x10))
mpu.memory[0x0010] = 0xFF
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x00, mpu.memory[0x0010])
self.assertEqual(0x00, mpu.y)
self.assertEqual(flags, mpu.p)
# STY Zero Page, X-Indexed
def test_sty_zp_x_indexed_stores_y_leaves_y_and_n_flag_unchanged(self):
mpu = self._make_mpu()
mpu.p = flags = 0xFF & ~(mpu.NEGATIVE)
mpu.y = 0xFF
mpu.x = 0x03
# $0000 STY $0010,X
self._write(mpu.memory, 0x0000, (0x94, 0x10))
mpu.memory[0x0010 + mpu.x] = 0x00
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0xFF, mpu.memory[0x0010 + mpu.x])
self.assertEqual(0xFF, mpu.y)
self.assertEqual(flags, mpu.p)
def test_sty_zp_x_indexed_stores_y_leaves_y_and_z_flag_unchanged(self):
mpu = self._make_mpu()
mpu.p = flags = 0xFF & ~(mpu.ZERO)
mpu.y = 0x00
mpu.x = 0x03
# $0000 STY $0010,X
self._write(mpu.memory, 0x0000, (0x94, 0x10))
mpu.memory[0x0010 + mpu.x] = 0xFF
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x00, mpu.memory[0x0010 + mpu.x])
self.assertEqual(0x00, mpu.y)
self.assertEqual(flags, mpu.p)
# TAX
def test_tax_transfers_accumulator_into_x(self):
mpu = self._make_mpu()
mpu.a = 0xAB
mpu.x = 0x00
# $0000 TAX
mpu.memory[0x0000] = 0xAA
mpu.step()
self.assertEqual(0x0001, mpu.pc)
self.assertEqual(0xAB, mpu.a)
self.assertEqual(0xAB, mpu.x)
def test_tax_sets_negative_flag(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.NEGATIVE)
mpu.a = 0x80
mpu.x = 0x00
# $0000 TAX
mpu.memory[0x0000] = 0xAA
mpu.step()
self.assertEqual(0x0001, mpu.pc)
self.assertEqual(0x80, mpu.a)
self.assertEqual(0x80, mpu.x)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
def test_tax_sets_zero_flag(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.ZERO)
mpu.a = 0x00
mpu.x = 0xFF
# $0000 TAX
mpu.memory[0x0000] = 0xAA
mpu.step()
self.assertEqual(0x0001, mpu.pc)
self.assertEqual(0x00, mpu.a)
self.assertEqual(0x00, mpu.x)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
# TAY
def test_tay_transfers_accumulator_into_y(self):
mpu = self._make_mpu()
mpu.a = 0xAB
mpu.y = 0x00
# $0000 TAY
mpu.memory[0x0000] = 0xA8
mpu.step()
self.assertEqual(0x0001, mpu.pc)
self.assertEqual(0xAB, mpu.a)
self.assertEqual(0xAB, mpu.y)
def test_tay_sets_negative_flag(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.NEGATIVE)
mpu.a = 0x80
mpu.y = 0x00
# $0000 TAY
mpu.memory[0x0000] = 0xA8
mpu.step()
self.assertEqual(0x0001, mpu.pc)
self.assertEqual(0x80, mpu.a)
self.assertEqual(0x80, mpu.y)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
def test_tay_sets_zero_flag(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.ZERO)
mpu.a = 0x00
mpu.y = 0xFF
# $0000 TAY
mpu.memory[0x0000] = 0xA8
mpu.step()
self.assertEqual(0x0001, mpu.pc)
self.assertEqual(0x00, mpu.a)
self.assertEqual(0x00, mpu.y)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
# TSX
def test_tsx_transfers_stack_pointer_into_x(self):
mpu = self._make_mpu()
mpu.sp = 0xAB
mpu.x = 0x00
# $0000 TSX
mpu.memory[0x0000] = 0xBA
mpu.step()
self.assertEqual(0x0001, mpu.pc)
self.assertEqual(0xAB, mpu.sp)
self.assertEqual(0xAB, mpu.x)
def test_tsx_sets_negative_flag(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.NEGATIVE)
mpu.sp = 0x80
mpu.x = 0x00
# $0000 TSX
mpu.memory[0x0000] = 0xBA
mpu.step()
self.assertEqual(0x0001, mpu.pc)
self.assertEqual(0x80, mpu.sp)
self.assertEqual(0x80, mpu.x)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
def test_tsx_sets_zero_flag(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.ZERO)
mpu.sp = 0x00
mpu.x = 0xFF
# $0000 TSX
mpu.memory[0x0000] = 0xBA
mpu.step()
self.assertEqual(0x0001, mpu.pc)
self.assertEqual(0x00, mpu.sp)
self.assertEqual(0x00, mpu.x)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
# TXA
def test_txa_transfers_x_into_a(self):
mpu = self._make_mpu()
mpu.x = 0xAB
mpu.a = 0x00
# $0000 TXA
mpu.memory[0x0000] = 0x8A
mpu.step()
self.assertEqual(0x0001, mpu.pc)
self.assertEqual(0xAB, mpu.a)
self.assertEqual(0xAB, mpu.x)
def test_txa_sets_negative_flag(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.NEGATIVE)
mpu.x = 0x80
mpu.a = 0x00
# $0000 TXA
mpu.memory[0x0000] = 0x8A
mpu.step()
self.assertEqual(0x0001, mpu.pc)
self.assertEqual(0x80, mpu.a)
self.assertEqual(0x80, mpu.x)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
def test_txa_sets_zero_flag(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.ZERO)
mpu.x = 0x00
mpu.a = 0xFF
# $0000 TXA
mpu.memory[0x0000] = 0x8A
mpu.step()
self.assertEqual(0x0001, mpu.pc)
self.assertEqual(0x00, mpu.a)
self.assertEqual(0x00, mpu.x)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
# TXS
def test_txs_transfers_x_into_stack_pointer(self):
mpu = self._make_mpu()
mpu.x = 0xAB
# $0000 TXS
mpu.memory[0x0000] = 0x9A
mpu.step()
self.assertEqual(0x0001, mpu.pc)
self.assertEqual(0xAB, mpu.sp)
self.assertEqual(0xAB, mpu.x)
def test_txs_does_not_set_negative_flag(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.NEGATIVE)
mpu.x = 0x80
# $0000 TXS
mpu.memory[0x0000] = 0x9A
mpu.step()
self.assertEqual(0x0001, mpu.pc)
self.assertEqual(0x80, mpu.sp)
self.assertEqual(0x80, mpu.x)
self.assertEqual(0, mpu.p & mpu.NEGATIVE)
def test_txs_does_not_set_zero_flag(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.ZERO)
mpu.x = 0x00
# $0000 TXS
mpu.memory[0x0000] = 0x9A
mpu.step()
self.assertEqual(0x0001, mpu.pc)
self.assertEqual(0x00, mpu.sp)
self.assertEqual(0x00, mpu.x)
self.assertEqual(0, mpu.p & mpu.ZERO)
# TYA
def test_tya_transfers_y_into_a(self):
mpu = self._make_mpu()
mpu.y = 0xAB
mpu.a = 0x00
# $0000 TYA
mpu.memory[0x0000] = 0x98
mpu.step()
self.assertEqual(0x0001, mpu.pc)
self.assertEqual(0xAB, mpu.a)
self.assertEqual(0xAB, mpu.y)
def test_tya_sets_negative_flag(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.NEGATIVE)
mpu.y = 0x80
mpu.a = 0x00
# $0000 TYA
mpu.memory[0x0000] = 0x98
mpu.step()
self.assertEqual(0x0001, mpu.pc)
self.assertEqual(0x80, mpu.a)
self.assertEqual(0x80, mpu.y)
self.assertEqual(mpu.NEGATIVE, mpu.p & mpu.NEGATIVE)
def test_tya_sets_zero_flag(self):
mpu = self._make_mpu()
mpu.p &= ~(mpu.ZERO)
mpu.y = 0x00
mpu.a = 0xFF
# $0000 TYA
mpu.memory[0x0000] = 0x98
mpu.step()
self.assertEqual(0x00, mpu.a)
self.assertEqual(0x00, mpu.y)
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
self.assertEqual(0x0001, mpu.pc)
def test_decorated_addressing_modes_are_valid(self):
valid_modes = [x[0] for x in py65.assembler.Assembler.Addressing]
mpu = self._make_mpu()
for name, mode in mpu.disassemble:
self.assertTrue(mode in valid_modes)
def test_brk_interrupt(self):
mpu = self._make_mpu()
mpu.p = 0x00
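# $FFFE-$FFFF is the IRQ/BRK vector; point it at the handler at $0400.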
self._write(mpu.memory, 0xFFFE, (0x00, 0x04))
self._write(mpu.memory, 0x0000, (0xA9, 0x01, # LDA #$01
0x00, 0xEA, # BRK + skipped byte
0xEA, 0xEA, # NOP, NOP
0xA9, 0x03)) # LDA #$03
self._write(mpu.memory, 0x0400, (0xA9, 0x02, # LDA #$02
0x40)) # RTI
mpu.step() # LDA #$01
self.assertEqual(0x01, mpu.a)
self.assertEqual(0x0002, mpu.pc)
mpu.step() # BRK
self.assertEqual(0x0400, mpu.pc)
mpu.step() # LDA #$02
self.assertEqual(0x02, mpu.a)
self.assertEqual(0x0402, mpu.pc)
mpu.step() # RTI
self.assertEqual(0x0004, mpu.pc)
mpu.step() # A NOP
mpu.step() # The second NOP
mpu.step() # LDA #$03
self.assertEqual(0x03, mpu.a)
self.assertEqual(0x0008, mpu.pc)
# Test Helpers
def _write(self, memory, start_address, bytes):
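"""Copy the given sequence of byte values into memory at start_address."""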
memory[start_address:start_address + len(bytes)] = bytes
def _make_mpu(self, *args, **kargs):
klass = self._get_target_class()
mpu = klass(*args, **kargs)
if 'memory' not in kargs:
mpu.memory = 0x10000 * [0xAA]
return mpu
def _get_target_class(self):
raise NotImplementedError("Target class not specified")
class MPUTests(unittest.TestCase, Common6502Tests):
""" NMOS 6502 tests """
def test_repr(self):
mpu = self._make_mpu()
self.assertTrue("6502" in repr(mpu))
# ADC Indirect, Indexed (X)
def test_adc_ind_indexed_has_page_wrap_bug(self):
mpu = self._make_mpu()
mpu.p = 0x00
mpu.a = 0x01
mpu.x = 0xFF
# $0000 ADC ($80,X)
# $007f Vector to $BBBB (read if page wrapped)
# $017f Vector to $ABCD (read if no page wrap)
self._write(mpu.memory, 0x0000, (0x61, 0x80))
self._write(mpu.memory, 0x007f, (0xBB, 0xBB))
self._write(mpu.memory, 0x017f, (0xCD, 0xAB))
mpu.memory[0xABCD] = 0x01
mpu.memory[0xBBBB] = 0x02
mpu.step()
self.assertEqual(0x03, mpu.a)
# ADC Indexed, Indirect (Y)
def test_adc_indexed_ind_y_has_page_wrap_bug(self):
mpu = self._make_mpu()
mpu.pc = 0x1000
mpu.p = 0
mpu.a = 0x42
mpu.y = 0x02
# $1000 ADC ($FF),Y
self._write(mpu.memory, 0x1000, (0x71, 0xff))
# Vector
mpu.memory[0x00ff] = 0x10 # low byte
mpu.memory[0x0100] = 0x20 # high byte if no page wrap
mpu.memory[0x0000] = 0x00 # high byte if page wrapped
# Data
mpu.memory[0x2012] = 0x14 # read if no page wrap
mpu.memory[0x0012] = 0x42 # read if page wrapped
mpu.step()
self.assertEqual(0x84, mpu.a)
# LDA Zero Page, X-Indexed
def test_lda_zp_x_indexed_page_wraps(self):
mpu = self._make_mpu()
mpu.a = 0x00
mpu.x = 0xFF
# $0000 LDA $80,X
self._write(mpu.memory, 0x0000, (0xB5, 0x80))
mpu.memory[0x007F] = 0x42
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x42, mpu.a)
# AND Indexed, Indirect (Y)
def test_and_indexed_ind_y_has_page_wrap_bug(self):
mpu = self._make_mpu()
mpu.pc = 0x1000
mpu.a = 0x42
mpu.y = 0x02
# $1000 AND ($FF),Y
self._write(mpu.memory, 0x1000, (0x31, 0xff))
# Vector
mpu.memory[0x00ff] = 0x10 # low byte
mpu.memory[0x0100] = 0x20 # high byte if no page wrap
mpu.memory[0x0000] = 0x00 # high byte if page wrapped
# Data
mpu.memory[0x2012] = 0x00 # read if no page wrap
mpu.memory[0x0012] = 0xFF # read if page wrapped
mpu.step()
self.assertEqual(0x42, mpu.a)
# BRK
def test_brk_preserves_decimal_flag_when_it_is_set(self):
mpu = self._make_mpu()
mpu.p = mpu.DECIMAL
# $C000 BRK
mpu.memory[0xC000] = 0x00
mpu.pc = 0xC000
mpu.step()
self.assertEqual(mpu.BREAK, mpu.p & mpu.BREAK)
self.assertEqual(mpu.DECIMAL, mpu.p & mpu.DECIMAL)
def test_brk_preserves_decimal_flag_when_it_is_clear(self):
mpu = self._make_mpu()
mpu.p = 0
# $C000 BRK
mpu.memory[0xC000] = 0x00
mpu.pc = 0xC000
mpu.step()
self.assertEqual(mpu.BREAK, mpu.p & mpu.BREAK)
self.assertEqual(0, mpu.p & mpu.DECIMAL)
# CMP Indirect, Indexed (X)
def test_cmp_ind_x_has_page_wrap_bug(self):
mpu = self._make_mpu()
mpu.p = 0
mpu.a = 0x42
mpu.x = 0xFF
# $0000 CMP ($80,X)
# $007f Vector to $BBBB (read if page wrapped)
# $017f Vector to $ABCD (read if no page wrap)
self._write(mpu.memory, 0x0000, (0xC1, 0x80))
self._write(mpu.memory, 0x007f, (0xBB, 0xBB))
self._write(mpu.memory, 0x017f, (0xCD, 0xAB))
mpu.memory[0xABCD] = 0x00
mpu.memory[0xBBBB] = 0x42
mpu.step()
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
# CMP Indexed, Indirect (Y)
def test_cmp_indexed_ind_y_has_page_wrap_bug(self):
mpu = self._make_mpu()
mpu.pc = 0x1000
mpu.p = 0
mpu.a = 0x42
mpu.y = 0x02
# $1000 CMP ($FF),Y
self._write(mpu.memory, 0x1000, (0xd1, 0xff))
# Vector
mpu.memory[0x00ff] = 0x10 # low byte
mpu.memory[0x0100] = 0x20 # high byte if no page wrap
mpu.memory[0x0000] = 0x00 # high byte if page wrapped
# Data
mpu.memory[0x2012] = 0x14 # read if no page wrap
mpu.memory[0x0012] = 0x42 # read if page wrapped
mpu.step()
self.assertEqual(mpu.ZERO, mpu.p & mpu.ZERO)
# EOR Indirect, Indexed (X)
def test_eor_ind_x_has_page_wrap_bug(self):
mpu = self._make_mpu()
mpu.p = 0
mpu.a = 0xAA
mpu.x = 0xFF
# $0000 EOR ($80,X)
# $007f Vector to $BBBB (read if page wrapped)
# $017f Vector to $ABCD (read if no page wrap)
self._write(mpu.memory, 0x0000, (0x41, 0x80))
self._write(mpu.memory, 0x007f, (0xBB, 0xBB))
self._write(mpu.memory, 0x017f, (0xCD, 0xAB))
mpu.memory[0xABCD] = 0x00
mpu.memory[0xBBBB] = 0xFF
mpu.step()
self.assertEqual(0x55, mpu.a)
# EOR Indexed, Indirect (Y)
def test_eor_indexed_ind_y_has_page_wrap_bug(self):
mpu = self._make_mpu()
mpu.pc = 0x1000
mpu.a = 0xAA
mpu.y = 0x02
# $1000 EOR ($FF),Y
self._write(mpu.memory, 0x1000, (0x51, 0xff))
# Vector
mpu.memory[0x00ff] = 0x10 # low byte
mpu.memory[0x0100] = 0x20 # high byte if no page wrap
mpu.memory[0x0000] = 0x00 # high byte if page wrapped
# Data
mpu.memory[0x2012] = 0x00 # read if no page wrap
mpu.memory[0x0012] = 0xFF # read if page wrapped
mpu.step()
self.assertEqual(0x55, mpu.a)
# LDA Indirect, Indexed (X)
def test_lda_ind_indexed_x_has_page_wrap_bug(self):
mpu = self._make_mpu()
mpu.a = 0x00
mpu.x = 0xff
# $0000 LDA ($80,X)
# $007f Vector to $BBBB (read if page wrapped)
# $017f Vector to $ABCD (read if no page wrap)
self._write(mpu.memory, 0x0000, (0xA1, 0x80))
self._write(mpu.memory, 0x007f, (0xBB, 0xBB))
self._write(mpu.memory, 0x017f, (0xCD, 0xAB))
mpu.memory[0xABCD] = 0x42
mpu.memory[0xBBBB] = 0xEF
mpu.step()
self.assertEqual(0xEF, mpu.a)
# LDA Indexed, Indirect (Y)
def test_lda_indexed_ind_y_has_page_wrap_bug(self):
mpu = self._make_mpu()
mpu.pc = 0x1000
mpu.a = 0x00
mpu.y = 0x02
# $1000 LDA ($FF),Y
self._write(mpu.memory, 0x1000, (0xb1, 0xff))
# Vector
mpu.memory[0x00ff] = 0x10 # low byte
mpu.memory[0x0100] = 0x20 # high byte if no page wrap
mpu.memory[0x0000] = 0x00 # high byte if page wrapped
# Data
mpu.memory[0x2012] = 0x14 # read if no page wrap
mpu.memory[0x0012] = 0x42 # read if page wrapped
mpu.step()
self.assertEqual(0x42, mpu.a)
# LDA Zero Page, X-Indexed
def test_lda_zp_x_has_page_wrap_bug(self):
mpu = self._make_mpu()
mpu.a = 0x00
mpu.x = 0xFF
# $0000 LDA $80,X
self._write(mpu.memory, 0x0000, (0xB5, 0x80))
mpu.memory[0x007F] = 0x42
mpu.step()
self.assertEqual(0x0002, mpu.pc)
self.assertEqual(0x42, mpu.a)
# JMP Indirect
def test_jmp_jumps_to_address_with_page_wrap_bug(self):
mpu = self._make_mpu()
mpu.memory[0x00ff] = 0
# $0000 JMP ($00FF)
self._write(mpu.memory, 0, (0x6c, 0xff, 0x00))
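# The low byte of the target comes from $00FF (0x00); the high byte is read
# from $0000 (0x6C) rather than $0100, because the NMOS 6502 does not carry
# into the pointer's high byte. The jump therefore lands at $6C00.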
mpu.step()
self.assertEqual(0x6c00, mpu.pc)
self.assertEqual(5, mpu.processorCycles)
# ORA Indexed, Indirect (Y)
def test_ora_indexed_ind_y_has_page_wrap_bug(self):
mpu = self._make_mpu()
mpu.pc = 0x1000
mpu.a = 0x00
mpu.y = 0x02
# $1000 ORA ($FF),Y
self._write(mpu.memory, 0x1000, (0x11, 0xff))
# Vector
mpu.memory[0x00ff] = 0x10 # low byte
mpu.memory[0x0100] = 0x20 # high byte if no page wrap
mpu.memory[0x0000] = 0x00 # high byte if page wrapped
# Data
mpu.memory[0x2012] = 0x00 # read if no page wrap
mpu.memory[0x0012] = 0x42 # read if page wrapped
mpu.step()
self.assertEqual(0x42, mpu.a)
# SBC Indexed, Indirect (Y)
def test_sbc_indexed_ind_y_has_page_wrap_bug(self):
mpu = self._make_mpu()
mpu.pc = 0x1000
mpu.p = mpu.CARRY
mpu.a = 0x42
mpu.y = 0x02
# $1000 SBC ($FF),Y
self._write(mpu.memory, 0x1000, (0xf1, 0xff))
# Vector
mpu.memory[0x00ff] = 0x10 # low byte
mpu.memory[0x0100] = 0x20 # high byte if no page wrap
mpu.memory[0x0000] = 0x00 # high byte if page wrapped
# Data
mpu.memory[0x2012] = 0x02 # read if no page wrap
mpu.memory[0x0012] = 0x03 # read if page wrapped
mpu.step()
self.assertEqual(0x3f, mpu.a)
def _get_target_class(self):
return py65.devices.mpu6502.MPU
def test_suite():
return unittest.findTestCases(sys.modules[__name__])
if __name__ == '__main__':
unittest.main(defaultTest='test_suite')
|
mkeller0815/py65
|
py65/tests/devices/test_mpu6502.py
|
Python
|
bsd-3-clause
| 203,687
|
# -*- coding: utf-8 -*-
from django.db import models
from django.core.urlresolvers import reverse
from irpmemr.models import CommonModel
class PatientVisit(CommonModel):
patient = models.OneToOneField('patient.PatientInformation')
first_visit = models.DateTimeField(null=True, blank=True)
second_visit = models.DateTimeField(null=True, blank=True)
third_visit = models.DateTimeField(null=True, blank=True)
fourth_visit = models.DateTimeField(null=True, blank=True)
fifth_visit = models.DateTimeField(null=True, blank=True)
other_visit = models.DateTimeField(null=True, blank=True)
def __unicode__(self):
return str(self.patient.get_fullname())
class HospitalAdmission(CommonModel):
patient = models.ForeignKey('patient.PatientInformation')
hospital_name = models.CharField(max_length=50)
date_of_admission = models.DateTimeField("Date of admission")
date_of_discharge = models.DateTimeField("Date of discharge")
def __unicode__(self):
return str(self.patient.get_fullname())
def get_absolute_url(self):
return reverse('patient_admission_view', args=[self.patient.id, self.id])
|
aazhbd/medical_info01
|
admission/models.py
|
Python
|
bsd-3-clause
| 1,166
|
from json import loads
from cms.api import create_page
from cms.models import Page
from cms.models.fields import PageField
from django.core.files import File as DjangoFile
from django.core.files.temp import NamedTemporaryFile
from django.db import models
from django.utils.encoding import force_bytes, python_2_unicode_compatible
from django.utils.functional import cached_property
from django.utils.text import slugify
from django.utils.translation import ugettext_lazy as _
from filer.fields.folder import FilerFolderField
from filer.models import File, Folder
from cms_articles.api import add_content, create_article, publish_article
from cms_articles.conf import settings
from .utils import create_redirect
try:
from urllib.request import urlopen
except ImportError:
from urllib2 import urlopen
@python_2_unicode_compatible
class Author(models.Model):
author_id = models.IntegerField(_('author id'), unique=True)
login = models.CharField(_('login name'), max_length=255)
email = models.EmailField(_('email'), blank=True, null=True)
first_name = models.CharField(_('first name'), max_length=255, blank=True, null=True)
last_name = models.CharField(_('last name'), max_length=255, blank=True, null=True)
user = models.ForeignKey(
settings.AUTH_USER_MODEL, verbose_name=_('user'),
related_name='+', on_delete=models.SET_NULL, blank=True, null=True)
def __str__(self):
return '{}'.format(self.login)
class Meta:
verbose_name = _('author')
verbose_name_plural = _('authors')
@python_2_unicode_compatible
class Category(models.Model):
term_id = models.IntegerField(_('term id'), unique=True)
name = models.CharField(_('name'), max_length=255)
slug = models.SlugField(_('slug'))
parent = models.CharField(_('parent slug'), max_length=255, blank=True, null=True)
cached_name = models.CharField(_('name'), max_length=512, blank=True, null=True)
category = models.ForeignKey(
'cms_articles.Category', verbose_name=_('articles category'),
related_name='+', on_delete=models.SET_NULL, blank=True, null=True)
def __str__(self):
try:
parent = Category.objects.get(slug=self.parent)
except Category.DoesNotExist:
parent = None
if parent:
name = '{} / {}'.format(parent.name, self.name)
else:
name = '{}'.format(self.name)
if name != self.cached_name:
self.cached_name = name
self.save()
return name
class Meta:
verbose_name = _('category')
verbose_name_plural = _('categories')
@python_2_unicode_compatible
class Item(models.Model):
title = models.TextField(_('title'), default='')
link = models.CharField(_('link'), max_length=255)
pub_date = models.DateTimeField(_('publication date'))
created_by = models.ForeignKey(Author, verbose_name=_('created by'))
guid = models.CharField(_('url'), max_length=255)
description = models.TextField(_('description'))
content = models.TextField(_('content'))
excerpt = models.TextField(_('excerpt'))
post_id = models.IntegerField(_('post id'), unique=True)
post_date = models.DateTimeField(_('post date'))
post_name = models.CharField(_('post name'), max_length=255)
status = models.CharField(_('status'), max_length=20)
post_parent = models.IntegerField(_('parent post id'))
post_type = models.CharField(_('type'), max_length=20)
categories = models.ManyToManyField(Category, verbose_name=_('categories'), blank=True)
postmeta = models.TextField(_('metadata'))
article = models.OneToOneField(
'cms_articles.Article', verbose_name=_('imported article'),
related_name='+', on_delete=models.SET_NULL, blank=True, null=True)
page = models.OneToOneField(
'cms.Page', verbose_name=_('imported page'),
related_name='+', on_delete=models.SET_NULL, blank=True, null=True)
file = models.OneToOneField(
File, verbose_name=_('imported file'),
related_name='+', on_delete=models.SET_NULL, blank=True, null=True)
folder = models.ForeignKey(
Folder, verbose_name=_('attachments folder'),
related_name='+', on_delete=models.SET_NULL, blank=True, null=True)
def __str__(self):
return '{}'.format(self.title)
class Meta:
verbose_name = _('item')
verbose_name_plural = _('items')
@property
def children(self):
return Item.objects.filter(post_parent=self.post_id)
@cached_property
def parent(self):
if self.post_parent:
try:
return Item.objects.get(post_id=self.post_parent)
except Item.DoesNotExist:
pass
return None
@cached_property
def meta(self):
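# postmeta stores the post's WordPress metadata serialized as JSON
# (e.g. the '_thumbnail_id' key used when importing article images).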
return loads(self.postmeta)
def cms_import(self, options):
obj = None
if self.post_type == 'post':
obj = self.get_or_import_article(options)
elif self.post_type == 'page':
obj = self.get_or_import_page(options)
elif self.post_type == 'attachment':
obj = self.get_or_import_file(options)
# also import children
for child in self.children.all():
child.cms_import(options)
return obj
def get_or_import_article(self, options):
assert self.post_type == 'post'
if self.article:
return self.article
# import thumbnail
image = None
if '_thumbnail_id' in self.meta:
image_item = Item.objects.get(post_id=int(self.meta['_thumbnail_id']))
image = image_item.get_or_import_file(options)
self.article = create_article(
tree=options.article_tree,
template=options.article_template,
title=self.title,
language=options.language,
description=self.excerpt,
created_by=self.created_by.user or self.created_by.login,
image=image,
publicationdate=self.pub_date,
categories=[c.category for c in self.categories.exclude(category=None)],
)
self.article.creation_date = self.post_date
self.article.save()
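# WordPress separates paragraphs with blank lines; wrap each chunk in <p>
# tags so the imported text renders as proper paragraphs.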
content = '\n'.join('<p>{}</p>'.format(p) for p in self.content.split('\n\n'))
add_content(self.article, language=options.language, slot=options.article_slot, content=content)
if options.article_publish:
self.article = publish_article(
article=self.article,
language=options.language,
changed_by=self.created_by.user or self.created_by.login,
)
public = self.article.get_public_object()
public.creation_date = self.pub_date
public.save()
if options.article_redirects:
create_redirect(self.link, self.article.get_absolute_url())
self.save()
return self.article
def get_or_import_page(self, options):
assert self.post_type == 'page'
if self.page:
return self.page
# import parent page first
if self.parent:
parent = self.parent.get_or_import_page(options)
else:
parent = options.page_root
# get valid slug
slug = self.post_name or slugify(self.title)
assert slug
# handle existing page
self.page = Page.objects.filter(parent=parent, title_set__slug=slug).first()
if self.page:
self.save()
return self.page
# create new page
self.page = create_page(
template=options.page_template,
language=options.language,
title=self.title,
slug=slug,
meta_description=None,
created_by=self.created_by.user or self.created_by.login,
parent=parent,
publication_date=self.pub_date,
)
self.page.creation_date = self.post_date
self.page.save()
content = '\n'.join('<p>{}</p>'.format(p) for p in self.content.split('\n\n'))
add_content(self.page, language=options.language, slot=options.page_slot, content=content)
if options.page_publish:
self.page.publish(options.language)
public = self.page.get_public_object()
public.creation_date = self.pub_date
public.save()
if options.page_redirects:
create_redirect(self.link, self.page.get_absolute_url())
self.save()
return self.page
def get_or_import_file(self, options):
from filer.management.commands.import_files import FileImporter
assert self.post_type == 'attachment'
if self.file:
return self.file
# download content into deleted temp_file
temp_file = NamedTemporaryFile(delete=True)
temp_file.write(urlopen(force_bytes(self.guid)).read())
temp_file.flush()
# create DjangoFile object
django_file = DjangoFile(temp_file, name=self.guid.split('/')[-1])
# choose folder
if self.parent:
folder = self.parent.get_or_create_folder(options)
else:
folder = options.file_folder
# import file
self.file = FileImporter().import_file(file_obj=django_file, folder=folder)
# set date and owner
self.file.created_at = self.pub_date
self.file.owner = self.created_by.user
self.file.save()
# return imported file
self.save()
return self.file
def get_or_create_folder(self, options):
assert self.children.count() > 0
if self.folder:
return self.folder
# do not create sub-folders for slides
if self.post_type == 'slide':
self.folder = options.slide_folder
self.save()
return self.folder
parent = options.get_folder(self.post_type)
self.folder, new = Folder.objects.get_or_create(parent=parent, name=self.title)
if new:
self.folder.created_at = self.post_date
self.folder.owner = self.created_by.user
self.folder.save()
self.save()
return self.folder
@python_2_unicode_compatible
class Options(models.Model):
name = models.CharField(_('name'), max_length=255, unique=True)
# global options
language = models.CharField(_('language'), max_length=15, help_text=_('The language of the content fields.'))
# article specific options
article_tree = models.ForeignKey(
Page, verbose_name=_('tree'), related_name='+',
help_text=_('All posts will be imported as articles in this tree.'),
limit_choices_to={
'publisher_is_draft': False,
'application_urls': 'CMSArticlesApp',
'node__site_id': settings.SITE_ID,
})
article_template = models.CharField(
_('template'), max_length=100,
choices=settings.CMS_ARTICLES_TEMPLATES,
default=settings.CMS_ARTICLES_TEMPLATES[0][0])
article_slot = models.CharField(
_('slot'), max_length=255, default=settings.CMS_ARTICLES_SLOT,
help_text=_('The name of placeholder used to create content plugins in.'))
article_folder = FilerFolderField(
verbose_name=_('attachments folder'), related_name='+', on_delete=models.SET_NULL, blank=True, null=True,
help_text=_('Select folder for articles. Subfolder will be created for each article with attachments.'))
article_redirects = models.BooleanField(
_('create redirects'), default=True,
help_text=_('Create django redirects for each article from the old path to the new imported path'))
article_publish = models.BooleanField(_('publish'), default=False, help_text=_('Publish imported articles.'))
# page specific options
page_root = PageField(
verbose_name=_('root'), related_name='+', on_delete=models.SET_NULL, blank=True, null=True,
help_text=_('All pages will be imported as sub-pages of this page.'))
page_template = models.CharField(
_('template'), max_length=100,
choices=Page.template_choices, default=Page.TEMPLATE_DEFAULT)
page_slot = models.CharField(
_('slot'), max_length=255, default='content',
help_text=_('The name of placeholder used to create content plugins in.'))
page_folder = FilerFolderField(
verbose_name=_('attachments folder'), related_name='+', on_delete=models.SET_NULL, blank=True, null=True,
help_text=_('Select folder for pages. Subfolder will be created for each page with attachments.'))
page_redirects = models.BooleanField(
_('create redirects'), default=True,
help_text=_('Create django redirects for each page from the old path to the new imported path'))
page_publish = models.BooleanField(_('publish'), default=False, help_text=_('Publish imported pages.'))
# file specific options
gallery_folder = FilerFolderField(
verbose_name=_('folder'), related_name='+', on_delete=models.SET_NULL, blank=True, null=True,
help_text=_('Select folder for galleries. Subfolder will be created for each gallery.'))
# file specific options
slide_folder = FilerFolderField(
verbose_name=_('folder'), related_name='+', on_delete=models.SET_NULL, blank=True, null=True,
help_text=_('Select folder for slides.'))
# file specific options
file_folder = FilerFolderField(
verbose_name=_('folder'), related_name='+', on_delete=models.SET_NULL, blank=True, null=True,
help_text=_('Select folder for other attachments.'),
)
def __str__(self):
return '{}'.format(self.name)
class Meta:
verbose_name = _('options')
verbose_name_plural = _('options')
@cached_property
def folders(self):
return {
'post': self.article_folder,
'page': self.page_folder,
'gallery': self.gallery_folder,
'slide': self.slide_folder,
}
def get_folder(self, post_type):
return self.folders.get(post_type, self.file_folder)
|
misli/django-cms-articles
|
cms_articles/import_wordpress/models.py
|
Python
|
bsd-3-clause
| 14,142
|
# Django settings for dojo project.
import os
DEBUG = True
LOGIN_REDIRECT_URL = '/'
# SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTOCOL', 'https')
# SECURE_SSL_REDIRECT = True
# SECURE_BROWSER_XSS_FILTER = True
SESSION_COOKIE_HTTPONLY = True
CSRF_COOKIE_HTTPONLY = True
TEST_RUNNER = 'django.test.runner.DiscoverRunner'
URL_PREFIX = ''
# Uncomment this line if you enable SSL
# SESSION_COOKIE_SECURE = True
# CSRF_COOKIE_SECURE = True
ADMINS = (
('Your Name', 'your.name@yourdomain'),
)
MANAGERS = ADMINS
DOJO_ROOT = 'DOJODIR'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql', # Add 'postgresql_psycopg2',
# 'mysql','sqlite3' or 'oracle'.
'NAME': 'MYSQLDB', # Or path to database file if using sqlite3.
# The following settings are not used with sqlite3:
'USER': 'MYSQLUSER',
'PASSWORD': 'MYSQLPWD',
'HOST': 'MYSQLHOST', # Empty for localhost through domain sockets
# or '127.0.0.1' for localhost through TCP.
'PORT': 'MYSQLPORT', # Set to empty string for default.
}
}
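# Note: the ALL-CAPS values in this file (DOJODIR, MYSQLDB, MYSQLUSER, ...)
# appear to be placeholders that the install process substitutes with real
# values before the settings are used.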
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = 'DOJO_MEDIA_ROOT'
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = '/media/'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = "DOJO_STATIC_ROOT"
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'djangobower.finders.BowerFinder',
)
FILE_UPLOAD_HANDLERS = ("django.core.files.uploadhandler.TemporaryFileUploadHandler",)
# where should bower install components
# collect static will move them to the static root
BOWER_COMPONENTS_ROOT = 'BOWERDIR'
# what components should be installed
BOWER_INSTALLED_APPS = (
'fontawesome',
'https://github.com/BlackrockDigital/startbootstrap-sb-admin-2.git',
'fullcalendar',
'jquery-cookie',
'jquery-ui',
'jquery-highlight',
# directly from github since no bower comp available
'https://github.com/jumjum123/JUMFlot.git',
'https://github.com/markrcote/flot-axislabels.git',
'chosen',
'chosen-bootstrap',
'bootswatch-dist#readable',
'bootstrap-wysiwyg-steveathon',
'justgage'
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'DOJOSECRET'
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
# 'django.middleware.security.SecurityMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'dojo.middleware.LoginRequiredMiddleware',
)
ROOT_URLCONF = 'dojo.urls'
LOGIN_URL = '/login'
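# Requests whose paths match these patterns bypass LoginRequiredMiddleware
# (see MIDDLEWARE_CLASSES above) and can be reached without authenticating.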
LOGIN_EXEMPT_URLS = (
r'^%sstatic/' % URL_PREFIX,
r'^%swebhook/' % URL_PREFIX,
r'^%smetrics/all$' % URL_PREFIX,
r'^%smetrics$' % URL_PREFIX,
r'^%smetrics/product/type/(?P<mtype>\d+)$' % URL_PREFIX,
r'^%smetrics/simple$' % URL_PREFIX,
r'^%sapi/v1/' % URL_PREFIX,
r'^%sajax/v1/' % URL_PREFIX,
r'^%sreports/cover$' % URL_PREFIX,
r'^%sfinding/image/(?P<token>[^/]+)$' % URL_PREFIX
)
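# Note (comment added for clarity): each pattern above is built from
# URL_PREFIX, defined elsewhere in this settings module. If URL_PREFIX is the
# empty string, the first entry expands to r'^static/', so requests such as
# /static/css/dojo.css bypass LoginRequiredMiddleware.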
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'dojo.wsgi.application'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'APP_DIRS': True,
'OPTIONS': {
'debug': DEBUG,
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'polymorphic', # provides admin templates
'overextends',
'django.contrib.admin',
'django.contrib.humanize',
'gunicorn',
'tastypie',
'djangobower',
'auditlog',
'dojo',
'tastypie_swagger',
'watson',
'tagging',
'custom_field',
'imagekit',
)
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = 'smtpout.your_domain.com'
EMAIL_PORT = '25'
EMAIL_USE_TLS = True
PORT_SCAN_CONTACT_EMAIL = 'email@your_host'
PORT_SCAN_RESULT_EMAIL_FROM = 'email@your_host'
PORT_SCAN_EXTERNAL_UNIT_EMAIL_LIST = ['email@your_host']
PORT_SCAN_SOURCE_IP = '127.0.0.1'
# Used in a few places to prefix page headings and in email
# salutations
TEAM_NAME = 'Security Engineering'
# Celery settings
BROKER_URL = 'sqla+sqlite:///dojo.celerydb.sqlite'
CELERY_SEND_TASK_ERROR_EMAILS = True
CELERY_IGNORE_RESULT = True
CELERY_TIMEZONE = TIME_ZONE
CELERY_TASK_RESULT_EXPIRES = 86400
CELERYBEAT_SCHEDULE_FILENAME = DOJO_ROOT + '/dojo.celery.beat.db'
CELERY_ACCEPT_CONTENT = ['pickle', 'json', 'msgpack', 'yaml']
# wkhtmltopdf settings
WKHTMLTOPDF_PATH = '/usr/local/bin/wkhtmltopdf'
# django-tagging settings
FORCE_LOWERCASE_TAGS = True
MAX_TAG_LENGTH = 25
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'verbose': {
'format': '[%(asctime)s] %(levelname)s [%(name)s:%(lineno)d] %(message)s',
'datefmt': '%d/%b/%Y %H:%M:%S',
},
'simple': {
'format': '%(levelname)s %(funcName)s %(lineno)d %(message)s'
},
},
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
|
grendel513/django-DefectDojo
|
dojo/settings.dist.py
|
Python
|
bsd-3-clause
| 8,264
|
# Copyright (c) 2012 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Module defining the YAMLConnector class."""
import os
driver = True
try:
import yaml
except ImportError:
driver = False
from dc.connector import DataConnector
from dc import exceptions
from dc.yaml.configuration import YAMLConfiguration
from dc.yaml.driver import YAMLDriver
from dc.yaml.query_manager import YAMLQueryManager
from dc.yaml.repository_manager import YAMLRepositoryManager
from model import exceptions as mod_exceptions
from model.functions import *
class YAMLConnector(DataConnector):
"""Data connector for YAML.
    This data connector reads and writes data in YAML format, using
    the yaml library.
A very short example:
# Table: users
- id: 1
username: admin
email_address: admin@python-aboard.org
"""
name = "yaml"
configuration = YAMLConfiguration
driver = YAMLDriver
query_manager = YAMLQueryManager
repository_manager = YAMLRepositoryManager
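# A minimal usage sketch (illustrative only, not part of this connector's
# API): the row layout shown in the class docstring parses, via the yaml
# library, into one dict per row, which is the shape this connector works
# with.
#
# import yaml
# rows = yaml.safe_load("- id: 1\n  username: admin\n- id: 2\n  username: guest\n")
# # rows == [{'id': 1, 'username': 'admin'}, {'id': 2, 'username': 'guest'}]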
|
v-legoff/pa-poc3
|
src/dc/yaml/connector.py
|
Python
|
bsd-3-clause
| 2,500
|
#!/usr/bin/env python3
import numpy
from copy import deepcopy
visitedStates = []
nodeNumber = 0
class State():
"""Lol"""
def __init__(self, floor, genM, chM):
self.floor = floor
self.genM = genM
self.chM = chM
self.pairVector = numpy.sum(numpy.logical_and(genM, chM), 1)
self.singleChipVector = numpy.sum(chM, 1) - self.pairVector
self.singleGenVector = numpy.sum(genM, 1) - self.pairVector
self.singleAllVector = self.singleChipVector + self.singleGenVector
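        # Note (comment added for clarity): genM and chM are 4x7 boolean
        # matrices (floor x element). pairVector counts, per floor, the
        # elements whose chip and generator share that floor;
        # singleChipVector / singleGenVector count the unpaired chips and
        # generators, so __eq__ below treats element-permuted configurations
        # as the same search state.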
def __eq__(self, other):
# return ((self.pairVector == other.pairVector).all() and
# (self.singleChipVector == other.singleChipVector).all() and
# (self.singleGenVector == other.singleGenVector).all())
return ((self.singleAllVector == other.singleAllVector).all() and
#(self.pairVector == other.pairVector).all() and
(self.singleChipVector == other.singleChipVector).all() and
# (self.genM == other.genM).all() and
# (self.chM == other.chM).all() and
(self.singleGenVector == other.singleGenVector).all() and
(self.floor == other.floor)) # and
# (self.singleGenVector == other.singleGenVector).all())
class Node():
def __init__(self, depth, floor, genM, chM, parentNode):
self.depth = depth
self.floor = floor
self.genM = deepcopy(genM)
self.chM = deepcopy(chM)
self.visited = False
if parentNode is not None:
self.parentNode = parentNode
else:
self.parentNode = self
global nodeNumber
self.nodeNumber = nodeNumber
nodeNumber += 1
self.childs = []
# print('New node: # {}'.format(self.nodeNumber))
# visitedMatrices.append(self.genM+self.chM)
if numpy.sum(self.chM) + numpy.sum(self.genM) != 14:
print(self.nodeNumber, self.floor)
print('genM, chM')
print(self.genM)
print(self.chM)
raise Exception("something is wrong!")
if self.isWinner():
raise Exception("found a winner at it. {}".format(self.depth))
def isSafe(self):
chipsWithProtection = numpy.multiply(self.genM, self.chM)
chipsWithoutProtection = self.chM-chipsWithProtection
vulnerableChipNumberPerFloor = numpy.sum(chipsWithoutProtection, 1)
generatorNumberPerFloor = numpy.sum(self.genM, 1)
if self.nodeNumber == 2:
# print(' node {} info: '.format(self.nodeNumber))
# print(self.genM)
# print(self.chM)
# input()
pass
if ((numpy.multiply(vulnerableChipNumberPerFloor,
generatorNumberPerFloor)).any()):
return False
else:
if (self.floor == self.parentNode.parentNode.floor and
(self.chM == self.parentNode.parentNode.chM).all() and
(self.genM == self.parentNode.parentNode.genM).all()):
# print('bop')
return False
aState = State(self.floor, self.genM, self.chM)
if aState in visitedStates:
return False
else:
visitedStates.append(aState)
return True
def isWinner(self):
return (numpy.sum(self.chM, 1)[3] == 7 and
numpy.sum(self.genM, 1)[3] == 7)
def grow(self):
if not self.visited:
possibleFloors = [self.floor+1, self.floor-1]
if possibleFloors[0] > 3:
possibleFloors.pop(0)
elif possibleFloors[1] < 0:
possibleFloors.pop(1)
# 1: try to move only a generator or two
for pos1 in range(7):
for pos2 in range(pos1, 7):
genPosVec = numpy.logical_or(numpy.array(range(7)) == pos1,
numpy.array(range(7)) == pos2)
genPosVec = genPosVec*1
if (numpy.sum(numpy.multiply(genPosVec,
self.genM[self.floor]))
== numpy.sum(genPosVec)).all():
subGenM = self.genM
# print(self.genM)
subGenM[self.floor] = self.genM[self.floor]-genPosVec
# print(subGenM)
for floor in possibleFloors:
subGenM[floor] = self.genM[floor]+genPosVec
newChild = Node(self.depth+1, floor, subGenM,
self.chM, self)
if newChild.isSafe():
self.childs.append(newChild)
subGenM[floor] = self.genM[floor]-genPosVec
subGenM[self.floor] = self.genM[self.floor]+genPosVec
# 2: try to move only a chip
for pos1 in range(7):
for pos2 in range(pos1, 7):
chPosVec = (numpy.array(range(7)) == pos1)*1
chPosVec = numpy.logical_or(numpy.array(range(7)) == pos1,
numpy.array(range(7)) == pos2)
chPosVec = chPosVec*1
if (numpy.sum(numpy.multiply(chPosVec,
self.chM[self.floor]))
== numpy.sum(chPosVec)).all():
subChM = self.chM
# print(self.genM)
subChM[self.floor] = self.chM[self.floor]-chPosVec
# print(subGenM)
for floor in possibleFloors:
subChM[floor] = self.chM[floor]+chPosVec
newChild = Node(self.depth+1, floor, self.genM,
subChM, self)
if newChild.isSafe():
self.childs.append(newChild)
subChM[floor] = self.chM[floor]-chPosVec
subChM[self.floor] = self.chM[self.floor]+chPosVec
# 3: try to move a chip and a generator
for pos1 in range(7):
for pos2 in range(7):
genPosVec = (numpy.array(range(7)) == pos1)*1
chPosVec = (numpy.array(range(7)) == pos2)*1
if (numpy.multiply(genPosVec,
self.genM[self.floor]).any() and
numpy.multiply(chPosVec, self.chM[self.floor]).any()):
subGenM = self.genM
subChM = self.chM
subGenM[self.floor] = self.genM[self.floor]-genPosVec
subChM[self.floor] = self.chM[self.floor]-chPosVec
for floor in possibleFloors:
subGenM[floor] = self.genM[floor]+genPosVec
subChM[floor] = self.chM[floor]+chPosVec
newChild = Node(self.depth+1, floor, subGenM,
subChM, self)
if newChild.isSafe():
self.childs.append(newChild)
subGenM[floor] = self.genM[floor]-genPosVec
subChM[floor] = self.chM[floor]-chPosVec
subGenM[self.floor] = self.genM[self.floor]+genPosVec
subChM[self.floor] = self.chM[self.floor]+chPosVec
global treatedNode
treatedNode += 1
# print('finished treating node {}'.format(self.nodeNumber))
# print('number of children: {}'.format(len(self.childs)))
# print('parent node: {}'.format(self.parentNode.nodeNumber))
# print(self.genM)
# print(self.chM)
self.visited = True
else:
# print("I am node {} with {} childs".format(self.nodeNumber,
# len(self.childs)))
# print(self.genM)
# print(self.chM)
for child in self.childs:
child.grow()
# promethium, cobalt, curium, ruthenium, plutonium
theInputGenerators = [[True, False, False, False, False, True, True],
[False, True, True, True, True, False, False],
[False, False, False, False, False, False, False],
[False, False, False, False, False, False, False]]
theInputChips = [[True, False, False, False, False, True, True],
[False, False, False, False, False, False, False],
[False, True, True, True, True, False, False],
[False, False, False, False, False, False, False]]
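# Note (comment added for clarity): each matrix encodes the puzzle input as
# floors (rows 0-3) x element types (7 columns); True means the generator or
# microchip of that element starts on that floor. The goal checked by
# Node.isWinner() is all seven generators and all seven chips on the top
# floor (row index 3).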
tIG = numpy.array(theInputGenerators, dtype=int)
tIC = numpy.array(theInputChips, dtype=int)
currentFloor = 1
rootNode = Node(0, 0, tIG, tIC, None)
treatedNode = 0
for i in range(60):
print('iteration {}'.format(i))
treatedNode = 0
rootNode.grow()
if treatedNode == 0:
print('done at iteration {}'.format(i))
break
|
znuxor/aoc2016
|
11b.py
|
Python
|
bsd-3-clause
| 9,276
|
from rest_framework.serializers import ModelSerializer, WritableField
from openbudget.apps.contexts.models import Context
class ContextBaseSerializer(ModelSerializer):
"""Base Context serializer, exposing our defaults for contexts."""
data = WritableField()
class Meta:
model = Context
|
nborwankar/open-budgets
|
openbudget/apps/contexts/serializers.py
|
Python
|
bsd-3-clause
| 310
|
# -*- coding: utf-8 -*-
import sys
import os
import os.path
# installation directory is relative to current script location
# DIRINST=os.path.dirname(sys.argv[0])+"/../"
def usage():
MSG("usage mkax first_step step_incr n_step [ax_unit]")
DST=CURDATA()
if len(sys.argv)<4:
usage()
EXIT()
if len(sys.argv)==5:
axunit=sys.argv[4]
else:
axunit=None
try:
p1=float(sys.argv[1])
except ValueError:
MSG("First argument %s must be convertible to a float" % (sys.argv[1] ,))
usage()
EXIT()
try:
p2=float(sys.argv[2])
except ValueError:
MSG("second argument %s must be convertible to a float" % (sys.argv[2],))
usage()
EXIT()
try:
n=int(sys.argv[3])
except ValueError:
MSG("Third argument %s must be convertible to an int" % (sys.argv[2],))
usage()
EXIT()
axleft=p1-0.5*p2
axright=p1+(n-0.5)*p2
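# Worked example (comment added for clarity): with first_step=10, step_incr=2
# and n_step=5, axleft = 10 - 0.5*2 = 9.0 and axright = 10 + (5 - 0.5)*2 = 19.0,
# so each of the 5 steps sits centred in its own bin of width step_incr.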
PUTPAR("1s AXLEFT",str(axleft))
PUTPAR("1s AXRIGHT",str(axright))
if axunit:
PUTPAR("1s AXUNIT",axunit)
RE(DST)
|
jtrebosc/JTutils
|
TSpy/mkax.py
|
Python
|
bsd-3-clause
| 961
|
"""
Next steps:
- translate the tests so that they comply with Hematite's URL
- path and query string autounquoting
- relative paths autoresolution
- add more positive tests (all of the tests below are for valid urls)
- add negative tests
"""
from hematite.url import URL
TEST_DATA = u"""\
# Based on https://github.com/w3c/web-platform-tests/blob/master/url/urltestdata.txt
# which is based on http://trac.webkit.org/browser/trunk/LayoutTests/fast/url/
#http://example.com/././foo about:blank s:http h:example.com p:/foo # not sure what the about:blank is about.
http://example.com/./.foo s:http h:example.com p:/.foo
http://example.com/foo/. s:http h:example.com p:/foo/
http://example.com/foo/./ s:http h:example.com p:/foo/
http://example.com/foo/bar/.. s:http h:example.com p:/foo/
http://example.com/foo/bar/../ s:http h:example.com p:/foo/
http://example.com/foo/..bar s:http h:example.com p:/foo/..bar
http://example.com/foo/bar/../ton s:http h:example.com p:/foo/ton
http://example.com/foo/bar/../ton/../../a s:http h:example.com p:/a
http://example.com/foo/../../.. s:http h:example.com p:/
http://example.com/foo/../../../ton s:http h:example.com p:/ton
http://example.com/foo/%2e s:http h:example.com p:/foo/
http://example.com/foo/%2e%2 s:http h:example.com p:/foo/%2e%2
http://example.com/foo/%2e./%2e%2e/.%2e/%2e.bar s:http h:example.com p:/%2e.bar
http://example.com////../.. s:http h:example.com p://
http://example.com/foo/bar//../.. s:http h:example.com p:/foo/
http://example.com/foo/bar//.. s:http h:example.com p:/foo/bar/
http://example.com/foo s:http h:example.com p:/foo
http://example.com/%20foo s:http h:example.com p:/%20foo
http://example.com/foo% s:http h:example.com p:/foo%
http://example.com/foo%2 s:http h:example.com p:/foo%2
http://example.com/foo%2zbar s:http h:example.com p:/foo%2zbar
http://example.com/foo%2\u00C2\u00A9zbar s:http h:example.com p:/foo%2%C3%82%C2%A9zbar
http://example.com/foo%41%7a s:http h:example.com p:/foo%41%7a
#http://example.com/foo\t\u0091%91 s:http h:example.com p:/foo%C2%91%91 # 91 is an invalid utf8 starting byte so %91 decoding fails.
http://example.com/foo%00%51 s:http h:example.com p:/foo%00%51
http://example.com/(%28:%3A%29) s:http h:example.com p:/(%28:%3A%29)
http://example.com/%3A%3a%3C%3c s:http h:example.com p:/%3A%3a%3C%3c
http://example.com/foo\tbar s:http h:example.com p:/foobar
http://example.com\\\\foo\\\\bar s:http h:example.com p://foo//bar
http://example.com/%7Ffp3%3Eju%3Dduvgw%3Dd s:http h:example.com p:/%7Ffp3%3Eju%3Dduvgw%3Dd
http://example.com/@asdf%40 s:http h:example.com p:/@asdf%40
http://example.com/\u4F60\u597D\u4F60\u597D s:http h:example.com p:/%E4%BD%A0%E5%A5%BD%E4%BD%A0%E5%A5%BD
http://example.com/\u2025/foo s:http h:example.com p:/%E2%80%A5/foo
http://example.com/\uFEFF/foo s:http h:example.com p:/%EF%BB%BF/foo
http://example.com/\u202E/foo/\u202D/bar s:http h:example.com p:/%E2%80%AE/foo/%E2%80%AD/bar
"""
RES_FIELD_MAP = {'s': 'scheme',
'h': 'host',
'p': 'path',
'port': 'port',
'q': 'query',
'f': 'fragment'}
def parse_test(test_str):
input_str, _, result_str = test_str.partition(' ')
if not result_str:
return None # failed test or invalid format
rfs = result_str.split()
results = {} # 'scheme': rfs[0]}
for field in rfs:
name, _, value = field.partition(':')
results[RES_FIELD_MAP[name]] = value
results['input'] = input_str
return results
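# Illustrative example (added for clarity) of what parse_test returns for one
# line of TEST_DATA above:
#
#   parse_test('http://example.com/foo s:http h:example.com p:/foo')
#   # -> {'scheme': 'http', 'host': 'example.com', 'path': '/foo',
#   #     'input': 'http://example.com/foo'}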
def run_url_tests(data=TEST_DATA):
    for line in data.splitlines():
if not line or line.startswith('#'):
continue
parsed_test = parse_test(line)
url = URL(parsed_test['input'])
print parsed_test, url
for k, v in parsed_test.items():
if k == 'input':
continue
url_value = getattr(url, k, None)
if url_value is not None:
print '-', k, v, url_value
if __name__ == '__main__':
run_url_tests()
|
mahmoud/hematite
|
hematite/tests/test_url_w3c.py
|
Python
|
bsd-3-clause
| 4,082
|
import contextlib
import warnings
import weakref
import xml.etree.ElementTree
from itertools import product
from operator import add
import numpy as np
import pandas as pd
import pytest
from pandas.io.formats import format as pandas_format
import dask
import dask.array as da
import dask.dataframe as dd
import dask.dataframe.groupby
from dask.base import compute_as_if_collection
from dask.blockwise import fuse_roots
from dask.dataframe import _compat, methods
from dask.dataframe._compat import (
PANDAS_GT_110,
PANDAS_GT_120,
PANDAS_GT_140,
PANDAS_GT_150,
tm,
)
from dask.dataframe.core import (
Scalar,
_concat,
_map_freq_to_period_start,
aca,
has_parallel_type,
is_broadcastable,
repartition_divisions,
total_mem_usage,
)
from dask.dataframe.utils import assert_eq, assert_max_deps, make_meta
from dask.datasets import timeseries
from dask.utils import M, is_dataframe_like, is_series_like, put_lines
from dask.utils_test import hlg_layer
dsk = {
("x", 0): pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}, index=[0, 1, 3]),
("x", 1): pd.DataFrame({"a": [4, 5, 6], "b": [3, 2, 1]}, index=[5, 6, 8]),
("x", 2): pd.DataFrame({"a": [7, 8, 9], "b": [0, 0, 0]}, index=[9, 9, 9]),
}
meta = make_meta(
{"a": "i8", "b": "i8"}, index=pd.Index([], "i8"), parent_meta=pd.DataFrame()
)
d = dd.DataFrame(dsk, "x", meta, [0, 5, 9, 9])
full = d.compute()
CHECK_FREQ = {}
if dd._compat.PANDAS_GT_110:
CHECK_FREQ["check_freq"] = False
def test_dataframe_doc():
doc = d.add.__doc__
disclaimer = "Some inconsistencies with the Dask version may exist."
assert disclaimer in doc
def test_dataframe_doc_from_non_pandas():
class Foo:
def foo(self):
"""This is a new docstring that I just made up
Parameters:
----------
None
"""
d._bind_operator_method("foo", Foo.foo, original=Foo)
try:
doc = d.foo.__doc__
disclaimer = "Some inconsistencies with the Dask version may exist."
assert disclaimer in doc
assert "new docstring that I just made up" in doc
finally:
# make sure to clean up this alteration of the dd.DataFrame class
del dd.DataFrame.foo
def test_Dataframe():
expected = pd.Series(
[2, 3, 4, 5, 6, 7, 8, 9, 10], index=[0, 1, 3, 5, 6, 8, 9, 9, 9], name="a"
)
assert_eq(d["a"] + 1, expected)
tm.assert_index_equal(d.columns, pd.Index(["a", "b"]))
assert_eq(d[d["b"] > 2], full[full["b"] > 2])
assert_eq(d[["a", "b"]], full[["a", "b"]])
assert_eq(d.a, full.a)
assert d.b.mean().compute() == full.b.mean()
assert np.allclose(d.b.var().compute(), full.b.var())
assert np.allclose(d.b.std().compute(), full.b.std())
assert d.index._name == d.index._name # this is deterministic
assert repr(d)
def test_head_tail():
assert_eq(d.head(2), full.head(2))
assert_eq(d.head(3), full.head(3))
assert_eq(d.head(2), dsk[("x", 0)].head(2))
assert_eq(d["a"].head(2), full["a"].head(2))
assert_eq(d["a"].head(3), full["a"].head(3))
assert_eq(d["a"].head(2), dsk[("x", 0)]["a"].head(2))
assert sorted(d.head(2, compute=False).dask) == sorted(
d.head(2, compute=False).dask
)
assert sorted(d.head(2, compute=False).dask) != sorted(
d.head(3, compute=False).dask
)
assert_eq(d.tail(2), full.tail(2))
assert_eq(d.tail(3), full.tail(3))
assert_eq(d.tail(2), dsk[("x", 2)].tail(2))
assert_eq(d["a"].tail(2), full["a"].tail(2))
assert_eq(d["a"].tail(3), full["a"].tail(3))
assert_eq(d["a"].tail(2), dsk[("x", 2)]["a"].tail(2))
assert sorted(d.tail(2, compute=False).dask) == sorted(
d.tail(2, compute=False).dask
)
assert sorted(d.tail(2, compute=False).dask) != sorted(
d.tail(3, compute=False).dask
)
def test_head_npartitions():
assert_eq(d.head(5, npartitions=2), full.head(5))
assert_eq(d.head(5, npartitions=2, compute=False), full.head(5))
assert_eq(d.head(5, npartitions=-1), full.head(5))
assert_eq(d.head(7, npartitions=-1), full.head(7))
assert_eq(d.head(2, npartitions=-1), full.head(2))
with pytest.raises(ValueError):
d.head(2, npartitions=5)
def test_head_npartitions_warn():
match = "5 elements requested, only 3 elements"
with pytest.warns(UserWarning, match=match):
d.head(5)
match = "Insufficient elements"
with pytest.warns(UserWarning, match=match):
d.head(100)
with pytest.warns(UserWarning, match=match):
d.head(7)
with pytest.warns(UserWarning, match=match):
d.head(7, npartitions=2)
# No warn if all partitions are inspected
for n in [3, -1]:
with pytest.warns(None) as rec:
d.head(10, npartitions=n)
assert not rec
# With default args, this means that a 1 partition dataframe won't warn
d2 = dd.from_pandas(pd.DataFrame({"x": [1, 2, 3]}), npartitions=1)
with pytest.warns(None) as rec:
d2.head()
assert not rec
def test_index_head():
assert_eq(d.index.head(2), full.index[:2])
assert_eq(d.index.head(3), full.index[:3])
def test_Series():
assert isinstance(d.a, dd.Series)
assert isinstance(d.a + 1, dd.Series)
assert_eq((d + 1), full + 1)
def test_Index():
for case in [
pd.DataFrame(np.random.randn(10, 5), index=list("abcdefghij")),
pd.DataFrame(
np.random.randn(10, 5),
index=pd.date_range("2011-01-01", freq="D", periods=10),
),
]:
ddf = dd.from_pandas(case, 3)
assert_eq(ddf.index, case.index)
pytest.raises(AttributeError, lambda: ddf.index.index)
def test_axes():
pdf = pd.DataFrame({"col1": [1, 2], "col2": [3, 4]})
df = dd.from_pandas(pdf, npartitions=2)
assert len(df.axes) == len(pdf.axes)
assert all(assert_eq(d, p) for d, p in zip(df.axes, pdf.axes))
def test_series_axes():
ps = pd.Series(["abcde"])
ds = dd.from_pandas(ps, npartitions=2)
assert len(ds.axes) == len(ps.axes)
assert all(assert_eq(d, p) for d, p in zip(ds.axes, ps.axes))
def test_Scalar():
val = np.int64(1)
s = Scalar({("a", 0): val}, "a", "i8")
assert hasattr(s, "dtype")
assert "dtype" in dir(s)
assert_eq(s, val)
assert repr(s) == "dd.Scalar<a, dtype=int64>"
val = pd.Timestamp("2001-01-01")
s = Scalar({("a", 0): val}, "a", val)
assert not hasattr(s, "dtype")
assert "dtype" not in dir(s)
assert_eq(s, val)
assert repr(s) == "dd.Scalar<a, type=Timestamp>"
def test_scalar_raises():
val = np.int64(1)
s = Scalar({("a", 0): val}, "a", "i8")
msg = "cannot be converted to a boolean value"
with pytest.raises(TypeError, match=msg):
bool(s)
def test_attributes():
assert "a" in dir(d)
assert "foo" not in dir(d)
pytest.raises(AttributeError, lambda: d.foo)
df = dd.from_pandas(pd.DataFrame({"a b c": [1, 2, 3]}), npartitions=2)
assert "a b c" not in dir(df)
df = dd.from_pandas(pd.DataFrame({"a": [1, 2], 5: [1, 2]}), npartitions=2)
assert "a" in dir(df)
assert 5 not in dir(df)
df = dd.from_pandas(_compat.makeTimeDataFrame(), npartitions=3)
pytest.raises(AttributeError, lambda: df.foo)
def test_column_names():
tm.assert_index_equal(d.columns, pd.Index(["a", "b"]))
tm.assert_index_equal(d[["b", "a"]].columns, pd.Index(["b", "a"]))
assert d["a"].name == "a"
assert (d["a"] + 1).name == "a"
assert (d["a"] + d["b"]).name is None
def test_columns_named_divisions_and_meta():
# https://github.com/dask/dask/issues/7599
df = pd.DataFrame(
{"_meta": [1, 2, 3, 4], "divisions": ["a", "b", "c", "d"]},
index=[0, 1, 3, 5],
)
ddf = dd.from_pandas(df, 2)
assert ddf.divisions == (0, 3, 5)
assert_eq(ddf["divisions"], df.divisions)
assert all(ddf._meta.columns == ["_meta", "divisions"])
assert_eq(ddf["_meta"], df._meta)
def test_index_names():
assert d.index.name is None
idx = pd.Index([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], name="x")
df = pd.DataFrame(np.random.randn(10, 5), idx)
ddf = dd.from_pandas(df, 3)
assert ddf.index.name == "x"
assert ddf.index.compute().name == "x"
@pytest.mark.skipif(dd._compat.PANDAS_GT_130, reason="Freq no longer included in ts")
@pytest.mark.parametrize(
"npartitions",
[
1,
pytest.param(
2,
marks=pytest.mark.xfail(
not dd._compat.PANDAS_GT_110, reason="Fixed upstream."
),
),
],
)
def test_timezone_freq(npartitions):
s_naive = pd.Series(pd.date_range("20130101", periods=10))
s_aware = pd.Series(pd.date_range("20130101", periods=10, tz="US/Eastern"))
pdf = pd.DataFrame({"tz": s_aware, "notz": s_naive})
ddf = dd.from_pandas(pdf, npartitions=npartitions)
assert pdf.tz[0].freq == ddf.compute().tz[0].freq == ddf.tz.compute()[0].freq
def test_rename_columns():
# GH 819
df = pd.DataFrame({"a": [1, 2, 3, 4, 5, 6, 7], "b": [7, 6, 5, 4, 3, 2, 1]})
ddf = dd.from_pandas(df, 2)
ddf.columns = ["x", "y"]
df.columns = ["x", "y"]
tm.assert_index_equal(ddf.columns, pd.Index(["x", "y"]))
tm.assert_index_equal(ddf._meta.columns, pd.Index(["x", "y"]))
assert_eq(ddf, df)
msg = r"Length mismatch: Expected axis has 2 elements, new values have 4 elements"
with pytest.raises(ValueError) as err:
ddf.columns = [1, 2, 3, 4]
assert msg in str(err.value)
# Multi-index columns
df = pd.DataFrame({("A", "0"): [1, 2, 2, 3], ("B", 1): [1, 2, 3, 4]})
ddf = dd.from_pandas(df, npartitions=2)
df.columns = ["x", "y"]
ddf.columns = ["x", "y"]
tm.assert_index_equal(ddf.columns, pd.Index(["x", "y"]))
tm.assert_index_equal(ddf._meta.columns, pd.Index(["x", "y"]))
assert_eq(ddf, df)
def test_rename_series():
# GH 819
s = pd.Series([1, 2, 3, 4, 5, 6, 7], name="x")
ds = dd.from_pandas(s, 2)
s.name = "renamed"
ds.name = "renamed"
assert s.name == "renamed"
assert_eq(ds, s)
ind = s.index
dind = ds.index
ind.name = "renamed"
dind.name = "renamed"
assert ind.name == "renamed"
assert_eq(dind, ind)
def test_rename_series_method():
# Series name
s = pd.Series([1, 2, 3, 4, 5, 6, 7], name="x")
ds = dd.from_pandas(s, 2)
assert_eq(ds.rename("y"), s.rename("y"))
assert ds.name == "x" # no mutation
assert_eq(ds.rename(), s.rename())
assert_eq(ds, s)
def test_rename_series_method_2():
# Series index
s = pd.Series(["a", "b", "c", "d", "e", "f", "g"], name="x")
ds = dd.from_pandas(s, 2)
for is_sorted in [True, False]:
res = ds.rename(lambda x: x**2, sorted_index=is_sorted)
assert_eq(res, s.rename(lambda x: x**2))
assert res.known_divisions == is_sorted
res = ds.rename(s, sorted_index=is_sorted)
assert_eq(res, s.rename(s))
assert res.known_divisions == is_sorted
with pytest.raises(ValueError):
ds.rename(lambda x: -x, sorted_index=True)
assert_eq(ds.rename(lambda x: -x), s.rename(lambda x: -x))
res = ds.rename(ds)
assert_eq(res, s.rename(s))
assert not res.known_divisions
ds2 = ds.clear_divisions()
res = ds2.rename(lambda x: x**2, sorted_index=True)
assert_eq(res, s.rename(lambda x: x**2))
assert not res.known_divisions
res = ds.rename(lambda x: x**2, inplace=True, sorted_index=True)
assert res is ds
s.rename(lambda x: x**2, inplace=True)
assert_eq(ds, s)
@pytest.mark.parametrize(
"method,test_values", [("tdigest", (6, 10)), ("dask", (4, 20))]
)
def test_describe_numeric(method, test_values):
if method == "tdigest":
pytest.importorskip("crick")
    # prepare a test case where the approximate quantiles will equal the actual values
s = pd.Series(list(range(test_values[1])) * test_values[0])
df = pd.DataFrame(
{
"a": list(range(test_values[1])) * test_values[0],
"b": list(range(test_values[0])) * test_values[1],
}
)
ds = dd.from_pandas(s, test_values[0])
ddf = dd.from_pandas(df, test_values[0])
test_quantiles = [0.25, 0.75]
assert_eq(df.describe(), ddf.describe(percentiles_method=method))
assert_eq(s.describe(), ds.describe(percentiles_method=method))
assert_eq(
df.describe(percentiles=test_quantiles),
ddf.describe(percentiles=test_quantiles, percentiles_method=method),
)
assert_eq(s.describe(), ds.describe(split_every=2, percentiles_method=method))
assert_eq(df.describe(), ddf.describe(split_every=2, percentiles_method=method))
# remove string columns
df = pd.DataFrame(
{
"a": list(range(test_values[1])) * test_values[0],
"b": list(range(test_values[0])) * test_values[1],
"c": list("abcdef"[: test_values[0]]) * test_values[1],
}
)
ddf = dd.from_pandas(df, test_values[0])
assert_eq(df.describe(), ddf.describe(percentiles_method=method))
assert_eq(df.describe(), ddf.describe(split_every=2, percentiles_method=method))
@pytest.mark.parametrize(
"include,exclude,percentiles,subset",
[
(None, None, None, ["c", "d"]), # numeric
(None, None, None, ["c", "d", "f"]), # numeric + timedelta
(None, None, None, ["c", "d", "g"]), # numeric + bool
(None, None, None, ["c", "d", "f", "g"]), # numeric + bool + timedelta
(None, None, None, ["f", "g"]), # bool + timedelta
("all", None, None, None),
(["number"], None, [0.25, 0.5], None),
([np.timedelta64], None, None, None),
(["number", "object"], None, [0.25, 0.75], None),
(None, ["number", "object"], None, None),
(["object", "datetime", "bool"], None, None, None),
],
)
def test_describe(include, exclude, percentiles, subset):
data = {
"a": ["aaa", "bbb", "bbb", None, None, "zzz"] * 2,
"c": [None, 0, 1, 2, 3, 4] * 2,
"d": [None, 0, 1] * 4,
"e": [
pd.Timestamp("2017-05-09 00:00:00.006000"),
pd.Timestamp("2017-05-09 00:00:00.006000"),
pd.Timestamp("2017-05-09 07:56:23.858694"),
pd.Timestamp("2017-05-09 05:59:58.938999"),
None,
None,
]
* 2,
"f": [
np.timedelta64(3, "D"),
np.timedelta64(1, "D"),
None,
None,
np.timedelta64(3, "D"),
np.timedelta64(1, "D"),
]
* 2,
"g": [True, False, True] * 4,
}
# Arrange
df = pd.DataFrame(data)
if subset is not None:
df = df.loc[:, subset]
ddf = dd.from_pandas(df, 2)
if PANDAS_GT_110:
datetime_is_numeric_kwarg = {"datetime_is_numeric": True}
else:
datetime_is_numeric_kwarg = {}
# Act
actual = ddf.describe(
include=include,
exclude=exclude,
percentiles=percentiles,
**datetime_is_numeric_kwarg,
)
expected = df.describe(
include=include,
exclude=exclude,
percentiles=percentiles,
**datetime_is_numeric_kwarg,
)
if "e" in expected and datetime_is_numeric_kwarg:
expected.at["mean", "e"] = np.nan
expected.dropna(how="all", inplace=True)
assert_eq(actual, expected)
# Check series
if subset is None:
for col in ["a", "c", "e", "g"]:
expected = df[col].describe(
include=include, exclude=exclude, **datetime_is_numeric_kwarg
)
if col == "e" and datetime_is_numeric_kwarg:
expected.drop("mean", inplace=True)
actual = ddf[col].describe(
include=include, exclude=exclude, **datetime_is_numeric_kwarg
)
assert_eq(expected, actual)
def test_describe_without_datetime_is_numeric():
data = {
"a": ["aaa", "bbb", "bbb", None, None, "zzz"] * 2,
"c": [None, 0, 1, 2, 3, 4] * 2,
"d": [None, 0, 1] * 4,
"e": [
pd.Timestamp("2017-05-09 00:00:00.006000"),
pd.Timestamp("2017-05-09 00:00:00.006000"),
pd.Timestamp("2017-05-09 07:56:23.858694"),
pd.Timestamp("2017-05-09 05:59:58.938999"),
None,
None,
]
* 2,
}
# Arrange
df = pd.DataFrame(data)
ddf = dd.from_pandas(df, 2)
# Assert
assert_eq(ddf.describe(), df.describe())
# Check series
for col in ["a", "c"]:
assert_eq(df[col].describe(), ddf[col].describe())
if PANDAS_GT_110:
with pytest.warns(
FutureWarning,
match=(
"Treating datetime data as categorical rather than numeric in `.describe` is deprecated"
),
):
ddf.e.describe()
else:
assert_eq(df.e.describe(), ddf.e.describe())
with pytest.raises(
NotImplementedError,
match="datetime_is_numeric=True is only supported for pandas >= 1.1.0",
):
ddf.e.describe(datetime_is_numeric=True)
def test_describe_empty():
df_none = pd.DataFrame({"A": [None, None]})
ddf_none = dd.from_pandas(df_none, 2)
df_len0 = pd.DataFrame({"A": [], "B": []})
ddf_len0 = dd.from_pandas(df_len0, 2)
ddf_nocols = dd.from_pandas(pd.DataFrame({}), 2)
    # Pandas has different dtypes for the resulting describe dataframe if there
    # are only None values; pre-compute the dask df to bypass the _meta check
assert_eq(
df_none.describe(), ddf_none.describe(percentiles_method="dask").compute()
)
with pytest.raises((ValueError, RuntimeWarning)):
ddf_len0.describe(percentiles_method="dask").compute()
with pytest.raises(ValueError):
ddf_nocols.describe(percentiles_method="dask").compute()
def test_describe_empty_tdigest():
pytest.importorskip("crick")
df_none = pd.DataFrame({"A": [None, None]})
ddf_none = dd.from_pandas(df_none, 2)
df_len0 = pd.DataFrame({"A": []})
ddf_len0 = dd.from_pandas(df_len0, 2)
ddf_nocols = dd.from_pandas(pd.DataFrame({}), 2)
    # Pandas has different dtypes for the resulting describe dataframe if there
    # are only None values; pre-compute the dask df to bypass the _meta check
assert_eq(
df_none.describe(), ddf_none.describe(percentiles_method="tdigest").compute()
)
with warnings.catch_warnings():
# dask.dataframe should probably filter this, to match pandas, but
# it seems quite difficult.
warnings.simplefilter("ignore", RuntimeWarning)
assert_eq(df_len0.describe(), ddf_len0.describe(percentiles_method="tdigest"))
assert_eq(df_len0.describe(), ddf_len0.describe(percentiles_method="tdigest"))
with pytest.raises(ValueError):
ddf_nocols.describe(percentiles_method="tdigest").compute()
def test_describe_for_possibly_unsorted_q():
"""make sure describe is sorting percentiles parameter, q, properly and can
handle lists, tuples and ndarrays.
See https://github.com/dask/dask/issues/4642.
"""
# prepare test case where quantiles should equal values
A = da.arange(0, 101)
ds = dd.from_dask_array(A)
for q in [None, [0.25, 0.50, 0.75], [0.25, 0.50, 0.75, 0.99], [0.75, 0.5, 0.25]]:
for f_convert in [list, tuple, np.array]:
if q is None:
r = ds.describe(percentiles=q).compute()
else:
r = ds.describe(percentiles=f_convert(q)).compute()
assert_eq(r["25%"], 25.0)
assert_eq(r["50%"], 50.0)
assert_eq(r["75%"], 75.0)
def test_cumulative():
index = [f"row{i:03d}" for i in range(100)]
df = pd.DataFrame(np.random.randn(100, 5), columns=list("abcde"), index=index)
df_out = pd.DataFrame(np.random.randn(100, 5), columns=list("abcde"), index=index)
ddf = dd.from_pandas(df, 5)
ddf_out = dd.from_pandas(df_out, 5)
assert_eq(ddf.cumsum(), df.cumsum())
assert_eq(ddf.cumprod(), df.cumprod())
assert_eq(ddf.cummin(), df.cummin())
assert_eq(ddf.cummax(), df.cummax())
assert_eq(ddf.cumsum(axis=1), df.cumsum(axis=1))
assert_eq(ddf.cumprod(axis=1), df.cumprod(axis=1))
assert_eq(ddf.cummin(axis=1), df.cummin(axis=1))
assert_eq(ddf.cummax(axis=1), df.cummax(axis=1))
np.cumsum(ddf, out=ddf_out)
assert_eq(ddf_out, df.cumsum())
np.cumprod(ddf, out=ddf_out)
assert_eq(ddf_out, df.cumprod())
ddf.cummin(out=ddf_out)
assert_eq(ddf_out, df.cummin())
ddf.cummax(out=ddf_out)
assert_eq(ddf_out, df.cummax())
np.cumsum(ddf, out=ddf_out, axis=1)
assert_eq(ddf_out, df.cumsum(axis=1))
np.cumprod(ddf, out=ddf_out, axis=1)
assert_eq(ddf_out, df.cumprod(axis=1))
ddf.cummin(out=ddf_out, axis=1)
assert_eq(ddf_out, df.cummin(axis=1))
ddf.cummax(out=ddf_out, axis=1)
assert_eq(ddf_out, df.cummax(axis=1))
assert_eq(ddf.a.cumsum(), df.a.cumsum())
assert_eq(ddf.a.cumprod(), df.a.cumprod())
assert_eq(ddf.a.cummin(), df.a.cummin())
assert_eq(ddf.a.cummax(), df.a.cummax())
# With NaNs
df = pd.DataFrame(
{
"a": [1, 2, np.nan, 4, 5, 6, 7, 8],
"b": [1, 2, np.nan, np.nan, np.nan, 5, np.nan, np.nan],
"c": [np.nan] * 8,
}
)
ddf = dd.from_pandas(df, 3)
assert_eq(df.cumsum(), ddf.cumsum())
assert_eq(df.cummin(), ddf.cummin())
assert_eq(df.cummax(), ddf.cummax())
assert_eq(df.cumprod(), ddf.cumprod())
assert_eq(df.cumsum(skipna=False), ddf.cumsum(skipna=False))
assert_eq(df.cummin(skipna=False), ddf.cummin(skipna=False))
assert_eq(df.cummax(skipna=False), ddf.cummax(skipna=False))
assert_eq(df.cumprod(skipna=False), ddf.cumprod(skipna=False))
assert_eq(df.cumsum(axis=1), ddf.cumsum(axis=1))
assert_eq(df.cummin(axis=1), ddf.cummin(axis=1))
assert_eq(df.cummax(axis=1), ddf.cummax(axis=1))
assert_eq(df.cumprod(axis=1), ddf.cumprod(axis=1))
assert_eq(df.cumsum(axis=1, skipna=False), ddf.cumsum(axis=1, skipna=False))
assert_eq(df.cummin(axis=1, skipna=False), ddf.cummin(axis=1, skipna=False))
assert_eq(df.cummax(axis=1, skipna=False), ddf.cummax(axis=1, skipna=False))
assert_eq(df.cumprod(axis=1, skipna=False), ddf.cumprod(axis=1, skipna=False))
@pytest.mark.parametrize(
"func",
[
M.cumsum,
M.cumprod,
pytest.param(
M.cummin,
marks=[
pytest.mark.xfail(
reason="ValueError: Can only compare identically-labeled Series objects"
)
],
),
pytest.param(
M.cummax,
marks=[
pytest.mark.xfail(
reason="ValueError: Can only compare identically-labeled Series objects"
)
],
),
],
)
def test_cumulative_empty_partitions(func):
df = pd.DataFrame({"x": [1, 2, 3, 4, 5, 6, 7, 8]})
ddf = dd.from_pandas(df, npartitions=4)
assert_eq(func(df[df.x < 5]), func(ddf[ddf.x < 5]))
df = pd.DataFrame({"x": [1, 2, 3, 4, None, 5, 6, None, 7, 8]})
ddf = dd.from_pandas(df, npartitions=5)
assert_eq(func(df[df.x < 5]), func(ddf[ddf.x < 5]))
def test_dropna():
df = pd.DataFrame(
{
"x": [np.nan, 2, 3, 4, np.nan, 6],
"y": [1, 2, np.nan, 4, np.nan, np.nan],
"z": [1, 2, 3, 4, np.nan, 6],
},
index=[10, 20, 30, 40, 50, 60],
)
ddf = dd.from_pandas(df, 3)
assert_eq(ddf.x.dropna(), df.x.dropna())
assert_eq(ddf.y.dropna(), df.y.dropna())
assert_eq(ddf.z.dropna(), df.z.dropna())
assert_eq(ddf.dropna(), df.dropna())
assert_eq(ddf.dropna(how="all"), df.dropna(how="all"))
assert_eq(ddf.dropna(subset=["x"]), df.dropna(subset=["x"]))
assert_eq(ddf.dropna(subset=["y", "z"]), df.dropna(subset=["y", "z"]))
assert_eq(
ddf.dropna(subset=["y", "z"], how="all"),
df.dropna(subset=["y", "z"], how="all"),
)
# threshold
assert_eq(df.dropna(thresh=None), df.loc[[20, 40]])
assert_eq(ddf.dropna(thresh=None), df.dropna(thresh=None))
assert_eq(df.dropna(thresh=0), df.loc[:])
assert_eq(ddf.dropna(thresh=0), df.dropna(thresh=0))
assert_eq(df.dropna(thresh=1), df.loc[[10, 20, 30, 40, 60]])
assert_eq(ddf.dropna(thresh=1), df.dropna(thresh=1))
assert_eq(df.dropna(thresh=2), df.loc[[10, 20, 30, 40, 60]])
assert_eq(ddf.dropna(thresh=2), df.dropna(thresh=2))
assert_eq(df.dropna(thresh=3), df.loc[[20, 40]])
assert_eq(ddf.dropna(thresh=3), df.dropna(thresh=3))
# Regression test for https://github.com/dask/dask/issues/6540
df = pd.DataFrame({"_0": [0, 0, np.nan], "_1": [1, 2, 3]})
ddf = dd.from_pandas(df, npartitions=2)
assert_eq(ddf.dropna(subset=["_0"]), df.dropna(subset=["_0"]))
@pytest.mark.parametrize("lower, upper", [(2, 5), (2.5, 3.5)])
def test_clip(lower, upper):
df = pd.DataFrame(
{"a": [1, 2, 3, 4, 5, 6, 7, 8, 9], "b": [3, 5, 2, 5, 7, 2, 4, 2, 4]}
)
ddf = dd.from_pandas(df, 3)
s = pd.Series([1, 2, 3, 4, 5, 6, 7, 8, 9])
ds = dd.from_pandas(s, 3)
assert_eq(ddf.clip(lower=lower, upper=upper), df.clip(lower=lower, upper=upper))
assert_eq(ddf.clip(lower=lower), df.clip(lower=lower))
assert_eq(ddf.clip(upper=upper), df.clip(upper=upper))
assert_eq(ds.clip(lower=lower, upper=upper), s.clip(lower=lower, upper=upper))
assert_eq(ds.clip(lower=lower), s.clip(lower=lower))
assert_eq(ds.clip(upper=upper), s.clip(upper=upper))
def test_squeeze():
df = pd.DataFrame({"x": [1, 3, 6]})
df2 = pd.DataFrame({"x": [0]})
s = pd.Series({"test": 0, "b": 100})
ddf = dd.from_pandas(df, 3)
ddf2 = dd.from_pandas(df2, 3)
ds = dd.from_pandas(s, 2)
assert_eq(df.squeeze(), ddf.squeeze())
assert_eq(pd.Series([0], name="x"), ddf2.squeeze())
assert_eq(ds.squeeze(), s.squeeze())
with pytest.raises(NotImplementedError) as info:
ddf.squeeze(axis=0)
msg = f"{type(ddf)} does not support squeeze along axis 0"
assert msg in str(info.value)
with pytest.raises(ValueError) as info:
ddf.squeeze(axis=2)
msg = f"No axis {2} for object type {type(ddf)}"
assert msg in str(info.value)
with pytest.raises(ValueError) as info:
ddf.squeeze(axis="test")
msg = f"No axis test for object type {type(ddf)}"
assert msg in str(info.value)
def test_where_mask():
pdf1 = pd.DataFrame(
{"a": [1, 2, 3, 4, 5, 6, 7, 8, 9], "b": [3, 5, 2, 5, 7, 2, 4, 2, 4]}
)
ddf1 = dd.from_pandas(pdf1, 2)
pdf2 = pd.DataFrame({"a": [True, False, True] * 3, "b": [False, False, True] * 3})
ddf2 = dd.from_pandas(pdf2, 2)
# different index
pdf3 = pd.DataFrame(
{"a": [1, 2, 3, 4, 5, 6, 7, 8, 9], "b": [3, 5, 2, 5, 7, 2, 4, 2, 4]},
index=[0, 1, 2, 3, 4, 5, 6, 7, 8],
)
ddf3 = dd.from_pandas(pdf3, 2)
pdf4 = pd.DataFrame(
{"a": [True, False, True] * 3, "b": [False, False, True] * 3},
index=[5, 6, 7, 8, 9, 10, 11, 12, 13],
)
ddf4 = dd.from_pandas(pdf4, 2)
# different columns
pdf5 = pd.DataFrame(
{
"a": [1, 2, 3, 4, 5, 6, 7, 8, 9],
"b": [9, 4, 2, 6, 2, 3, 1, 6, 2],
"c": [5, 6, 7, 8, 9, 10, 11, 12, 13],
},
index=[0, 1, 2, 3, 4, 5, 6, 7, 8],
)
ddf5 = dd.from_pandas(pdf5, 2)
pdf6 = pd.DataFrame(
{
"a": [True, False, True] * 3,
"b": [False, False, True] * 3,
"c": [False] * 9,
"d": [True] * 9,
},
index=[5, 6, 7, 8, 9, 10, 11, 12, 13],
)
ddf6 = dd.from_pandas(pdf6, 2)
cases = [
(ddf1, ddf2, pdf1, pdf2),
(ddf1.repartition([0, 3, 6, 8]), ddf2, pdf1, pdf2),
(ddf1, ddf4, pdf3, pdf4),
(ddf3.repartition([0, 4, 6, 8]), ddf4.repartition([5, 9, 10, 13]), pdf3, pdf4),
(ddf5, ddf6, pdf5, pdf6),
(ddf5.repartition([0, 4, 7, 8]), ddf6, pdf5, pdf6),
# use pd.DataFrame as cond
(ddf1, pdf2, pdf1, pdf2),
(ddf1, pdf4, pdf3, pdf4),
(ddf5, pdf6, pdf5, pdf6),
]
for ddf, ddcond, pdf, pdcond in cases:
assert isinstance(ddf, dd.DataFrame)
assert isinstance(ddcond, (dd.DataFrame, pd.DataFrame))
assert isinstance(pdf, pd.DataFrame)
assert isinstance(pdcond, pd.DataFrame)
assert_eq(ddf.where(ddcond), pdf.where(pdcond))
assert_eq(ddf.mask(ddcond), pdf.mask(pdcond))
assert_eq(ddf.where(ddcond, -ddf), pdf.where(pdcond, -pdf))
assert_eq(ddf.mask(ddcond, -ddf), pdf.mask(pdcond, -pdf))
assert_eq(ddf.where(ddcond.a, -ddf), pdf.where(pdcond.a, -pdf))
assert_eq(ddf.mask(ddcond.a, -ddf), pdf.mask(pdcond.a, -pdf))
assert_eq(ddf.a.where(ddcond.a), pdf.a.where(pdcond.a))
assert_eq(ddf.a.mask(ddcond.a), pdf.a.mask(pdcond.a))
assert_eq(ddf.a.where(ddcond.a, -ddf.a), pdf.a.where(pdcond.a, -pdf.a))
assert_eq(ddf.a.mask(ddcond.a, -ddf.a), pdf.a.mask(pdcond.a, -pdf.a))
def test_map_partitions_multi_argument():
assert_eq(dd.map_partitions(lambda a, b: a + b, d.a, d.b), full.a + full.b)
assert_eq(
dd.map_partitions(lambda a, b, c: a + b + c, d.a, d.b, 1), full.a + full.b + 1
)
def test_map_partitions():
assert_eq(d.map_partitions(lambda df: df, meta=d), full)
assert_eq(d.map_partitions(lambda df: df), full)
result = d.map_partitions(lambda df: df.sum(axis=1))
layer = hlg_layer(result.dask, "lambda-")
assert not layer.is_materialized(), layer
assert_eq(result, full.sum(axis=1))
assert_eq(
d.map_partitions(lambda df: 1),
pd.Series([1, 1, 1], dtype=np.int64),
check_divisions=False,
)
x = Scalar({("x", 0): 1}, "x", int)
result = dd.map_partitions(lambda x: 2, x)
assert result.dtype in (np.int32, np.int64) and result.compute() == 2
result = dd.map_partitions(lambda x: 4.0, x)
assert result.dtype == np.float64 and result.compute() == 4.0
def test_map_partitions_type():
result = d.map_partitions(type).compute(scheduler="single-threaded")
assert isinstance(result, pd.Series)
assert all(x == pd.DataFrame for x in result)
def test_map_partitions_partition_info():
def f(df, partition_info=None):
assert partition_info is not None
assert "number" in partition_info
assert "division" in partition_info
assert dsk[("x", partition_info["number"])].equals(df)
assert dsk[("x", d.divisions.index(partition_info["division"]))].equals(df)
return df
df = d.map_partitions(f, meta=d)
layer = hlg_layer(df.dask, "f-")
assert not layer.is_materialized()
df.dask.validate()
result = df.compute(scheduler="single-threaded")
assert type(result) == pd.DataFrame
def test_map_partitions_names():
func = lambda x: x
assert sorted(dd.map_partitions(func, d, meta=d).dask) == sorted(
dd.map_partitions(func, d, meta=d).dask
)
assert sorted(dd.map_partitions(lambda x: x, d, meta=d, token=1).dask) == sorted(
dd.map_partitions(lambda x: x, d, meta=d, token=1).dask
)
func = lambda x, y: x
assert sorted(dd.map_partitions(func, d, d, meta=d).dask) == sorted(
dd.map_partitions(func, d, d, meta=d).dask
)
def test_map_partitions_column_info():
df = pd.DataFrame({"x": [1, 2, 3, 4], "y": [5, 6, 7, 8]})
a = dd.from_pandas(df, npartitions=2)
b = dd.map_partitions(lambda x: x, a, meta=a)
tm.assert_index_equal(b.columns, a.columns)
assert_eq(df, b)
b = dd.map_partitions(lambda x: x, a.x, meta=a.x)
assert b.name == a.x.name
assert_eq(df.x, b)
b = dd.map_partitions(lambda x: x, a.x, meta=a.x)
assert b.name == a.x.name
assert_eq(df.x, b)
b = dd.map_partitions(lambda df: df.x + df.y, a)
assert isinstance(b, dd.Series)
assert b.dtype == "i8"
b = dd.map_partitions(lambda df: df.x + 1, a, meta=("x", "i8"))
assert isinstance(b, dd.Series)
assert b.name == "x"
assert b.dtype == "i8"
def test_map_partitions_method_names():
df = pd.DataFrame({"x": [1, 2, 3, 4], "y": [5, 6, 7, 8]})
a = dd.from_pandas(df, npartitions=2)
b = a.map_partitions(lambda x: x)
assert isinstance(b, dd.DataFrame)
tm.assert_index_equal(b.columns, a.columns)
b = a.map_partitions(lambda df: df.x + 1)
assert isinstance(b, dd.Series)
assert b.dtype == "i8"
b = a.map_partitions(lambda df: df.x + 1, meta=("x", "i8"))
assert isinstance(b, dd.Series)
assert b.name == "x"
assert b.dtype == "i8"
def test_map_partitions_propagates_index_metadata():
index = pd.Series(list("abcde"), name="myindex")
df = pd.DataFrame(
{"A": np.arange(5, dtype=np.int32), "B": np.arange(10, 15, dtype=np.int32)},
index=index,
)
ddf = dd.from_pandas(df, npartitions=2)
res = ddf.map_partitions(
lambda df: df.assign(C=df.A + df.B),
meta=[("A", "i4"), ("B", "i4"), ("C", "i4")],
)
sol = df.assign(C=df.A + df.B)
assert_eq(res, sol)
res = ddf.map_partitions(lambda df: df.rename_axis("newindex"))
sol = df.rename_axis("newindex")
assert_eq(res, sol)
@pytest.mark.xfail(reason="now we use SubgraphCallables")
def test_map_partitions_keeps_kwargs_readable():
df = pd.DataFrame({"x": [1, 2, 3, 4], "y": [5, 6, 7, 8]})
a = dd.from_pandas(df, npartitions=2)
def f(s, x=1):
return s + x
b = a.x.map_partitions(f, x=5)
# NOTE: we'd like to ensure that we keep the keyword arguments readable
# in the dask graph
assert "['x', 5]" in str(dict(b.dask)) or "{'x': 5}" in str(dict(b.dask))
assert_eq(df.x + 5, b)
assert a.x.map_partitions(f, x=5)._name != a.x.map_partitions(f, x=6)._name
def test_map_partitions_with_delayed_collection():
# https://github.com/dask/dask/issues/5854
df = pd.DataFrame(columns=list("abcdefghijk"))
ddf = dd.from_pandas(df, 2)
ddf.dropna(subset=list("abcdefghijk")).compute()
# no error!
def test_metadata_inference_single_partition_aligned_args():
# https://github.com/dask/dask/issues/3034
# Previously broadcastable series functionality broke this
df = pd.DataFrame({"x": [1, 2, 3, 4, 5]})
ddf = dd.from_pandas(df, npartitions=1)
def check(df, df_x):
assert len(df) == len(df_x)
assert len(df) > 0
return df
res = dd.map_partitions(check, ddf, ddf.x)
assert_eq(res, ddf)
def test_align_dataframes():
df1 = pd.DataFrame({"A": [1, 2, 3, 3, 2, 3], "B": [1, 2, 3, 4, 5, 6]})
df2 = pd.DataFrame({"A": [3, 1, 2], "C": [1, 2, 3]})
def merge(a, b):
res = pd.merge(a, b, left_on="A", right_on="A", how="left")
return res
expected = merge(df1, df2)
ddf1 = dd.from_pandas(df1, npartitions=2)
actual = ddf1.map_partitions(merge, df2, align_dataframes=False)
assert_eq(actual, expected, check_index=False, check_divisions=False)
def test_drop_duplicates():
res = d.drop_duplicates()
res2 = d.drop_duplicates(split_every=2)
sol = full.drop_duplicates()
assert_eq(res, sol)
assert_eq(res2, sol)
assert res._name != res2._name
res = d.a.drop_duplicates()
res2 = d.a.drop_duplicates(split_every=2)
sol = full.a.drop_duplicates()
assert_eq(res, sol)
assert_eq(res2, sol)
assert res._name != res2._name
res = d.index.drop_duplicates()
res2 = d.index.drop_duplicates(split_every=2)
sol = full.index.drop_duplicates()
assert_eq(res, sol)
assert_eq(res2, sol)
assert res._name != res2._name
with pytest.raises(NotImplementedError):
d.drop_duplicates(keep=False)
def test_drop_duplicates_subset():
df = pd.DataFrame({"x": [1, 2, 3, 1, 2, 3], "y": ["a", "a", "b", "b", "c", "c"]})
ddf = dd.from_pandas(df, npartitions=2)
for kwarg in [{"keep": "first"}, {"keep": "last"}]:
assert_eq(df.x.drop_duplicates(**kwarg), ddf.x.drop_duplicates(**kwarg))
for ss in [["x"], "y", ["x", "y"]]:
assert_eq(
df.drop_duplicates(subset=ss, **kwarg),
ddf.drop_duplicates(subset=ss, **kwarg),
)
assert_eq(df.drop_duplicates(ss, **kwarg), ddf.drop_duplicates(ss, **kwarg))
def test_get_partition():
pdf = pd.DataFrame(np.random.randn(10, 5), columns=list("abcde"))
ddf = dd.from_pandas(pdf, 3)
assert ddf.divisions == (0, 4, 8, 9)
# DataFrame
div1 = ddf.get_partition(0)
assert isinstance(div1, dd.DataFrame)
assert_eq(div1, pdf.loc[0:3])
div2 = ddf.get_partition(1)
assert_eq(div2, pdf.loc[4:7])
div3 = ddf.get_partition(2)
assert_eq(div3, pdf.loc[8:9])
assert len(div1) + len(div2) + len(div3) == len(pdf)
# Series
div1 = ddf.a.get_partition(0)
assert isinstance(div1, dd.Series)
assert_eq(div1, pdf.a.loc[0:3])
div2 = ddf.a.get_partition(1)
assert_eq(div2, pdf.a.loc[4:7])
div3 = ddf.a.get_partition(2)
assert_eq(div3, pdf.a.loc[8:9])
assert len(div1) + len(div2) + len(div3) == len(pdf.a)
with pytest.raises(ValueError):
ddf.get_partition(-1)
with pytest.raises(ValueError):
ddf.get_partition(3)
def test_ndim():
assert d.ndim == 2
assert d.a.ndim == 1
assert d.index.ndim == 1
def test_dtype():
assert (d.dtypes == full.dtypes).all()
def test_value_counts():
df = pd.DataFrame({"x": [1, 2, 1, 3, 3, 1, 4]})
ddf = dd.from_pandas(df, npartitions=3)
result = ddf.x.value_counts()
expected = df.x.value_counts()
assert_eq(result, expected)
result2 = ddf.x.value_counts(split_every=2)
assert_eq(result2, expected)
assert result._name != result2._name
def test_value_counts_not_sorted():
df = pd.DataFrame({"x": [1, 2, 1, 3, 3, 1, 4]})
ddf = dd.from_pandas(df, npartitions=3)
result = ddf.x.value_counts(sort=False)
expected = df.x.value_counts(sort=False)
assert_eq(result, expected)
result2 = ddf.x.value_counts(split_every=2)
assert_eq(result2, expected)
assert result._name != result2._name
def test_value_counts_with_dropna():
df = pd.DataFrame({"x": [1, 2, 1, 3, np.nan, 1, 4]})
ddf = dd.from_pandas(df, npartitions=3)
if not PANDAS_GT_110:
with pytest.raises(NotImplementedError, match="dropna is not a valid argument"):
ddf.x.value_counts(dropna=False)
return
result = ddf.x.value_counts(dropna=False)
expected = df.x.value_counts(dropna=False)
assert_eq(result, expected)
result2 = ddf.x.value_counts(split_every=2, dropna=False)
assert_eq(result2, expected)
assert result._name != result2._name
def test_value_counts_with_normalize():
df = pd.DataFrame({"x": [1, 2, 1, 3, 3, 1, 4]})
ddf = dd.from_pandas(df, npartitions=3)
result = ddf.x.value_counts(normalize=True)
expected = df.x.value_counts(normalize=True)
assert_eq(result, expected)
result2 = ddf.x.value_counts(split_every=2, normalize=True)
assert_eq(result2, expected)
assert result._name != result2._name
result3 = ddf.x.value_counts(split_out=2, normalize=True)
assert_eq(result3, expected)
assert result._name != result3._name
@pytest.mark.skipif(not PANDAS_GT_110, reason="dropna implemented in pandas 1.1.0")
def test_value_counts_with_normalize_and_dropna():
df = pd.DataFrame({"x": [1, 2, 1, 3, np.nan, 1, 4]})
ddf = dd.from_pandas(df, npartitions=3)
result = ddf.x.value_counts(dropna=False, normalize=True)
expected = df.x.value_counts(dropna=False, normalize=True)
assert_eq(result, expected)
result2 = ddf.x.value_counts(split_every=2, dropna=False, normalize=True)
assert_eq(result2, expected)
assert result._name != result2._name
result3 = ddf.x.value_counts(split_out=2, dropna=False, normalize=True)
assert_eq(result3, expected)
assert result._name != result3._name
result4 = ddf.x.value_counts(dropna=True, normalize=True, split_out=2)
expected4 = df.x.value_counts(dropna=True, normalize=True)
assert_eq(result4, expected4)
def test_unique():
pdf = pd.DataFrame(
{
"x": [1, 2, 1, 3, 3, 1, 4, 2, 3, 1],
"y": ["a", "c", "b", np.nan, "c", "b", "a", "d", np.nan, "a"],
}
)
ddf = dd.from_pandas(pdf, npartitions=3)
assert_eq(ddf.x.unique(), pd.Series(pdf.x.unique(), name="x"))
assert_eq(ddf.y.unique(), pd.Series(pdf.y.unique(), name="y"))
assert_eq(ddf.x.unique(split_every=2), pd.Series(pdf.x.unique(), name="x"))
assert_eq(ddf.y.unique(split_every=2), pd.Series(pdf.y.unique(), name="y"))
assert_eq(ddf.index.unique(), pdf.index.unique())
assert ddf.x.unique(split_every=2)._name != ddf.x.unique()._name
def test_isin():
f_list = [1, 2, 3]
f_series = pd.Series(f_list)
f_dict = {"a": [0, 3], "b": [1, 2]}
# Series
assert_eq(d.a.isin(f_list), full.a.isin(f_list))
assert_eq(d.a.isin(f_series), full.a.isin(f_series))
with pytest.raises(NotImplementedError):
d.a.isin(d.a)
# Index
da.utils.assert_eq(d.index.isin(f_list), full.index.isin(f_list))
da.utils.assert_eq(d.index.isin(f_series), full.index.isin(f_series))
with pytest.raises(NotImplementedError):
d.a.isin(d.a)
# DataFrame test
assert_eq(d.isin(f_list), full.isin(f_list))
assert_eq(d.isin(f_dict), full.isin(f_dict))
for obj in [d, f_series, full]:
with pytest.raises(NotImplementedError):
d.isin(obj)
def test_contains_frame():
df = dd.from_pandas(pd.DataFrame({"A": [1, 2], 0: [3, 4]}), 1)
assert "A" in df
assert 0 in df
assert "B" not in df
assert 1 not in df
def test_len():
assert len(d) == len(full)
assert len(d.a) == len(full.a)
assert len(dd.from_pandas(pd.DataFrame(), npartitions=1)) == 0
assert len(dd.from_pandas(pd.DataFrame(columns=[1, 2]), npartitions=1)) == 0
# Regression test for https://github.com/dask/dask/issues/6110
assert len(dd.from_pandas(pd.DataFrame(columns=["foo", "foo"]), npartitions=1)) == 0
def test_size():
assert_eq(d.size, full.size)
assert_eq(d.a.size, full.a.size)
assert_eq(d.index.size, full.index.size)
def test_shape():
result = d.shape
assert_eq((result[0].compute(), result[1]), (len(full), len(full.columns)))
assert_eq(dd.compute(result)[0], (len(full), len(full.columns)))
result = d.a.shape
assert_eq(result[0].compute(), len(full.a))
assert_eq(dd.compute(result)[0], (len(full.a),))
sh = dd.from_pandas(pd.DataFrame(index=[1, 2, 3]), npartitions=2).shape
assert (sh[0].compute(), sh[1]) == (3, 0)
sh = dd.from_pandas(pd.DataFrame({"a": [], "b": []}, index=[]), npartitions=1).shape
assert (sh[0].compute(), sh[1]) == (0, 2)
def test_nbytes():
assert_eq(d.a.nbytes, full.a.nbytes)
assert_eq(d.index.nbytes, full.index.nbytes)
@pytest.mark.parametrize(
"method,expected",
[("tdigest", (0.35, 3.80, 2.5, 6.5, 2.0)), ("dask", (0.0, 4.0, 1.2, 6.2, 2.0))],
)
def test_quantile(method, expected):
if method == "tdigest":
pytest.importorskip("crick")
# series / multiple
result = d.b.quantile([0.3, 0.7], method=method)
    exp = full.b.quantile([0.3, 0.7])  # result may differ
assert len(result) == 2
assert result.divisions == (0.3, 0.7)
assert_eq(result.index, exp.index)
assert isinstance(result, dd.Series)
result = result.compute()
assert isinstance(result, pd.Series)
assert result.iloc[0] == pytest.approx(expected[0])
assert result.iloc[1] == pytest.approx(expected[1])
# index
s = pd.Series(np.arange(10), index=np.arange(10))
ds = dd.from_pandas(s, 2)
result = ds.index.quantile([0.3, 0.7], method=method)
exp = s.quantile([0.3, 0.7])
assert len(result) == 2
assert result.divisions == (0.3, 0.7)
assert_eq(result.index, exp.index)
assert isinstance(result, dd.Series)
result = result.compute()
assert isinstance(result, pd.Series)
assert result.iloc[0] == pytest.approx(expected[2])
assert result.iloc[1] == pytest.approx(expected[3])
# series / single
result = d.b.quantile(0.5, method=method)
assert isinstance(result, dd.core.Scalar)
result = result.compute()
assert result == expected[4]
@pytest.mark.parametrize("method", ["tdigest", "dask"])
def test_quantile_missing(method):
if method == "tdigest":
pytest.importorskip("crick")
df = pd.DataFrame({"A": [0, np.nan, 2]})
ddf = dd.from_pandas(df, 2)
expected = df.quantile()
result = ddf.quantile(method=method)
assert_eq(result, expected)
expected = df.A.quantile()
result = ddf.A.quantile(method=method)
assert_eq(result, expected)
@pytest.mark.parametrize("method", ["tdigest", "dask"])
def test_empty_quantile(method):
if method == "tdigest":
pytest.importorskip("crick")
result = d.b.quantile([], method=method)
exp = full.b.quantile([])
assert result.divisions == (None, None)
assert result.name == "b"
assert result.compute().name == "b"
assert_eq(result, exp)
@pytest.mark.parametrize(
"method,expected",
[
(
"tdigest",
(
pd.Series([9.5, 29.5, 19.5], index=["A", "X", "B"]),
pd.DataFrame(
[[4.5, 24.5, 14.5], [14.5, 34.5, 24.5]],
index=[0.25, 0.75],
columns=["A", "X", "B"],
),
),
),
(
"dask",
(
pd.Series([7.0, 27.0, 17.0], index=["A", "X", "B"]),
pd.DataFrame(
[[1.50, 21.50, 11.50], [14.0, 34.0, 24.0]],
index=[0.25, 0.75],
columns=["A", "X", "B"],
),
),
),
],
)
def test_dataframe_quantile(method, expected):
if method == "tdigest":
pytest.importorskip("crick")
    # column X is used to test column order and the resulting divisions
df = pd.DataFrame(
{
"A": np.arange(20),
"X": np.arange(20, 40),
"B": np.arange(10, 30),
"C": ["a", "b", "c", "d"] * 5,
},
columns=["A", "X", "B", "C"],
)
ddf = dd.from_pandas(df, 3)
result = ddf.quantile(method=method)
assert result.npartitions == 1
assert result.divisions == ("A", "X")
result = result.compute()
assert isinstance(result, pd.Series)
assert result.name == 0.5
tm.assert_index_equal(result.index, pd.Index(["A", "X", "B"]))
assert (result == expected[0]).all()
result = ddf.quantile([0.25, 0.75], method=method)
assert result.npartitions == 1
assert result.divisions == (0.25, 0.75)
result = result.compute()
assert isinstance(result, pd.DataFrame)
tm.assert_index_equal(result.index, pd.Index([0.25, 0.75]))
tm.assert_index_equal(result.columns, pd.Index(["A", "X", "B"]))
assert (result == expected[1]).all().all()
assert_eq(ddf.quantile(axis=1, method=method), df.quantile(axis=1))
pytest.raises(ValueError, lambda: ddf.quantile([0.25, 0.75], axis=1, method=method))
def test_quantile_for_possibly_unsorted_q():
"""check that quantile is giving correct answers even when quantile parameter, q, may be unsorted.
See https://github.com/dask/dask/issues/4642.
"""
# prepare test case where percentiles should equal values
A = da.arange(0, 101)
ds = dd.from_dask_array(A)
for q in [
[0.25, 0.50, 0.75],
[0.25, 0.50, 0.75, 0.99],
[0.75, 0.5, 0.25],
[0.25, 0.99, 0.75, 0.50],
]:
r = ds.quantile(q).compute()
assert_eq(r.loc[0.25], 25.0)
assert_eq(r.loc[0.50], 50.0)
assert_eq(r.loc[0.75], 75.0)
r = ds.quantile([0.25]).compute()
assert_eq(r.loc[0.25], 25.0)
r = ds.quantile(0.25).compute()
assert_eq(r, 25.0)
def test_quantile_tiny_partitions():
"""See https://github.com/dask/dask/issues/6551"""
df = pd.DataFrame({"a": [1, 2, 3]})
ddf = dd.from_pandas(df, npartitions=3)
r = ddf["a"].quantile(0.5).compute()
assert r == 2
def test_quantile_trivial_partitions():
"""See https://github.com/dask/dask/issues/2792"""
df = pd.DataFrame({"A": []})
ddf = dd.from_pandas(df, npartitions=2)
expected = df.quantile(0.5)
assert_eq(ddf.quantile(0.5), expected)
df = pd.DataFrame({"A": [np.nan, np.nan, np.nan, np.nan]})
ddf = dd.from_pandas(df, npartitions=2)
expected = df.quantile(0.5)
assert_eq(ddf.quantile(0.5), expected)
def test_index():
assert_eq(d.index, full.index)
def test_assign():
df = pd.DataFrame(
{"a": range(8), "b": [float(i) for i in range(10, 18)]},
index=pd.Index(list("abcdefgh")),
)
ddf = dd.from_pandas(df, npartitions=3)
ddf_unknown = dd.from_pandas(df, npartitions=3, sort=False)
assert not ddf_unknown.known_divisions
res = ddf.assign(
c=1,
d="string",
e=ddf.a.sum(),
f=ddf.a + ddf.b,
g=lambda x: x.a + x.c,
dt=pd.Timestamp(2018, 2, 13),
)
res_unknown = ddf_unknown.assign(
c=1,
d="string",
e=ddf_unknown.a.sum(),
f=ddf_unknown.a + ddf_unknown.b,
g=lambda x: x.a + x.c,
dt=pd.Timestamp(2018, 2, 13),
)
sol = df.assign(
c=1,
d="string",
e=df.a.sum(),
f=df.a + df.b,
g=lambda x: x.a + x.c,
dt=pd.Timestamp(2018, 2, 13),
)
assert_eq(res, sol)
assert_eq(res_unknown, sol)
res = ddf.assign(c=df.a + 1)
assert_eq(res, df.assign(c=df.a + 1))
res = ddf.assign(c=ddf.index)
assert_eq(res, df.assign(c=df.index))
# divisions unknown won't work with pandas
with pytest.raises(ValueError):
ddf_unknown.assign(c=df.a + 1)
# unsupported type
with pytest.raises(TypeError):
ddf.assign(c=list(range(9)))
# Fails when assigning known divisions to unknown divisions
with pytest.raises(ValueError):
ddf_unknown.assign(foo=ddf.a)
# Fails when assigning unknown divisions to known divisions
with pytest.raises(ValueError):
ddf.assign(foo=ddf_unknown.a)
df = pd.DataFrame({"A": [1, 2]})
df.assign(B=lambda df: df["A"], C=lambda df: df.A + df.B)
ddf = dd.from_pandas(pd.DataFrame({"A": [1, 2]}), npartitions=2)
ddf.assign(B=lambda df: df["A"], C=lambda df: df.A + df.B)
assert_eq(df, ddf)
def test_assign_callable():
df = dd.from_pandas(pd.DataFrame({"A": range(10)}), npartitions=2)
a = df.assign(B=df.A.shift())
b = df.assign(B=lambda x: x.A.shift())
assert_eq(a, b)
def test_assign_dtypes():
ddf = dd.from_pandas(
pd.DataFrame(
data={"col1": ["a", "b"], "col2": [1, 2]}, columns=["col1", "col2"]
),
npartitions=2,
)
new_col = {"col3": pd.Series(["0", "1"])}
res = ddf.assign(**new_col)
assert_eq(
res.dtypes,
pd.Series(data=["object", "int64", "object"], index=["col1", "col2", "col3"]),
)
def test_map():
df = pd.DataFrame(
{"a": range(9), "b": [4, 5, 6, 1, 2, 3, 0, 0, 0]},
index=pd.Index([0, 1, 3, 5, 6, 8, 9, 9, 9], name="myindex"),
)
ddf = dd.from_pandas(df, npartitions=3)
assert_eq(ddf.a.map(lambda x: x + 1), df.a.map(lambda x: x + 1))
lk = {v: v + 1 for v in df.a.values}
assert_eq(ddf.a.map(lk), df.a.map(lk))
assert_eq(ddf.b.map(lk), df.b.map(lk))
lk = pd.Series(lk)
assert_eq(ddf.a.map(lk), df.a.map(lk))
assert_eq(ddf.b.map(lk), df.b.map(lk))
assert_eq(ddf.b.map(lk, meta=ddf.b), df.b.map(lk))
assert_eq(ddf.b.map(lk, meta=("b", "i8")), df.b.map(lk))
def test_concat():
x = _concat([pd.DataFrame(columns=["a", "b"]), pd.DataFrame(columns=["a", "b"])])
assert list(x.columns) == ["a", "b"]
assert len(x) == 0
def test_args():
e = d.assign(c=d.a + 1)
f = type(e)(*e._args)
assert_eq(e, f)
assert_eq(d.a, type(d.a)(*d.a._args))
assert_eq(d.a.sum(), type(d.a.sum())(*d.a.sum()._args))
def test_known_divisions():
assert d.known_divisions
df = dd.DataFrame(dsk, "x", meta, divisions=[None, None, None])
assert not df.known_divisions
def test_unknown_divisions():
dsk = {
("x", 0): pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}),
("x", 1): pd.DataFrame({"a": [4, 5, 6], "b": [3, 2, 1]}),
("x", 2): pd.DataFrame({"a": [7, 8, 9], "b": [0, 0, 0]}),
}
meta = make_meta({"a": "i8", "b": "i8"}, parent_meta=pd.DataFrame())
d = dd.DataFrame(dsk, "x", meta, [None, None, None, None])
full = d.compute(scheduler="sync")
assert_eq(d.a.sum(), full.a.sum())
assert_eq(d.a + d.b + 1, full.a + full.b + 1)
def test_with_min_count():
dfs = [
pd.DataFrame([[None, 2, 3], [None, 5, 6], [5, 4, 9]]),
pd.DataFrame([[2, None, None], [None, 5, 6], [5, 4, 9]]),
]
ddfs = [dd.from_pandas(df, npartitions=4) for df in dfs]
axes = [0, 1]
for df, ddf in zip(dfs, ddfs):
for axis in axes:
for min_count in [0, 1, 2, 3]:
assert_eq(
df.sum(min_count=min_count, axis=axis),
ddf.sum(min_count=min_count, axis=axis),
)
assert_eq(
df.prod(min_count=min_count, axis=axis),
ddf.prod(min_count=min_count, axis=axis),
)
@pytest.mark.parametrize("join", ["inner", "outer", "left", "right"])
def test_align(join):
df1a = pd.DataFrame(
{"A": np.random.randn(10), "B": np.random.randn(10)},
index=[1, 12, 5, 6, 3, 9, 10, 4, 13, 11],
)
df1b = pd.DataFrame(
{"A": np.random.randn(10), "B": np.random.randn(10)},
index=[0, 3, 2, 10, 5, 6, 7, 8, 12, 13],
)
ddf1a = dd.from_pandas(df1a, 3)
ddf1b = dd.from_pandas(df1b, 3)
# DataFrame
res1, res2 = ddf1a.align(ddf1b, join=join)
exp1, exp2 = df1a.align(df1b, join=join)
assert assert_eq(res1, exp1)
assert assert_eq(res2, exp2)
# Series
res1, res2 = ddf1a["A"].align(ddf1b["B"], join=join)
exp1, exp2 = df1a["A"].align(df1b["B"], join=join)
assert assert_eq(res1, exp1)
assert assert_eq(res2, exp2)
# DataFrame with fill_value
res1, res2 = ddf1a.align(ddf1b, join=join, fill_value=1)
exp1, exp2 = df1a.align(df1b, join=join, fill_value=1)
assert assert_eq(res1, exp1)
assert assert_eq(res2, exp2)
# Series
res1, res2 = ddf1a["A"].align(ddf1b["B"], join=join, fill_value=1)
exp1, exp2 = df1a["A"].align(df1b["B"], join=join, fill_value=1)
assert assert_eq(res1, exp1)
assert assert_eq(res2, exp2)
@pytest.mark.parametrize("join", ["inner", "outer", "left", "right"])
def test_align_axis(join):
df1a = pd.DataFrame(
{"A": np.random.randn(10), "B": np.random.randn(10), "C": np.random.randn(10)},
index=[1, 12, 5, 6, 3, 9, 10, 4, 13, 11],
)
df1b = pd.DataFrame(
{"B": np.random.randn(10), "C": np.random.randn(10), "D": np.random.randn(10)},
index=[0, 3, 2, 10, 5, 6, 7, 8, 12, 13],
)
ddf1a = dd.from_pandas(df1a, 3)
ddf1b = dd.from_pandas(df1b, 3)
res1, res2 = ddf1a.align(ddf1b, join=join, axis=0)
exp1, exp2 = df1a.align(df1b, join=join, axis=0)
assert assert_eq(res1, exp1)
assert assert_eq(res2, exp2)
res1, res2 = ddf1a.align(ddf1b, join=join, axis=1)
exp1, exp2 = df1a.align(df1b, join=join, axis=1)
assert assert_eq(res1, exp1)
assert assert_eq(res2, exp2)
res1, res2 = ddf1a.align(ddf1b, join=join, axis="index")
exp1, exp2 = df1a.align(df1b, join=join, axis="index")
assert assert_eq(res1, exp1)
assert assert_eq(res2, exp2)
res1, res2 = ddf1a.align(ddf1b, join=join, axis="columns")
exp1, exp2 = df1a.align(df1b, join=join, axis="columns")
assert assert_eq(res1, exp1)
assert assert_eq(res2, exp2)
# invalid
with pytest.raises(ValueError):
ddf1a.align(ddf1b, join=join, axis="XXX")
with pytest.raises(ValueError):
ddf1a["A"].align(ddf1b["B"], join=join, axis=1)
def test_combine():
df1 = pd.DataFrame(
{
"A": np.random.choice([1, 2, np.nan], 100),
"B": np.random.choice(["a", "b", "nan"], 100),
}
)
df2 = pd.DataFrame(
{
"A": np.random.choice([1, 2, 3], 100),
"B": np.random.choice(["a", "b", "c"], 100),
}
)
ddf1 = dd.from_pandas(df1, 4)
ddf2 = dd.from_pandas(df2, 5)
first = lambda a, b: a
# You can add Series containing strings and NaNs, but you can't add the scalar 'a' + np.NaN
str_add = lambda a, b: a + b if a is not np.nan else a
# DataFrame
for dda, ddb, a, b, runs in [
(ddf1, ddf2, df1, df2, [(add, None), (first, None)]),
(ddf1.A, ddf2.A, df1.A, df2.A, [(add, None), (add, 100), (first, None)]),
(
ddf1.B,
ddf2.B,
df1.B,
df2.B,
[(str_add, None), (str_add, "d"), (first, None)],
),
]:
for func, fill_value in runs:
sol = a.combine(b, func, fill_value=fill_value)
assert_eq(dda.combine(ddb, func, fill_value=fill_value), sol)
assert_eq(dda.combine(b, func, fill_value=fill_value), sol)
assert_eq(
ddf1.combine(ddf2, add, overwrite=False), df1.combine(df2, add, overwrite=False)
)
assert dda.combine(ddb, add)._name == dda.combine(ddb, add)._name
def test_combine_first():
df1 = pd.DataFrame(
{
"A": np.random.choice([1, 2, np.nan], 100),
"B": np.random.choice(["a", "b", "nan"], 100),
}
)
df2 = pd.DataFrame(
{
"A": np.random.choice([1, 2, 3], 100),
"B": np.random.choice(["a", "b", "c"], 100),
}
)
ddf1 = dd.from_pandas(df1, 4)
ddf2 = dd.from_pandas(df2, 5)
# DataFrame
assert_eq(ddf1.combine_first(ddf2), df1.combine_first(df2))
assert_eq(ddf1.combine_first(df2), df1.combine_first(df2))
# Series
assert_eq(ddf1.A.combine_first(ddf2.A), df1.A.combine_first(df2.A))
assert_eq(ddf1.A.combine_first(df2.A), df1.A.combine_first(df2.A))
assert_eq(ddf1.B.combine_first(ddf2.B), df1.B.combine_first(df2.B))
assert_eq(ddf1.B.combine_first(df2.B), df1.B.combine_first(df2.B))
def test_dataframe_picklable():
from pickle import dumps, loads
from cloudpickle import dumps as cp_dumps
from cloudpickle import loads as cp_loads
d = _compat.makeTimeDataFrame()
df = dd.from_pandas(d, npartitions=3)
df = df + 2
# dataframe
df2 = loads(dumps(df))
assert_eq(df, df2)
df2 = cp_loads(cp_dumps(df))
assert_eq(df, df2)
# series
a2 = loads(dumps(df.A))
assert_eq(df.A, a2)
a2 = cp_loads(cp_dumps(df.A))
assert_eq(df.A, a2)
# index
i2 = loads(dumps(df.index))
assert_eq(df.index, i2)
i2 = cp_loads(cp_dumps(df.index))
assert_eq(df.index, i2)
# scalar
# lambdas are present, so only test cloudpickle
s = df.A.sum()
s2 = cp_loads(cp_dumps(s))
assert_eq(s, s2)
def test_random_partitions():
a, b = d.random_split([0.5, 0.5], 42)
assert isinstance(a, dd.DataFrame)
assert isinstance(b, dd.DataFrame)
assert a._name != b._name
np.testing.assert_array_equal(a.index, sorted(a.index))
assert len(a.compute()) + len(b.compute()) == len(full)
a2, b2 = d.random_split([0.5, 0.5], 42)
assert a2._name == a._name
assert b2._name == b._name
a, b = d.random_split([0.5, 0.5], 42, True)
a2, b2 = d.random_split([0.5, 0.5], 42, True)
assert_eq(a, a2)
assert_eq(b, b2)
with pytest.raises(AssertionError):
np.testing.assert_array_equal(a.index, sorted(a.index))
parts = d.random_split([0.4, 0.5, 0.1], 42)
names = {p._name for p in parts}
names.update([a._name, b._name])
assert len(names) == 5
with pytest.raises(ValueError):
d.random_split([0.4, 0.5], 42)
def test_series_round():
ps = pd.Series([1.123, 2.123, 3.123, 1.234, 2.234, 3.234], name="a")
s = dd.from_pandas(ps, npartitions=3)
assert_eq(s.round(), ps.round())
@pytest.mark.slow
def test_repartition():
def _check_split_data(orig, d):
"""Check data is split properly"""
keys = [k for k in d.dask if k[0].startswith("repartition-split")]
keys = sorted(keys)
sp = pd.concat(
[compute_as_if_collection(dd.DataFrame, d.dask, k) for k in keys]
)
assert_eq(orig, sp)
assert_eq(orig, d)
df = pd.DataFrame(
{"x": [1, 2, 3, 4, 5, 6], "y": list("abdabd")}, index=[10, 20, 30, 40, 50, 60]
)
a = dd.from_pandas(df, 2)
b = a.repartition(divisions=[10, 20, 50, 60])
assert b.divisions == (10, 20, 50, 60)
assert_eq(a, b)
assert_eq(compute_as_if_collection(dd.DataFrame, b.dask, (b._name, 0)), df.iloc[:1])
for div in [
[20, 60],
[10, 50],
[1], # first / last element mismatch
[0, 60],
[10, 70],  # expanding divisions is not allowed by default
[10, 50, 20, 60], # not sorted
[10, 10, 20, 60],
]: # not unique (last element can be duplicated)
pytest.raises(ValueError, lambda: a.repartition(divisions=div))
pdf = pd.DataFrame(np.random.randn(7, 5), columns=list("abxyz"))
for p in range(1, 7):
ddf = dd.from_pandas(pdf, p)
assert_eq(ddf, pdf)
for div in [
[0, 6],
[0, 6, 6],
[0, 5, 6],
[0, 4, 6, 6],
[0, 2, 6],
[0, 2, 6, 6],
[0, 2, 3, 6, 6],
[0, 1, 2, 3, 4, 5, 6, 6],
]:
rddf = ddf.repartition(divisions=div)
_check_split_data(ddf, rddf)
assert rddf.divisions == tuple(div)
assert_eq(pdf, rddf)
rds = ddf.x.repartition(divisions=div)
_check_split_data(ddf.x, rds)
assert rds.divisions == tuple(div)
assert_eq(pdf.x, rds)
# expand divisions
for div in [[-5, 10], [-2, 3, 5, 6], [0, 4, 5, 9, 10]]:
rddf = ddf.repartition(divisions=div, force=True)
_check_split_data(ddf, rddf)
assert rddf.divisions == tuple(div)
assert_eq(pdf, rddf)
rds = ddf.x.repartition(divisions=div, force=True)
_check_split_data(ddf.x, rds)
assert rds.divisions == tuple(div)
assert_eq(pdf.x, rds)
pdf = pd.DataFrame(
{"x": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9], "y": [9, 8, 7, 6, 5, 4, 3, 2, 1, 0]},
index=list("abcdefghij"),
)
for p in range(1, 7):
ddf = dd.from_pandas(pdf, p)
assert_eq(ddf, pdf)
for div in [
list("aj"),
list("ajj"),
list("adj"),
list("abfj"),
list("ahjj"),
list("acdj"),
list("adfij"),
list("abdefgij"),
list("abcdefghij"),
]:
rddf = ddf.repartition(divisions=div)
_check_split_data(ddf, rddf)
assert rddf.divisions == tuple(div)
assert_eq(pdf, rddf)
rds = ddf.x.repartition(divisions=div)
_check_split_data(ddf.x, rds)
assert rds.divisions == tuple(div)
assert_eq(pdf.x, rds)
# expand divisions
for div in [list("Yadijm"), list("acmrxz"), list("Yajz")]:
rddf = ddf.repartition(divisions=div, force=True)
_check_split_data(ddf, rddf)
assert rddf.divisions == tuple(div)
assert_eq(pdf, rddf)
rds = ddf.x.repartition(divisions=div, force=True)
_check_split_data(ddf.x, rds)
assert rds.divisions == tuple(div)
assert_eq(pdf.x, rds)
def test_repartition_divisions():
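# repartition_divisions builds boundary_slice tasks under the intermediate name ('b')
# and concat/alias tasks under the output name ('c')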
result = repartition_divisions([0, 6], [0, 6, 6], "a", "b", "c")
assert result == {
("b", 0): (methods.boundary_slice, ("a", 0), 0, 6, False),
("b", 1): (methods.boundary_slice, ("a", 0), 6, 6, True),
("c", 0): ("b", 0),
("c", 1): ("b", 1),
}
result = repartition_divisions([1, 3, 7], [1, 4, 6, 7], "a", "b", "c")
assert result == {
("b", 0): (methods.boundary_slice, ("a", 0), 1, 3, False),
("b", 1): (methods.boundary_slice, ("a", 1), 3, 4, False),
("b", 2): (methods.boundary_slice, ("a", 1), 4, 6, False),
("b", 3): (methods.boundary_slice, ("a", 1), 6, 7, True),
("c", 0): (methods.concat, [("b", 0), ("b", 1)]),
("c", 1): ("b", 2),
("c", 2): ("b", 3),
}
def test_repartition_on_pandas_dataframe():
df = pd.DataFrame(
{"x": [1, 2, 3, 4, 5, 6], "y": list("abdabd")}, index=[10, 20, 30, 40, 50, 60]
)
ddf = dd.repartition(df, divisions=[10, 20, 50, 60])
assert isinstance(ddf, dd.DataFrame)
assert ddf.divisions == (10, 20, 50, 60)
assert_eq(ddf, df)
ddf = dd.repartition(df.y, divisions=[10, 20, 50, 60])
assert isinstance(ddf, dd.Series)
assert ddf.divisions == (10, 20, 50, 60)
assert_eq(ddf, df.y)
@pytest.mark.parametrize("use_index", [True, False])
@pytest.mark.parametrize("n", [1, 2, 4, 5])
@pytest.mark.parametrize("k", [1, 2, 4, 5])
@pytest.mark.parametrize("dtype", [float, "M8[ns]"])
@pytest.mark.parametrize("transform", [lambda df: df, lambda df: df.x])
def test_repartition_npartitions(use_index, n, k, dtype, transform):
df = pd.DataFrame(
{"x": [1, 2, 3, 4, 5, 6] * 10, "y": list("abdabd") * 10},
index=pd.Series([1, 2, 3, 4, 5, 6] * 10, dtype=dtype),
)
df = transform(df)
a = dd.from_pandas(df, npartitions=n, sort=use_index)
b = a.repartition(k)
assert_eq(a, b)
assert b.npartitions == k
parts = dask.get(b.dask, b.__dask_keys__())
assert all(map(len, parts))
@pytest.mark.parametrize("use_index", [True, False])
@pytest.mark.parametrize("n", [2, 5])
@pytest.mark.parametrize("partition_size", ["1kiB", 379])
@pytest.mark.parametrize("transform", [lambda df: df, lambda df: df.x])
def test_repartition_partition_size(use_index, n, partition_size, transform):
df = pd.DataFrame(
{"x": [1, 2, 3, 4, 5, 6] * 10, "y": list("abdabd") * 10},
index=pd.Series([10, 20, 30, 40, 50, 60] * 10),
)
df = transform(df)
a = dd.from_pandas(df, npartitions=n, sort=use_index)
b = a.repartition(partition_size=partition_size)
assert_eq(a, b, check_divisions=False)
assert np.alltrue(b.map_partitions(total_mem_usage, deep=True).compute() <= 1024)
parts = dask.get(b.dask, b.__dask_keys__())
assert all(map(len, parts))
def test_repartition_partition_size_arg():
df = pd.DataFrame({"x": range(10)})
a = dd.from_pandas(df, npartitions=2)
b = a.repartition("1 MiB")
assert b.npartitions == 1
def test_repartition_npartitions_same_limits():
df = pd.DataFrame(
{"x": [1, 2, 3]},
index=[
pd.Timestamp("2017-05-09 00:00:00.006000"),
pd.Timestamp("2017-05-09 02:45:00.017999"),
pd.Timestamp("2017-05-09 05:59:58.938999"),
],
)
ddf = dd.from_pandas(df, npartitions=2)
ddf.repartition(npartitions=10)
def test_repartition_npartitions_numeric_edge_case():
"""
Test that we cover numeric edge cases when
int(ddf.npartitions / npartitions) * npartitions != ddf.npartitions
"""
df = pd.DataFrame({"x": range(100)})
a = dd.from_pandas(df, npartitions=15)
assert a.npartitions == 15
b = a.repartition(npartitions=11)
assert_eq(a, b)
def test_repartition_object_index():
df = pd.DataFrame({"x": [1, 2, 3, 4, 5, 6] * 10}, index=list("abdabd") * 10)
a = dd.from_pandas(df, npartitions=5)
b = a.repartition(npartitions=2)
assert b.npartitions == 2
assert_eq(b, df)
b = a.repartition(npartitions=10)
assert b.npartitions == 10
assert_eq(b, df)
assert not b.known_divisions
@pytest.mark.slow
@pytest.mark.parametrize("npartitions", [1, 20, 243])
@pytest.mark.parametrize("freq", ["1D", "7D", "28h", "1h"])
@pytest.mark.parametrize(
"end", ["2000-04-15", "2000-04-15 12:37:01", "2000-01-01 12:37:00"]
)
@pytest.mark.parametrize(
"start", ["2000-01-01", "2000-01-01 12:30:00", "2000-01-01 12:30:00"]
)
def test_repartition_freq(npartitions, freq, start, end):
start = pd.Timestamp(start)
end = pd.Timestamp(end)
ind = pd.date_range(start=start, end=end, freq="60s")
df = pd.DataFrame({"x": np.arange(len(ind))}, index=ind)
ddf = dd.from_pandas(df, npartitions=npartitions, name="x")
ddf2 = ddf.repartition(freq=freq)
assert_eq(ddf2, df)
def test_repartition_freq_divisions():
df = pd.DataFrame(
{"x": np.random.random(10)},
index=pd.DatetimeIndex(np.random.random(10) * 100e9),
)
ddf = dd.from_pandas(df, npartitions=3)
ddf2 = ddf.repartition(freq="15s")
for div in ddf2.divisions[1:-1]:
assert div == div.round("15s")
assert ddf2.divisions[0] == df.index.min()
assert ddf2.divisions[-1] == df.index.max()
assert_eq(ddf2, df)
def test_repartition_freq_errors():
df = pd.DataFrame({"x": [1, 2, 3]})
ddf = dd.from_pandas(df, npartitions=1)
with pytest.raises(TypeError) as info:
ddf.repartition(freq="1s")
assert "only" in str(info.value)
assert "timeseries" in str(info.value)
def test_repartition_freq_month():
ts = pd.date_range("2015-01-01 00:00", "2015-05-01 23:50", freq="10min")
df = pd.DataFrame(
np.random.randint(0, 100, size=(len(ts), 4)), columns=list("ABCD"), index=ts
)
ddf = dd.from_pandas(df, npartitions=1).repartition(freq="MS")
assert_eq(df, ddf)
assert ddf.divisions == (
pd.Timestamp("2015-1-1 00:00:00"),
pd.Timestamp("2015-2-1 00:00:00"),
pd.Timestamp("2015-3-1 00:00:00"),
pd.Timestamp("2015-4-1 00:00:00"),
pd.Timestamp("2015-5-1 00:00:00"),
pd.Timestamp("2015-5-1 23:50:00"),
)
assert ddf.npartitions == 5
def test_repartition_freq_day():
index = [
pd.Timestamp("2020-1-1"),
pd.Timestamp("2020-1-1"),
pd.Timestamp("2020-1-2"),
pd.Timestamp("2020-1-2"),
]
pdf = pd.DataFrame(index=index, data={"foo": "foo"})
ddf = dd.from_pandas(pdf, npartitions=1).repartition(freq="D")
assert_eq(ddf, pdf)
assert ddf.npartitions == 2
assert ddf.divisions == (
pd.Timestamp("2020-1-1"),
pd.Timestamp("2020-1-2"),
pd.Timestamp("2020-1-2"),
)
@pytest.mark.parametrize(
"freq, expected_freq",
[
("M", "MS"),
("MS", "MS"),
("2M", "2MS"),
("Q", "QS"),
("Q-FEB", "QS-FEB"),
("2Q", "2QS"),
("2Q-FEB", "2QS-FEB"),
("2QS-FEB", "2QS-FEB"),
("BQ", "BQS"),
("2BQ", "2BQS"),
("SM", "SMS"),
("A", "AS"),
("A-JUN", "AS-JUN"),
("BA", "BAS"),
("2BA", "2BAS"),
("BY", "BAS"),
("Y", "AS"),
(pd.Timedelta(seconds=1), pd.Timedelta(seconds=1)),
],
)
def test_map_freq_to_period_start(freq, expected_freq):
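# end-anchored offsets (e.g. 'M', 'Q', 'A') are mapped to their start-anchored
# equivalents so that divisions line up with period starts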
new_freq = _map_freq_to_period_start(freq)
assert new_freq == expected_freq
def test_repartition_input_errors():
df = pd.DataFrame({"x": [1, 2, 3]})
ddf = dd.from_pandas(df, npartitions=1)
with pytest.raises(ValueError):
ddf.repartition(npartitions=5, divisions=[None, None])
with pytest.raises(ValueError):
ddf.repartition(npartitions=5, partition_size="5MiB")
def test_embarrassingly_parallel_operations():
df = pd.DataFrame(
{"x": [1, 2, 3, 4, None, 6], "y": list("abdabd")},
index=[10, 20, 30, 40, 50, 60],
)
a = dd.from_pandas(df, 2)
assert_eq(a.x.astype("float32"), df.x.astype("float32"))
assert a.x.astype("float32").compute().dtype == "float32"
assert_eq(a.x.dropna(), df.x.dropna())
assert_eq(a.x.between(2, 4), df.x.between(2, 4))
assert_eq(a.x.clip(2, 4), df.x.clip(2, 4))
assert_eq(a.x.notnull(), df.x.notnull())
assert_eq(a.x.isnull(), df.x.isnull())
assert_eq(a.notnull(), df.notnull())
assert_eq(a.isnull(), df.isnull())
assert len(a.sample(frac=0.5).compute()) < len(df)
def test_fillna():
df = _compat.makeMissingDataframe()
ddf = dd.from_pandas(df, npartitions=5, sort=False)
assert_eq(ddf.fillna(100), df.fillna(100))
assert_eq(ddf.A.fillna(100), df.A.fillna(100))
assert_eq(ddf.A.fillna(ddf["A"].mean()), df.A.fillna(df["A"].mean()))
assert_eq(ddf.fillna(method="pad"), df.fillna(method="pad"))
assert_eq(ddf.A.fillna(method="pad"), df.A.fillna(method="pad"))
assert_eq(ddf.fillna(method="bfill"), df.fillna(method="bfill"))
assert_eq(ddf.A.fillna(method="bfill"), df.A.fillna(method="bfill"))
assert_eq(ddf.fillna(method="pad", limit=2), df.fillna(method="pad", limit=2))
assert_eq(ddf.A.fillna(method="pad", limit=2), df.A.fillna(method="pad", limit=2))
assert_eq(ddf.fillna(method="bfill", limit=2), df.fillna(method="bfill", limit=2))
assert_eq(
ddf.A.fillna(method="bfill", limit=2), df.A.fillna(method="bfill", limit=2)
)
assert_eq(ddf.fillna(100, axis=1), df.fillna(100, axis=1))
assert_eq(ddf.fillna(method="pad", axis=1), df.fillna(method="pad", axis=1))
assert_eq(
ddf.fillna(method="pad", limit=2, axis=1),
df.fillna(method="pad", limit=2, axis=1),
)
pytest.raises(ValueError, lambda: ddf.A.fillna(0, axis=1))
pytest.raises(NotImplementedError, lambda: ddf.fillna(0, limit=10))
pytest.raises(NotImplementedError, lambda: ddf.fillna(0, limit=10, axis=1))
df = _compat.makeMissingDataframe()
df.iloc[:15, 0] = np.nan # all NaN partition
ddf = dd.from_pandas(df, npartitions=5, sort=False)
pytest.raises(ValueError, lambda: ddf.fillna(method="pad").compute())
assert_eq(df.fillna(method="pad", limit=3), ddf.fillna(method="pad", limit=3))
@pytest.mark.parametrize("optimize", [True, False])
def test_delayed_roundtrip(optimize: bool):
df1 = d + 1 + 1
delayed = df1.to_delayed(optimize_graph=optimize)
for x in delayed:
assert x.__dask_layers__() == (
"delayed-" + df1._name if optimize else df1._name,
)
x.dask.validate()
assert len(delayed) == df1.npartitions
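# df1 = d + 1 + 1 carries three layers (the source frame plus the two elementwise adds);
# optimize_graph=True fuses them into a single layer per delayed object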
assert len(delayed[0].dask.layers) == (1 if optimize else 3)
dm = d.a.mean().to_delayed(optimize_graph=optimize)
delayed2 = [x * 2 - dm for x in delayed]
for x in delayed2:
x.dask.validate()
df3 = dd.from_delayed(delayed2, meta=df1, divisions=df1.divisions)
df4 = df3 - 1 - 1
df4.dask.validate()
assert_eq(df4, (full + 2) * 2 - full.a.mean() - 2)
def test_from_delayed_lazy_if_meta_provided():
"""Ensure that the graph is 100% lazily evaluted if meta is provided"""
@dask.delayed
def raise_exception():
raise RuntimeError()
tasks = [raise_exception()]
ddf = dd.from_delayed(tasks, meta=dict(a=float))
with pytest.raises(RuntimeError):
ddf.compute()
def test_from_delayed_empty_meta_provided():
ddf = dd.from_delayed([], meta=dict(a=float))
expected = pd.DataFrame({"a": [0.1]}).iloc[:0]
assert_eq(ddf, expected)
def test_fillna_duplicate_index():
@dask.delayed
def f():
return pd.DataFrame(dict(a=[1.0], b=[np.NaN]))
ddf = dd.from_delayed([f(), f()], meta=dict(a=float, b=float))
ddf.b = ddf.b.fillna(ddf.a)
ddf.compute()
def test_fillna_multi_dataframe():
df = _compat.makeMissingDataframe()
ddf = dd.from_pandas(df, npartitions=5, sort=False)
assert_eq(ddf.A.fillna(ddf.B), df.A.fillna(df.B))
assert_eq(ddf.B.fillna(ddf.A), df.B.fillna(df.A))
def test_ffill_bfill():
df = _compat.makeMissingDataframe()
ddf = dd.from_pandas(df, npartitions=5, sort=False)
assert_eq(ddf.ffill(), df.ffill())
assert_eq(ddf.bfill(), df.bfill())
assert_eq(ddf.ffill(axis=1), df.ffill(axis=1))
assert_eq(ddf.bfill(axis=1), df.bfill(axis=1))
def test_fillna_series_types():
# https://github.com/dask/dask/issues/2809
df = pd.DataFrame({"A": [1, np.nan, 3], "B": [1, np.nan, 3]})
ddf = dd.from_pandas(df, npartitions=2)
fill_value = pd.Series([1, 10], index=["A", "C"])
assert_eq(ddf.fillna(fill_value), df.fillna(fill_value))
def test_sample():
df = pd.DataFrame(
{"x": [1, 2, 3, 4, None, 6], "y": list("abdabd")},
index=[10, 20, 30, 40, 50, 60],
)
a = dd.from_pandas(df, 2)
b = a.sample(frac=0.5)
assert_eq(b, b)
c = a.sample(frac=0.5, random_state=1234)
d = a.sample(frac=0.5, random_state=1234)
assert_eq(c, d)
assert a.sample(frac=0.5)._name != a.sample(frac=0.5)._name
def test_sample_without_replacement():
df = pd.DataFrame(
{"x": [1, 2, 3, 4, None, 6], "y": list("abdabd")},
index=[10, 20, 30, 40, 50, 60],
)
a = dd.from_pandas(df, 2)
b = a.sample(frac=0.7, replace=False)
bb = b.index.compute()
assert len(bb) == len(set(bb))
def test_sample_raises():
df = pd.DataFrame(
{"x": [1, 2, 3, 4, None, 6], "y": list("abdabd")},
index=[10, 20, 30, 40, 50, 60],
)
a = dd.from_pandas(df, 2)
# Make sure n is interpreted as frac when 0 <= n <= 1
# This is so existing code (i.e. ddf.sample(0.5)) won't break
with pytest.warns(UserWarning):
b = a.sample(0.5, random_state=1234)
c = a.sample(frac=0.5, random_state=1234)
assert_eq(b, c)
with pytest.raises(ValueError):
a.sample(n=10)
# Make sure frac is provided
with pytest.raises(ValueError):
a.sample(frac=None)
def test_empty_max():
meta = make_meta({"x": "i8"}, parent_meta=pd.DataFrame())
a = dd.DataFrame(
{("x", 0): pd.DataFrame({"x": [1]}), ("x", 1): pd.DataFrame({"x": []})},
"x",
meta,
[None, None, None],
)
assert_eq(a.x.max(), 1)
def test_query():
pytest.importorskip("numexpr")
df = pd.DataFrame({"x": [1, 2, 3, 4], "y": [5, 6, 7, 8]})
ddf = dd.from_pandas(df, npartitions=2)
assert_eq(ddf.query("x**2 > y"), df.query("x**2 > y"))
assert_eq(
ddf.query("x**2 > @value", local_dict={"value": 4}),
df.query("x**2 > @value", local_dict={"value": 4}),
)
def test_eval():
pytest.importorskip("numexpr")
p = pd.DataFrame({"x": [1, 2, 3, 4], "y": [5, 6, 7, 8]})
d = dd.from_pandas(p, npartitions=2)
assert_eq(p.eval("x + y"), d.eval("x + y"))
assert_eq(p.eval("z = x + y", inplace=False), d.eval("z = x + y", inplace=False))
with pytest.raises(NotImplementedError):
d.eval("z = x + y", inplace=True)
@pytest.mark.parametrize(
"include, exclude",
[
([int], None),
(None, [int]),
([np.number, object], [float]),
(["datetime"], None),
],
)
def test_select_dtypes(include, exclude):
n = 10
df = pd.DataFrame(
{
"cint": [1] * n,
"cstr": ["a"] * n,
"clfoat": [1.0] * n,
"cdt": pd.date_range("2016-01-01", periods=n),
}
)
a = dd.from_pandas(df, npartitions=2)
result = a.select_dtypes(include=include, exclude=exclude)
expected = df.select_dtypes(include=include, exclude=exclude)
assert_eq(result, expected)
# count dtypes
tm.assert_series_equal(a.dtypes.value_counts(), df.dtypes.value_counts())
tm.assert_series_equal(result.dtypes.value_counts(), expected.dtypes.value_counts())
def test_deterministic_apply_concat_apply_names():
df = pd.DataFrame({"x": [1, 2, 3, 4], "y": [5, 6, 7, 8]})
a = dd.from_pandas(df, npartitions=2)
assert sorted(a.x.nlargest(2).dask) == sorted(a.x.nlargest(2).dask)
assert sorted(a.x.nlargest(2).dask) != sorted(a.x.nlargest(3).dask)
assert sorted(a.x.drop_duplicates().dask) == sorted(a.x.drop_duplicates().dask)
assert sorted(a.groupby("x").y.mean().dask) == sorted(a.groupby("x").y.mean().dask)
# Test aca without passing in token string
f = lambda a: a.nlargest(5)
f2 = lambda a: a.nlargest(3)
assert sorted(aca(a.x, f, f, a.x._meta).dask) != sorted(
aca(a.x, f2, f2, a.x._meta).dask
)
assert sorted(aca(a.x, f, f, a.x._meta).dask) == sorted(
aca(a.x, f, f, a.x._meta).dask
)
# Test aca with keywords
def chunk(x, c_key=0, both_key=0):
return x.sum() + c_key + both_key
def agg(x, a_key=0, both_key=0):
return pd.Series(x).sum() + a_key + both_key
c_key = 2
a_key = 3
both_key = 4
res = aca(
a.x,
chunk=chunk,
aggregate=agg,
chunk_kwargs={"c_key": c_key},
aggregate_kwargs={"a_key": a_key},
both_key=both_key,
)
assert sorted(res.dask) == sorted(
aca(
a.x,
chunk=chunk,
aggregate=agg,
chunk_kwargs={"c_key": c_key},
aggregate_kwargs={"a_key": a_key},
both_key=both_key,
).dask
)
assert sorted(res.dask) != sorted(
aca(
a.x,
chunk=chunk,
aggregate=agg,
chunk_kwargs={"c_key": c_key},
aggregate_kwargs={"a_key": a_key},
both_key=0,
).dask
)
assert_eq(res, df.x.sum() + 2 * (c_key + both_key) + a_key + both_key)
def test_aca_meta_infer():
df = pd.DataFrame({"x": [1, 2, 3, 4], "y": [5, 6, 7, 8]})
ddf = dd.from_pandas(df, npartitions=2)
def chunk(x, y, constant=1.0):
return (x + y + constant).head()
def agg(x):
return x.head()
res = aca([ddf, 2.0], chunk=chunk, aggregate=agg, chunk_kwargs=dict(constant=2.0))
sol = (df + 2.0 + 2.0).head()
assert_eq(res, sol)
# Should infer as a scalar
res = aca(
[ddf.x], chunk=lambda x: pd.Series([x.sum()]), aggregate=lambda x: x.sum()
)
assert isinstance(res, Scalar)
assert res.compute() == df.x.sum()
def test_aca_split_every():
df = pd.DataFrame({"x": [1] * 60})
ddf = dd.from_pandas(df, npartitions=15)
def chunk(x, y, constant=0):
return x.sum() + y + constant
def combine(x, constant=0):
return x.sum() + constant + 1
def agg(x, constant=0):
return x.sum() + constant + 2
f = lambda n: aca(
[ddf, 2.0],
chunk=chunk,
aggregate=agg,
combine=combine,
chunk_kwargs=dict(constant=1.0),
combine_kwargs=dict(constant=2.0),
aggregate_kwargs=dict(constant=3.0),
split_every=n,
)
assert_max_deps(f(3), 3)
assert_max_deps(f(4), 4, False)
assert_max_deps(f(5), 5)
assert f(15).dask.keys() == f(ddf.npartitions).dask.keys()
r3 = f(3)
r4 = f(4)
assert r3._name != r4._name
# Only intersect on reading operations
assert len(r3.dask.keys() & r4.dask.keys()) == len(ddf.dask)
# Keywords are different for each step
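# with split_every=3 the 15 chunk outputs are combined in a tree:
# ceil(15/3)=5 combines, then ceil(5/3)=2, i.e. 7 combine calls, then one aggregate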
assert f(3).compute() == 60 + 15 * (2 + 1) + 7 * (2 + 1) + (3 + 2)
# Keywords are same for each step
res = aca(
[ddf, 2.0],
chunk=chunk,
aggregate=agg,
combine=combine,
constant=3.0,
split_every=3,
)
assert res.compute() == 60 + 15 * (2 + 3) + 7 * (3 + 1) + (3 + 2)
# No combine provided, combine is agg
res = aca([ddf, 2.0], chunk=chunk, aggregate=agg, constant=3, split_every=3)
assert res.compute() == 60 + 15 * (2 + 3) + 8 * (3 + 2)
# split_every must be >= 2
with pytest.raises(ValueError):
f(1)
# combine_kwargs with no combine provided
with pytest.raises(ValueError):
aca(
[ddf, 2.0],
chunk=chunk,
aggregate=agg,
split_every=3,
chunk_kwargs=dict(constant=1.0),
combine_kwargs=dict(constant=2.0),
aggregate_kwargs=dict(constant=3.0),
)
def test_reduction_method():
df = pd.DataFrame({"x": range(50), "y": range(50, 100)})
ddf = dd.from_pandas(df, npartitions=4)
chunk = lambda x, val=0: (x >= val).sum()
agg = lambda x: x.sum()
# Output of chunk is a scalar
res = ddf.x.reduction(chunk, aggregate=agg)
assert_eq(res, df.x.count())
# Output of chunk is a series
res = ddf.reduction(chunk, aggregate=agg)
assert res._name == ddf.reduction(chunk, aggregate=agg)._name
assert_eq(res, df.count())
# Test with keywords
res2 = ddf.reduction(chunk, aggregate=agg, chunk_kwargs={"val": 25})
assert res2._name == ddf.reduction(chunk, aggregate=agg, chunk_kwargs={"val": 25})._name
assert res2._name != res._name
assert_eq(res2, (df >= 25).sum())
# Output of chunk is a dataframe
def sum_and_count(x):
return pd.DataFrame({"sum": x.sum(), "count": x.count()})
res = ddf.reduction(sum_and_count, aggregate=lambda x: x.groupby(level=0).sum())
assert_eq(res, pd.DataFrame({"sum": df.sum(), "count": df.count()}))
def test_reduction_method_split_every():
df = pd.Series([1] * 60)
ddf = dd.from_pandas(df, npartitions=15)
def chunk(x, constant=0):
return x.sum() + constant
def combine(x, constant=0):
return x.sum() + constant + 1
def agg(x, constant=0):
return x.sum() + constant + 2
f = lambda n: ddf.reduction(
chunk,
aggregate=agg,
combine=combine,
chunk_kwargs=dict(constant=1.0),
combine_kwargs=dict(constant=2.0),
aggregate_kwargs=dict(constant=3.0),
split_every=n,
)
assert_max_deps(f(3), 3)
assert_max_deps(f(4), 4, False)
assert_max_deps(f(5), 5)
assert f(15).dask.keys() == f(ddf.npartitions).dask.keys()
r3 = f(3)
r4 = f(4)
assert r3._name != r4._name
# Only intersect on reading operations
assert len(r3.dask.keys() & r4.dask.keys()) == len(ddf.dask)
# Keywords are different for each step
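# again 15 chunks -> 5 + 2 = 7 combine calls in the reduction tree, then one aggregate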
assert f(3).compute() == 60 + 15 + 7 * (2 + 1) + (3 + 2)
# Keywords are same for each step
res = ddf.reduction(
chunk, aggregate=agg, combine=combine, constant=3.0, split_every=3
)
assert res.compute() == 60 + 15 * 3 + 7 * (3 + 1) + (3 + 2)
# No combine provided, combine is agg
res = ddf.reduction(chunk, aggregate=agg, constant=3.0, split_every=3)
assert res.compute() == 60 + 15 * 3 + 8 * (3 + 2)
# split_every must be >= 2
with pytest.raises(ValueError):
f(1)
# combine_kwargs with no combine provided
with pytest.raises(ValueError):
ddf.reduction(
chunk,
aggregate=agg,
split_every=3,
chunk_kwargs=dict(constant=1.0),
combine_kwargs=dict(constant=2.0),
aggregate_kwargs=dict(constant=3.0),
)
def test_pipe():
df = pd.DataFrame({"x": range(50), "y": range(50, 100)})
ddf = dd.from_pandas(df, npartitions=4)
def f(x, y, z=0):
return x + y + z
assert_eq(ddf.pipe(f, 1, z=2), f(ddf, 1, z=2))
assert_eq(ddf.x.pipe(f, 1, z=2), f(ddf.x, 1, z=2))
def test_gh_517():
arr = np.random.randn(100, 2)
df = pd.DataFrame(arr, columns=["a", "b"])
ddf = dd.from_pandas(df, 2)
assert ddf.index.nunique().compute() == 100
ddf2 = dd.from_pandas(pd.concat([df, df]), 5)
assert ddf2.index.nunique().compute() == 100
def test_drop_axis_1():
df = pd.DataFrame({"x": [1, 2, 3, 4], "y": [5, 6, 7, 8], "z": [9, 10, 11, 12]})
ddf = dd.from_pandas(df, npartitions=2)
assert_eq(ddf.drop("y", axis=1), df.drop("y", axis=1))
assert_eq(ddf.drop(["y", "z"], axis=1), df.drop(["y", "z"], axis=1))
with pytest.raises(ValueError):
ddf.drop(["a", "x"], axis=1)
assert_eq(
ddf.drop(["a", "x"], axis=1, errors="ignore"),
df.drop(["a", "x"], axis=1, errors="ignore"),
)
assert_eq(ddf.drop(columns=["y", "z"]), df.drop(columns=["y", "z"]))
@pytest.mark.parametrize("columns", [["b"], []])
def test_drop_columns(columns):
# Check both a populated and an empty list argument
# https://github.com/dask/dask/issues/6870
df = pd.DataFrame(
{
"a": [2, 4, 6, 8],
"b": ["1a", "2b", "3c", "4d"],
}
)
ddf = dd.from_pandas(df, npartitions=2)
ddf2 = ddf.drop(columns=columns)
ddf["new"] = ddf["a"] + 1 # Check that ddf2 is not modified
assert_eq(df.drop(columns=columns), ddf2)
def test_gh580():
df = pd.DataFrame({"x": np.arange(10, dtype=float)})
ddf = dd.from_pandas(df, 2)
assert_eq(np.cos(df["x"]), np.cos(ddf["x"]))
assert_eq(np.cos(df["x"]), np.cos(ddf["x"]))
def test_gh6305():
df = pd.DataFrame({"x": np.arange(3, dtype=float)})
ddf = dd.from_pandas(df, 1)
ddf_index_only = ddf.set_index("x")
ds = ddf["x"]
is_broadcastable([ddf_index_only], ds)
def test_rename_dict():
renamer = {"a": "A", "b": "B"}
assert_eq(d.rename(columns=renamer), full.rename(columns=renamer))
def test_rename_function():
renamer = lambda x: x.upper()
assert_eq(d.rename(columns=renamer), full.rename(columns=renamer))
def test_rename_index():
renamer = {0: 1}
pytest.raises(ValueError, lambda: d.rename(index=renamer))
def test_to_timestamp():
index = pd.period_range(freq="A", start="1/1/2001", end="12/1/2004")
df = pd.DataFrame({"x": [1, 2, 3, 4], "y": [10, 20, 30, 40]}, index=index)
ddf = dd.from_pandas(df, npartitions=3)
assert_eq(ddf.to_timestamp(), df.to_timestamp(), **CHECK_FREQ)
assert_eq(
ddf.to_timestamp(freq="M", how="s").compute(),
df.to_timestamp(freq="M", how="s"),
**CHECK_FREQ,
)
assert_eq(ddf.x.to_timestamp(), df.x.to_timestamp())
assert_eq(
ddf.x.to_timestamp(freq="M", how="s").compute(),
df.x.to_timestamp(freq="M", how="s"),
**CHECK_FREQ,
)
def test_to_frame():
s = pd.Series([1, 2, 3], name="foo")
a = dd.from_pandas(s, npartitions=2)
assert_eq(s.to_frame(), a.to_frame())
assert_eq(s.to_frame("bar"), a.to_frame("bar"))
@pytest.mark.parametrize("as_frame", [False, False])
def test_to_dask_array_raises(as_frame):
s = pd.Series([1, 2, 3, 4, 5, 6], name="foo")
a = dd.from_pandas(s, npartitions=2)
if as_frame:
a = a.to_frame()
with pytest.raises(ValueError, match="4 != 2"):
a.to_dask_array((1, 2, 3, 4))
with pytest.raises(ValueError, match="Unexpected value"):
a.to_dask_array(5)
@pytest.mark.parametrize("as_frame", [False, True])
def test_to_dask_array_unknown(as_frame):
s = pd.Series([1, 2, 3, 4, 5], name="foo")
a = dd.from_pandas(s, chunksize=2)
if as_frame:
a = a.to_frame()
result = a.to_dask_array()
assert isinstance(result, da.Array)
result = result.chunks
if as_frame:
assert len(result) == 2
assert result[1] == (1,)
else:
assert len(result) == 1
result = result[0]
assert len(result) == 2
assert all(np.isnan(x) for x in result)
@pytest.mark.parametrize(
"lengths,as_frame,meta",
[
([2, 3], False, None),
(True, False, None),
(True, False, np.array([], dtype="f4")),
],
)
def test_to_dask_array(meta, as_frame, lengths):
s = pd.Series([1, 2, 3, 4, 5], name="foo", dtype="i4")
a = dd.from_pandas(s, chunksize=2)
if as_frame:
a = a.to_frame()
result = a.to_dask_array(lengths=lengths, meta=meta)
assert isinstance(result, da.Array)
expected_chunks = ((2, 3),)
if as_frame:
expected_chunks = expected_chunks + ((1,),)
assert result.chunks == expected_chunks
def test_apply():
df = pd.DataFrame({"x": [1, 2, 3, 4], "y": [10, 20, 30, 40]})
ddf = dd.from_pandas(df, npartitions=2)
func = lambda row: row["x"] + row["y"]
assert_eq(
ddf.x.apply(lambda x: x + 1, meta=("x", int)), df.x.apply(lambda x: x + 1)
)
# specify meta
assert_eq(
ddf.apply(lambda xy: xy[0] + xy[1], axis=1, meta=(None, int)),
df.apply(lambda xy: xy[0] + xy[1], axis=1),
)
assert_eq(
ddf.apply(lambda xy: xy[0] + xy[1], axis="columns", meta=(None, int)),
df.apply(lambda xy: xy[0] + xy[1], axis="columns"),
)
# inference
with pytest.warns(None):
assert_eq(
ddf.apply(lambda xy: xy[0] + xy[1], axis=1),
df.apply(lambda xy: xy[0] + xy[1], axis=1),
)
with pytest.warns(None):
assert_eq(ddf.apply(lambda xy: xy, axis=1), df.apply(lambda xy: xy, axis=1))
# specify meta
func = lambda x: pd.Series([x, x])
assert_eq(ddf.x.apply(func, meta=[(0, int), (1, int)]), df.x.apply(func))
# inference
with pytest.warns(None):
assert_eq(ddf.x.apply(func), df.x.apply(func))
# axis=0
with pytest.raises(NotImplementedError):
ddf.apply(lambda xy: xy, axis=0)
with pytest.raises(NotImplementedError):
ddf.apply(lambda xy: xy, axis="index")
def test_apply_warns():
df = pd.DataFrame({"x": [1, 2, 3, 4], "y": [10, 20, 30, 40]})
ddf = dd.from_pandas(df, npartitions=2)
func = lambda row: row["x"] + row["y"]
with pytest.warns(UserWarning) as w:
ddf.apply(func, axis=1)
assert len(w) == 1
with pytest.warns(None) as w:
ddf.apply(func, axis=1, meta=(None, int))
assert len(w) == 0
with pytest.warns(UserWarning) as w:
ddf.apply(lambda x: x, axis=1)
assert len(w) == 1
assert "'x'" in str(w[0].message)
assert "int64" in str(w[0].message)
def test_apply_warns_with_invalid_meta():
df = pd.DataFrame({"x": [1, 2, 3, 4], "y": [10, 20, 30, 40]})
ddf = dd.from_pandas(df, npartitions=2)
func = lambda row: row["x"] + row["y"]
with pytest.warns(FutureWarning, match="Meta is not valid"):
ddf.apply(func, axis=1, meta=int)
def test_applymap():
df = pd.DataFrame({"x": [1, 2, 3, 4], "y": [10, 20, 30, 40]})
ddf = dd.from_pandas(df, npartitions=2)
assert_eq(ddf.applymap(lambda x: x + 1), df.applymap(lambda x: x + 1))
assert_eq(ddf.applymap(lambda x: (x, x)), df.applymap(lambda x: (x, x)))
def test_add_prefix():
df = pd.DataFrame({"x": [1, 2, 3, 4, 5], "y": [4, 5, 6, 7, 8]})
ddf = dd.from_pandas(df, npartitions=2)
assert_eq(ddf.add_prefix("abc"), df.add_prefix("abc"))
assert_eq(ddf.x.add_prefix("abc"), df.x.add_prefix("abc"))
def test_add_suffix():
df = pd.DataFrame({"x": [1, 2, 3, 4, 5], "y": [4, 5, 6, 7, 8]})
ddf = dd.from_pandas(df, npartitions=2)
assert_eq(ddf.add_suffix("abc"), df.add_suffix("abc"))
assert_eq(ddf.x.add_suffix("abc"), df.x.add_suffix("abc"))
def test_abs():
df = pd.DataFrame(
{
"A": [1, -2, 3, -4, 5],
"B": [-6.0, -7, -8, -9, 10],
"C": ["a", "b", "c", "d", "e"],
}
)
ddf = dd.from_pandas(df, npartitions=2)
assert_eq(ddf.A.abs(), df.A.abs())
assert_eq(ddf[["A", "B"]].abs(), df[["A", "B"]].abs())
pytest.raises(ValueError, lambda: ddf.C.abs())
pytest.raises(TypeError, lambda: ddf.abs())
def test_round():
df = pd.DataFrame({"col1": [1.123, 2.123, 3.123], "col2": [1.234, 2.234, 3.234]})
ddf = dd.from_pandas(df, npartitions=2)
assert_eq(ddf.round(), df.round())
assert_eq(ddf.round(2), df.round(2))
def test_cov():
# DataFrame
df = _compat.makeMissingDataframe()
ddf = dd.from_pandas(df, npartitions=6)
res = ddf.cov()
res2 = ddf.cov(split_every=2)
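# the positional 10 is min_periods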
res3 = ddf.cov(10)
res4 = ddf.cov(10, split_every=2)
sol = df.cov()
sol2 = df.cov(10)
assert_eq(res, sol)
assert_eq(res2, sol)
assert_eq(res3, sol2)
assert_eq(res4, sol2)
assert res._name == ddf.cov()._name
assert res._name != res2._name
assert res3._name != res4._name
assert res._name != res3._name
# Series
a = df.A
b = df.B
da = dd.from_pandas(a, npartitions=6)
db = dd.from_pandas(b, npartitions=7)
res = da.cov(db)
res2 = da.cov(db, split_every=2)
res3 = da.cov(db, 10)
res4 = da.cov(db, 10, split_every=2)
sol = a.cov(b)
sol2 = a.cov(b, 10)
assert_eq(res, sol)
assert_eq(res2, sol)
assert_eq(res3, sol2)
assert_eq(res4, sol2)
assert res._name == da.cov(db)._name
assert res._name != res2._name
assert res3._name != res4._name
assert res._name != res3._name
def test_corr():
# DataFrame
df = _compat.makeMissingDataframe()
ddf = dd.from_pandas(df, npartitions=6)
res = ddf.corr()
res2 = ddf.corr(split_every=2)
res3 = ddf.corr(min_periods=10)
res4 = ddf.corr(min_periods=10, split_every=2)
sol = df.corr()
sol2 = df.corr(min_periods=10)
assert_eq(res, sol)
assert_eq(res2, sol)
assert_eq(res3, sol2)
assert_eq(res4, sol2)
assert res._name == ddf.corr()._name
assert res._name != res2._name
assert res3._name != res4._name
assert res._name != res3._name
pytest.raises(NotImplementedError, lambda: ddf.corr(method="spearman"))
# Series
a = df.A
b = df.B
da = dd.from_pandas(a, npartitions=6)
db = dd.from_pandas(b, npartitions=7)
res = da.corr(db)
res2 = da.corr(db, split_every=2)
res3 = da.corr(db, min_periods=10)
res4 = da.corr(db, min_periods=10, split_every=2)
sol = a.corr(b)
sol2 = a.corr(b, min_periods=10)
assert_eq(res, sol)
assert_eq(res2, sol)
assert_eq(res3, sol2)
assert_eq(res4, sol2)
assert res._name == da.corr(db)._name
assert res._name != res2._name
assert res3._name != res4._name
assert res._name != res3._name
pytest.raises(NotImplementedError, lambda: da.corr(db, method="spearman"))
pytest.raises(TypeError, lambda: da.corr(ddf))
def test_corr_same_name():
# Series with same names (see https://github.com/dask/dask/issues/4906)
df = _compat.makeMissingDataframe()
ddf = dd.from_pandas(df, npartitions=6)
result = ddf.A.corr(ddf.B.rename("A"))
expected = ddf.A.corr(ddf.B)
assert_eq(result, expected)
# test with split_every
result2 = ddf.A.corr(ddf.B.rename("A"), split_every=2)
assert_eq(result2, expected)
def test_cov_corr_meta():
df = pd.DataFrame(
{
"a": np.array([1, 2, 3]),
"b": np.array([1.0, 2.0, 3.0], dtype="f4"),
"c": np.array([1.0, 2.0, 3.0]),
},
index=pd.Index([1, 2, 3], name="myindex"),
)
ddf = dd.from_pandas(df, npartitions=2)
assert_eq(ddf.corr(), df.corr())
assert_eq(ddf.cov(), df.cov())
assert ddf.a.cov(ddf.b)._meta.dtype == "f8"
assert ddf.a.corr(ddf.b)._meta.dtype == "f8"
@pytest.mark.slow
def test_cov_corr_stable():
df = pd.DataFrame(np.random.uniform(-1, 1, (20000000, 2)), columns=["a", "b"])
ddf = dd.from_pandas(df, npartitions=50)
assert_eq(ddf.cov(split_every=8), df.cov())
assert_eq(ddf.corr(split_every=8), df.corr())
def test_cov_corr_mixed():
size = 1000
d = {
"dates": pd.date_range("2015-01-01", periods=size, freq="1T"),
"unique_id": np.arange(0, size),
"ints": np.random.randint(0, size, size=size),
"floats": np.random.randn(size),
"bools": np.random.choice([0, 1], size=size),
"int_nans": np.random.choice([0, 1, np.nan], size=size),
"float_nans": np.random.choice([0.0, 1.0, np.nan], size=size),
"constant": 1,
"int_categorical": np.random.choice([10, 20, 30, 40, 50], size=size),
"categorical_binary": np.random.choice(["a", "b"], size=size),
"categorical_nans": np.random.choice(["a", "b", "c"], size=size),
}
df = pd.DataFrame(d)
df["hardbools"] = df["bools"] == 1
df["categorical_nans"] = df["categorical_nans"].replace("c", np.nan)
df["categorical_binary"] = df["categorical_binary"].astype("category")
df["unique_id"] = df["unique_id"].astype(str)
ddf = dd.from_pandas(df, npartitions=20)
assert_eq(ddf.corr(split_every=4), df.corr(), check_divisions=False)
assert_eq(ddf.cov(split_every=4), df.cov(), check_divisions=False)
def test_autocorr():
x = pd.Series(np.random.random(100))
dx = dd.from_pandas(x, npartitions=10)
assert_eq(dx.autocorr(2), x.autocorr(2))
assert_eq(dx.autocorr(0), x.autocorr(0))
assert_eq(dx.autocorr(-2), x.autocorr(-2))
assert_eq(dx.autocorr(2, split_every=3), x.autocorr(2))
pytest.raises(TypeError, lambda: dx.autocorr(1.5))
def test_apply_infer_columns():
df = pd.DataFrame({"x": [1, 2, 3, 4], "y": [10, 20, 30, 40]})
ddf = dd.from_pandas(df, npartitions=2)
def return_df(x):
# applied along axis=1, this creates a new DataFrame whose columns are ['sum', 'mean']
return pd.Series([x.sum(), x.mean()], index=["sum", "mean"])
# DataFrame to completely different DataFrame
with pytest.warns(None):
result = ddf.apply(return_df, axis=1)
assert isinstance(result, dd.DataFrame)
tm.assert_index_equal(result.columns, pd.Index(["sum", "mean"]))
assert_eq(result, df.apply(return_df, axis=1))
# DataFrame to Series
with pytest.warns(None):
result = ddf.apply(lambda x: 1, axis=1)
assert isinstance(result, dd.Series)
assert result.name is None
assert_eq(result, df.apply(lambda x: 1, axis=1))
def return_df2(x):
return pd.Series([x * 2, x * 3], index=["x2", "x3"])
# Series to completely different DataFrame
with pytest.warns(None):
result = ddf.x.apply(return_df2)
assert isinstance(result, dd.DataFrame)
tm.assert_index_equal(result.columns, pd.Index(["x2", "x3"]))
assert_eq(result, df.x.apply(return_df2))
# Series to Series
with pytest.warns(None):
result = ddf.x.apply(lambda x: 1)
assert isinstance(result, dd.Series)
assert result.name == "x"
assert_eq(result, df.x.apply(lambda x: 1))
def test_index_time_properties():
i = _compat.makeTimeSeries()
a = dd.from_pandas(i, npartitions=3)
assert "day" in dir(a.index)
# returns a numpy array in pandas, but an Index in dask
assert_eq(a.index.day, pd.Index(i.index.day))
assert_eq(a.index.month, pd.Index(i.index.month))
def test_nlargest_nsmallest():
from string import ascii_lowercase
df = pd.DataFrame(
{
"a": np.random.permutation(20),
"b": list(ascii_lowercase[:20]),
"c": np.random.permutation(20).astype("float64"),
}
)
ddf = dd.from_pandas(df, npartitions=3)
for m in ["nlargest", "nsmallest"]:
f = lambda df, *args, **kwargs: getattr(df, m)(*args, **kwargs)
res = f(ddf, 5, "a")
res2 = f(ddf, 5, "a", split_every=2)
sol = f(df, 5, "a")
assert_eq(res, sol)
assert_eq(res2, sol)
assert res._name != res2._name
res = f(ddf, 5, ["a", "c"])
res2 = f(ddf, 5, ["a", "c"], split_every=2)
sol = f(df, 5, ["a", "c"])
assert_eq(res, sol)
assert_eq(res2, sol)
assert res._name != res2._name
res = f(ddf.a, 5)
res2 = f(ddf.a, 5, split_every=2)
sol = f(df.a, 5)
assert_eq(res, sol)
assert_eq(res2, sol)
assert res._name != res2._name
def test_reset_index():
df = pd.DataFrame({"x": [1, 2, 3, 4], "y": [10, 20, 30, 40]})
ddf = dd.from_pandas(df, npartitions=2)
sol = df.reset_index()
res = ddf.reset_index()
assert all(d is None for d in res.divisions)
assert_eq(res, sol, check_index=False)
sol = df.reset_index(drop=True)
res = ddf.reset_index(drop=True)
assert all(d is None for d in res.divisions)
assert_eq(res, sol, check_index=False)
sol = df.x.reset_index()
res = ddf.x.reset_index()
assert all(d is None for d in res.divisions)
assert_eq(res, sol, check_index=False)
sol = df.x.reset_index(drop=True)
res = ddf.x.reset_index(drop=True)
assert all(d is None for d in res.divisions)
assert_eq(res, sol, check_index=False)
def test_dataframe_compute_forward_kwargs():
x = dd.from_pandas(pd.DataFrame({"a": range(10)}), npartitions=2).a.sum()
x.compute(bogus_keyword=10)
def test_contains_series_raises_deprecated_warning_preserves_behavior():
s = pd.Series(["a", "b", "c", "d"])
ds = dd.from_pandas(s, npartitions=2)
with pytest.warns(
FutureWarning,
match="Using the ``in`` operator to test for membership in Series is deprecated",
):
output = "a" in ds
assert output
with pytest.warns(
FutureWarning,
match="Using the ``in`` operator to test for membership in Series is deprecated",
):
output = 0 in ds
assert not output
def test_series_iteritems():
df = pd.DataFrame({"x": [1, 2, 3, 4]})
ddf = dd.from_pandas(df, npartitions=2)
for (a, b) in zip(df["x"].iteritems(), ddf["x"].iteritems()):
assert a == b
def test_series_iter():
s = pd.DataFrame({"x": [1, 2, 3, 4]})
ds = dd.from_pandas(s, npartitions=2)
for (a, b) in zip(s["x"], ds["x"]):
assert a == b
def test_dataframe_iterrows():
df = pd.DataFrame({"x": [1, 2, 3, 4], "y": [10, 20, 30, 40]})
ddf = dd.from_pandas(df, npartitions=2)
for (a, b) in zip(df.iterrows(), ddf.iterrows()):
tm.assert_series_equal(a[1], b[1])
def test_dataframe_itertuples():
df = pd.DataFrame({"x": [1, 2, 3, 4], "y": [10, 20, 30, 40]})
ddf = dd.from_pandas(df, npartitions=2)
for (a, b) in zip(df.itertuples(), ddf.itertuples()):
assert a == b
@pytest.mark.parametrize(
"columns",
[
("x", "y"),
("x", "x"),
pd.MultiIndex.from_tuples([("x", 1), ("x", 2)], names=("letter", "number")),
],
)
def test_dataframe_items(columns):
df = pd.DataFrame([[1, 10], [2, 20], [3, 30], [4, 40]], columns=columns)
ddf = dd.from_pandas(df, npartitions=2)
for (a, b) in zip(df.items(), ddf.items()):
assert a[0] == b[0] # column name
assert_eq(a[1], b[1].compute()) # column values
def test_dataframe_itertuples_with_index_false():
df = pd.DataFrame({"x": [1, 2, 3, 4], "y": [10, 20, 30, 40]})
ddf = dd.from_pandas(df, npartitions=2)
for (a, b) in zip(df.itertuples(index=False), ddf.itertuples(index=False)):
assert a == b
def test_dataframe_itertuples_with_name_none():
df = pd.DataFrame({"x": [1, 2, 3, 4], "y": [10, 20, 30, 40]})
ddf = dd.from_pandas(df, npartitions=2)
for (a, b) in zip(df.itertuples(name=None), ddf.itertuples(name=None)):
assert a == b
assert type(a) is type(b)
def test_astype():
df = pd.DataFrame(
{"x": [1, 2, 3, None], "y": [10, 20, 30, 40]}, index=[10, 20, 30, 40]
)
a = dd.from_pandas(df, 2)
assert_eq(a.astype(float), df.astype(float))
assert_eq(a.x.astype(float), df.x.astype(float))
def test_astype_categoricals():
df = pd.DataFrame(
{
"x": ["a", "b", "c", "b", "c"],
"y": ["x", "y", "z", "x", "y"],
"z": [1, 2, 3, 4, 5],
}
)
df = df.astype({"y": "category"})
ddf = dd.from_pandas(df, 2)
assert ddf.y.cat.known
ddf2 = ddf.astype({"x": "category"})
assert not ddf2.x.cat.known
assert ddf2.y.cat.known
assert ddf2.x.dtype == "category"
assert ddf2.compute().x.dtype == "category"
dx = ddf.x.astype("category")
assert not dx.cat.known
assert dx.dtype == "category"
assert dx.compute().dtype == "category"
def test_astype_categoricals_known():
df = pd.DataFrame(
{
"x": ["a", "b", "c", "b", "c"],
"y": ["x", "y", "z", "y", "z"],
"z": ["b", "b", "b", "c", "b"],
"other": [1, 2, 3, 4, 5],
}
)
ddf = dd.from_pandas(df, 2)
abc = pd.api.types.CategoricalDtype(["a", "b", "c"], ordered=False)
category = pd.api.types.CategoricalDtype(ordered=False)
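# a CategoricalDtype with explicit categories gives 'known' categories in dask;
# a bare 'category' (or CategoricalDtype without categories) stays unknown until computed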
# DataFrame
ddf2 = ddf.astype({"x": abc, "y": category, "z": "category", "other": "f8"})
for col, known in [("x", True), ("y", False), ("z", False)]:
x = getattr(ddf2, col)
assert pd.api.types.is_categorical_dtype(x.dtype)
assert x.cat.known == known
# Series
for dtype, known in [("category", False), (category, False), (abc, True)]:
dx2 = ddf.x.astype(dtype)
assert pd.api.types.is_categorical_dtype(dx2.dtype)
assert dx2.cat.known == known
def test_groupby_callable():
a = pd.DataFrame({"x": [1, 2, 3, None], "y": [10, 20, 30, 40]}, index=[1, 2, 3, 4])
b = dd.from_pandas(a, 2)
def iseven(x):
return x % 2 == 0
assert_eq(a.groupby(iseven).y.sum(), b.groupby(iseven).y.sum())
assert_eq(a.y.groupby(iseven).sum(), b.y.groupby(iseven).sum())
def test_methods_tokenize_differently():
df = pd.DataFrame({"x": [1, 2, 3, 4]})
df = dd.from_pandas(df, npartitions=1)
assert (
df.x.map_partitions(lambda x: pd.Series(x.min()))._name
!= df.x.map_partitions(lambda x: pd.Series(x.max()))._name
)
def _assert_info(df, ddf, memory_usage=True):
from io import StringIO
assert isinstance(df, pd.DataFrame)
assert isinstance(ddf, dd.DataFrame)
buf_pd, buf_da = StringIO(), StringIO()
df.info(buf=buf_pd, memory_usage=memory_usage)
ddf.info(buf=buf_da, verbose=True, memory_usage=memory_usage)
stdout_pd = buf_pd.getvalue()
stdout_da = buf_da.getvalue()
stdout_da = stdout_da.replace(str(type(ddf)), str(type(df)))
# TODO
assert stdout_pd == stdout_da
def test_info():
from io import StringIO
pandas_format._put_lines = put_lines
test_frames = [
pd.DataFrame({"x": [1, 2, 3, 4], "y": [1, 0, 1, 0]}, index=[0, 1, 2, 3]),
pd.DataFrame(),
]
for df in test_frames:
ddf = dd.from_pandas(df, npartitions=4)
_assert_info(df, ddf)
buf = StringIO()
ddf = dd.from_pandas(
pd.DataFrame({"x": [1, 2, 3, 4], "y": [1, 0, 1, 0]}, index=range(4)),
npartitions=4,
)
# Verbose=False
ddf.info(buf=buf, verbose=False)
assert buf.getvalue() == (
"<class 'dask.dataframe.core.DataFrame'>\n"
"Columns: 2 entries, x to y\n"
"dtypes: int64(2)"
)
# buf=None
assert ddf.info(buf=None) is None
def test_groupby_multilevel_info():
# GH 1844
from io import StringIO
pandas_format._put_lines = put_lines
df = pd.DataFrame({"A": [1, 1, 2, 2], "B": [1, 2, 3, 4], "C": [1, 2, 3, 4]})
ddf = dd.from_pandas(df, npartitions=2)
g = ddf.groupby(["A", "B"]).sum()
# slight difference between memory repr (single additional space)
_assert_info(g.compute(), g, memory_usage=True)
buf = StringIO()
g.info(buf, verbose=False)
assert buf.getvalue() == (
"<class 'dask.dataframe.core.DataFrame'>\n"
"Columns: 1 entries, C to C\n"
"dtypes: int64(1)"
)
# multilevel
g = ddf.groupby(["A", "B"]).agg(["count", "sum"])
_assert_info(g.compute(), g, memory_usage=True)
buf = StringIO()
g.info(buf, verbose=False)
expected = (
"<class 'dask.dataframe.core.DataFrame'>\n"
"Columns: 2 entries, ('C', 'count') to ('C', 'sum')\n"
"dtypes: int64(2)"
)
assert buf.getvalue() == expected
@pytest.mark.skipif(not PANDAS_GT_120, reason="need newer version of Pandas")
def test_categorize_info():
# assert that we can call info after categorize
# workaround for: https://github.com/pydata/pandas/issues/14368
from io import StringIO
pandas_format._put_lines = put_lines
df = pd.DataFrame(
{"x": [1, 2, 3, 4], "y": pd.Series(list("aabc")), "z": pd.Series(list("aabc"))},
index=[0, 1, 2, 3],
)
ddf = dd.from_pandas(df, npartitions=4).categorize(["y"])
# Verbose=True
buf = StringIO()
ddf.info(buf=buf, verbose=True)
expected = (
"<class 'dask.dataframe.core.DataFrame'>\n"
"Int64Index: 4 entries, 0 to 3\n"
"Data columns (total 3 columns):\n"
" # Column Non-Null Count Dtype\n"
"--- ------ -------------- -----\n"
" 0 x 4 non-null int64\n"
" 1 y 4 non-null category\n"
" 2 z 4 non-null object\n"
"dtypes: category(1), object(1), int64(1)\n"
"memory usage: 496.0 bytes\n"
)
assert buf.getvalue() == expected
def test_gh_1301():
df = pd.DataFrame([["1", "2"], ["3", "4"]])
ddf = dd.from_pandas(df, npartitions=2)
ddf2 = ddf.assign(y=ddf[1].astype(int))
assert_eq(ddf2, df.assign(y=df[1].astype(int)))
assert ddf2.dtypes["y"] == np.dtype(int)
def test_timeseries_sorted():
df = _compat.makeTimeDataFrame()
ddf = dd.from_pandas(df.reset_index(), npartitions=2)
df.index.name = "index"
assert_eq(ddf.set_index("index", sorted=True, drop=True), df)
def test_column_assignment():
df = pd.DataFrame({"x": [1, 2, 3, 4], "y": [1, 0, 1, 0]})
ddf = dd.from_pandas(df, npartitions=2)
orig = ddf.copy()
ddf["z"] = ddf.x + ddf.y
df["z"] = df.x + df.y
assert_eq(df, ddf)
assert "z" not in orig.columns
def test_array_assignment():
df = pd.DataFrame({"x": np.random.normal(size=50), "y": np.random.normal(size=50)})
ddf = dd.from_pandas(df, npartitions=2)
orig = ddf.copy()
arr = np.array(np.random.normal(size=50))
darr = da.from_array(arr, chunks=25)
df["z"] = arr
ddf["z"] = darr
assert_eq(df, ddf)
assert "z" not in orig.columns
arr = np.array(np.random.normal(size=(50, 50)))
darr = da.from_array(arr, chunks=25)
msg = "Array assignment only supports 1-D arrays"
with pytest.raises(ValueError, match=msg):
ddf["z"] = darr
arr = np.array(np.random.normal(size=50))
darr = da.from_array(arr, chunks=10)
msg = "Number of partitions do not match"
with pytest.raises(ValueError, match=msg):
ddf["z"] = darr
def test_columns_assignment():
df = pd.DataFrame({"x": [1, 2, 3, 4]})
ddf = dd.from_pandas(df, npartitions=2)
df2 = df.assign(y=df.x + 1, z=df.x - 1)
df[["a", "b"]] = df2[["y", "z"]]
ddf2 = ddf.assign(y=ddf.x + 1, z=ddf.x - 1)
ddf[["a", "b"]] = ddf2[["y", "z"]]
assert_eq(df, ddf)
def test_attribute_assignment():
df = pd.DataFrame({"x": [1, 2, 3, 4, 5], "y": [1.0, 2.0, 3.0, 4.0, 5.0]})
ddf = dd.from_pandas(df, npartitions=2)
ddf.y = ddf.x + ddf.y
assert_eq(ddf, df.assign(y=df.x + df.y))
def test_setitem_triggering_realign():
a = dd.from_pandas(pd.DataFrame({"A": range(12)}), npartitions=3)
b = dd.from_pandas(pd.Series(range(12), name="B"), npartitions=4)
a["C"] = b
assert len(a) == 12
def test_inplace_operators():
df = pd.DataFrame({"x": [1, 2, 3, 4, 5], "y": [1.0, 2.0, 3.0, 4.0, 5.0]})
ddf = dd.from_pandas(df, npartitions=2)
ddf.y **= 0.5
assert_eq(ddf.y, df.y**0.5)
assert_eq(ddf, df.assign(y=df.y**0.5))
@pytest.mark.parametrize("skipna", [True, False])
@pytest.mark.parametrize(
"idx",
[
np.arange(100),
sorted(np.random.random(size=100)),
pd.date_range("20150101", periods=100),
],
)
def test_idxmaxmin(idx, skipna):
df = pd.DataFrame(np.random.randn(100, 5), columns=list("abcde"), index=idx)
df.b.iloc[31] = np.nan
df.d.iloc[78] = np.nan
ddf = dd.from_pandas(df, npartitions=3)
# https://github.com/pandas-dev/pandas/issues/43587
check_dtype = not all(
(_compat.PANDAS_GT_133, skipna is False, isinstance(idx, pd.DatetimeIndex))
)
with warnings.catch_warnings(record=True):
assert_eq(df.idxmax(axis=1, skipna=skipna), ddf.idxmax(axis=1, skipna=skipna))
assert_eq(df.idxmin(axis=1, skipna=skipna), ddf.idxmin(axis=1, skipna=skipna))
assert_eq(
df.idxmax(skipna=skipna), ddf.idxmax(skipna=skipna), check_dtype=check_dtype
)
assert_eq(
df.idxmax(skipna=skipna),
ddf.idxmax(skipna=skipna, split_every=2),
check_dtype=check_dtype,
)
assert (
ddf.idxmax(skipna=skipna)._name
!= ddf.idxmax(skipna=skipna, split_every=2)._name
)
assert_eq(
df.idxmin(skipna=skipna), ddf.idxmin(skipna=skipna), check_dtype=check_dtype
)
assert_eq(
df.idxmin(skipna=skipna),
ddf.idxmin(skipna=skipna, split_every=2),
check_dtype=check_dtype,
)
assert (
ddf.idxmin(skipna=skipna)._name
!= ddf.idxmin(skipna=skipna, split_every=2)._name
)
assert_eq(df.a.idxmax(skipna=skipna), ddf.a.idxmax(skipna=skipna))
assert_eq(
df.a.idxmax(skipna=skipna), ddf.a.idxmax(skipna=skipna, split_every=2)
)
assert (
ddf.a.idxmax(skipna=skipna)._name
!= ddf.a.idxmax(skipna=skipna, split_every=2)._name
)
assert_eq(df.a.idxmin(skipna=skipna), ddf.a.idxmin(skipna=skipna))
assert_eq(
df.a.idxmin(skipna=skipna), ddf.a.idxmin(skipna=skipna, split_every=2)
)
assert (
ddf.a.idxmin(skipna=skipna)._name
!= ddf.a.idxmin(skipna=skipna, split_every=2)._name
)
def test_idxmaxmin_empty_partitions():
df = pd.DataFrame(
{"a": [1, 2, 3], "b": [1.5, 2, 3], "c": [np.NaN] * 3, "d": [1, 2, np.NaN]}
)
empty = df.iloc[:0]
ddf = dd.concat(
[dd.from_pandas(df, npartitions=1)]
+ [dd.from_pandas(empty, npartitions=1)] * 10
)
for skipna in [True, False]:
assert_eq(ddf.idxmin(skipna=skipna, split_every=3), df.idxmin(skipna=skipna))
assert_eq(
ddf[["a", "b", "d"]].idxmin(skipna=skipna, split_every=3),
df[["a", "b", "d"]].idxmin(skipna=skipna),
)
assert_eq(ddf.b.idxmax(split_every=3), df.b.idxmax())
# Completely empty raises
ddf = dd.concat([dd.from_pandas(empty, npartitions=1)] * 10)
with pytest.raises(ValueError):
ddf.idxmax().compute()
with pytest.raises(ValueError):
ddf.b.idxmax().compute()
def test_getitem_meta():
data = {"col1": ["a", "a", "b"], "col2": [0, 1, 0]}
df = pd.DataFrame(data=data, columns=["col1", "col2"])
ddf = dd.from_pandas(df, npartitions=1)
assert_eq(df.col2[df.col1 == "a"], ddf.col2[ddf.col1 == "a"])
def test_getitem_multilevel():
pdf = pd.DataFrame({("A", "0"): [1, 2, 2], ("B", "1"): [1, 2, 3]})
ddf = dd.from_pandas(pdf, npartitions=3)
assert_eq(pdf["A", "0"], ddf["A", "0"])
assert_eq(pdf[[("A", "0"), ("B", "1")]], ddf[[("A", "0"), ("B", "1")]])
def test_getitem_string_subclass():
df = pd.DataFrame({"column_1": list(range(10))})
ddf = dd.from_pandas(df, npartitions=3)
class string_subclass(str):
pass
column_1 = string_subclass("column_1")
assert_eq(df[column_1], ddf[column_1])
@pytest.mark.parametrize("col_type", [list, np.array, pd.Series, pd.Index])
def test_getitem_column_types(col_type):
df = pd.DataFrame({"A": [1, 2], "B": [3, 4], "C": [5, 6]})
ddf = dd.from_pandas(df, 2)
cols = col_type(["C", "A", "B"])
assert_eq(df[cols], ddf[cols])
def test_getitem_with_bool_dataframe_as_key():
df = pd.DataFrame({"A": [1, 2], "B": [3, 4], "C": [5, 6]})
ddf = dd.from_pandas(df, 2)
assert_eq(df[df > 3], ddf[ddf > 3])
def test_getitem_with_non_series():
s = pd.Series(list(range(10)), index=list("abcdefghij"))
ds = dd.from_pandas(s, npartitions=3)
assert_eq(s[["a", "b"]], ds[["a", "b"]])
def test_ipython_completion():
df = pd.DataFrame({"a": [1], "b": [2]})
ddf = dd.from_pandas(df, npartitions=1)
completions = ddf._ipython_key_completions_()
assert "a" in completions
assert "b" in completions
assert "c" not in completions
def test_diff():
df = pd.DataFrame(np.random.randn(100, 5), columns=list("abcde"))
ddf = dd.from_pandas(df, 5)
assert_eq(ddf.diff(), df.diff())
assert_eq(ddf.diff(0), df.diff(0))
assert_eq(ddf.diff(2), df.diff(2))
assert_eq(ddf.diff(-2), df.diff(-2))
assert_eq(ddf.diff(2, axis=1), df.diff(2, axis=1))
assert_eq(ddf.a.diff(), df.a.diff())
assert_eq(ddf.a.diff(0), df.a.diff(0))
assert_eq(ddf.a.diff(2), df.a.diff(2))
assert_eq(ddf.a.diff(-2), df.a.diff(-2))
assert ddf.diff(2)._name == ddf.diff(2)._name
assert ddf.diff(2)._name != ddf.diff(3)._name
pytest.raises(TypeError, lambda: ddf.diff(1.5))
def test_shift():
df = _compat.makeTimeDataFrame()
ddf = dd.from_pandas(df, npartitions=4)
# DataFrame
assert_eq(ddf.shift(), df.shift())
assert_eq(ddf.shift(0), df.shift(0))
assert_eq(ddf.shift(2), df.shift(2))
assert_eq(ddf.shift(-2), df.shift(-2))
assert_eq(ddf.shift(2, axis=1), df.shift(2, axis=1))
# Series
assert_eq(ddf.A.shift(), df.A.shift())
assert_eq(ddf.A.shift(0), df.A.shift(0))
assert_eq(ddf.A.shift(2), df.A.shift(2))
assert_eq(ddf.A.shift(-2), df.A.shift(-2))
with pytest.raises(TypeError):
ddf.shift(1.5)
@pytest.mark.parametrize("data_freq,divs1", [("B", False), ("D", True), ("H", True)])
def test_shift_with_freq_DatetimeIndex(data_freq, divs1):
df = _compat.makeTimeDataFrame()
df = df.set_index(_compat.makeDateIndex(30, freq=data_freq))
ddf = dd.from_pandas(df, npartitions=4)
for freq, divs2 in [("S", True), ("W", False), (pd.Timedelta(10, unit="h"), True)]:
for d, p in [(ddf, df), (ddf.A, df.A), (ddf.index, df.index)]:
res = d.shift(2, freq=freq)
assert_eq(res, p.shift(2, freq=freq))
assert res.known_divisions == divs2
# Index shifts also work with freq=None
res = ddf.index.shift(2)
assert_eq(res, df.index.shift(2))
assert res.known_divisions == divs1
@pytest.mark.parametrize("data_freq,divs", [("B", False), ("D", True), ("H", True)])
def test_shift_with_freq_PeriodIndex(data_freq, divs):
df = _compat.makeTimeDataFrame()
# PeriodIndex
df = df.set_index(pd.period_range("2000-01-01", periods=30, freq=data_freq))
ddf = dd.from_pandas(df, npartitions=4)
for d, p in [(ddf, df), (ddf.A, df.A)]:
res = d.shift(2, freq=data_freq)
assert_eq(res, p.shift(2, freq=data_freq))
assert res.known_divisions == divs
# PeriodIndex.shift doesn't have `freq` parameter
res = ddf.index.shift(2)
assert_eq(res, df.index.shift(2))
assert res.known_divisions == divs
df = _compat.makeTimeDataFrame()
with pytest.raises(ValueError):
ddf.index.shift(2, freq="D") # freq keyword not supported
def test_shift_with_freq_TimedeltaIndex():
df = _compat.makeTimeDataFrame()
# TimedeltaIndex
for data_freq in ["T", "D", "H"]:
df = df.set_index(_compat.makeTimedeltaIndex(30, freq=data_freq))
ddf = dd.from_pandas(df, npartitions=4)
for freq in ["S", pd.Timedelta(10, unit="h")]:
for d, p in [(ddf, df), (ddf.A, df.A), (ddf.index, df.index)]:
res = d.shift(2, freq=freq)
assert_eq(res, p.shift(2, freq=freq))
assert res.known_divisions
# Index shifts also work with freq=None
res = ddf.index.shift(2)
assert_eq(res, df.index.shift(2))
assert res.known_divisions
def test_shift_with_freq_errors():
# Other index types error
df = _compat.makeDataFrame()
ddf = dd.from_pandas(df, npartitions=4)
pytest.raises(NotImplementedError, lambda: ddf.shift(2, freq="S"))
pytest.raises(NotImplementedError, lambda: ddf.A.shift(2, freq="S"))
pytest.raises(NotImplementedError, lambda: ddf.index.shift(2))
@pytest.mark.parametrize("method", ["first", "last"])
def test_first_and_last(method):
f = lambda x, offset: getattr(x, method)(offset)
freqs = ["12h", "D"]
offsets = ["0d", "100h", "20d", "20B", "3W", "3M", "400d", "13M"]
for freq in freqs:
index = pd.date_range("1/1/2000", "1/1/2001", freq=freq)[::4]
df = pd.DataFrame(
np.random.random((len(index), 4)), index=index, columns=["A", "B", "C", "D"]
)
ddf = dd.from_pandas(df, npartitions=10)
for offset in offsets:
assert_eq(f(ddf, offset), f(df, offset))
assert_eq(f(ddf.A, offset), f(df.A, offset))
@pytest.mark.parametrize("npartitions", [1, 4, 20])
@pytest.mark.parametrize("split_every", [2, 5])
@pytest.mark.parametrize("split_out", [None, 1, 5, 20])
def test_hash_split_unique(npartitions, split_every, split_out):
from string import ascii_lowercase
s = pd.Series(np.random.choice(list(ascii_lowercase), 1000, replace=True))
ds = dd.from_pandas(s, npartitions=npartitions)
dropped = ds.unique(split_every=split_every, split_out=split_out)
dsk = dropped.__dask_optimize__(dropped.dask, dropped.__dask_keys__())
from dask.core import get_deps
dependencies, dependents = get_deps(dsk)
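    # Tasks with no dependencies are the roots of the optimized graph; there
    # should be exactly one root per input partition.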
assert len([k for k, v in dependencies.items() if not v]) == npartitions
assert dropped.npartitions == (split_out or 1)
assert sorted(dropped.compute(scheduler="sync")) == sorted(s.unique())
@pytest.mark.parametrize("split_every", [None, 2])
def test_split_out_drop_duplicates(split_every):
x = np.concatenate([np.arange(10)] * 100)[:, None]
y = x.copy()
z = np.concatenate([np.arange(20)] * 50)[:, None]
rs = np.random.RandomState(1)
rs.shuffle(x)
rs.shuffle(y)
rs.shuffle(z)
df = pd.DataFrame(np.concatenate([x, y, z], axis=1), columns=["x", "y", "z"])
ddf = dd.from_pandas(df, npartitions=20)
for subset, keep in product([None, ["x", "z"]], ["first", "last"]):
sol = df.drop_duplicates(subset=subset, keep=keep)
res = ddf.drop_duplicates(
subset=subset, keep=keep, split_every=split_every, split_out=10
)
assert res.npartitions == 10
assert_eq(sol, res)
@pytest.mark.parametrize("split_every", [None, 2])
def test_split_out_value_counts(split_every):
df = pd.DataFrame({"x": [1, 2, 3] * 100})
ddf = dd.from_pandas(df, npartitions=5)
assert ddf.x.value_counts(split_out=10, split_every=split_every).npartitions == 10
assert_eq(
ddf.x.value_counts(split_out=10, split_every=split_every), df.x.value_counts()
)
def test_values():
from dask.array.utils import assert_eq
df = pd.DataFrame(
{"x": ["a", "b", "c", "d"], "y": [2, 3, 4, 5]},
index=pd.Index([1.0, 2.0, 3.0, 4.0], name="ind"),
)
ddf = dd.from_pandas(df, 2)
assert_eq(df.values, ddf.values)
assert_eq(df.x.values, ddf.x.values)
assert_eq(df.y.values, ddf.y.values)
assert_eq(df.index.values, ddf.index.values)
def test_copy():
df = pd.DataFrame({"x": [1, 2, 3]})
a = dd.from_pandas(df, npartitions=2)
b = a.copy()
c = a.copy(deep=False)
a["y"] = a.x * 2
assert_eq(b, df)
assert_eq(c, df)
deep_err = (
"The `deep` value must be False. This is strictly a shallow copy "
"of the underlying computational graph."
)
for deep in [True, None, ""]:
with pytest.raises(ValueError, match=deep_err):
a.copy(deep=deep)
def test_del():
df = pd.DataFrame(
{"x": ["a", "b", "c", "d"], "y": [2, 3, 4, 5]},
index=pd.Index([1.0, 2.0, 3.0, 4.0], name="ind"),
)
a = dd.from_pandas(df, 2)
b = a.copy()
del a["x"]
assert_eq(b, df)
del df["x"]
assert_eq(a, df)
@pytest.mark.parametrize("index", [True, False])
@pytest.mark.parametrize("deep", [True, False])
def test_memory_usage(index, deep):
df = pd.DataFrame({"x": [1, 2, 3], "y": [1.0, 2.0, 3.0], "z": ["a", "b", "c"]})
ddf = dd.from_pandas(df, npartitions=2)
assert_eq(
df.memory_usage(index=index, deep=deep),
ddf.memory_usage(index=index, deep=deep),
)
assert (
df.x.memory_usage(index=index, deep=deep)
== ddf.x.memory_usage(index=index, deep=deep).compute()
)
@pytest.mark.parametrize("index", [True, False])
@pytest.mark.parametrize("deep", [True, False])
def test_memory_usage_per_partition(index, deep):
df = pd.DataFrame(
{
"x": [1, 2, 3, 4, 5],
"y": [1.0, 2.0, 3.0, 4.0, 5.0],
"z": ["a", "b", "c", "d", "e"],
}
)
ddf = dd.from_pandas(df, npartitions=2)
# DataFrame.memory_usage_per_partition
expected = pd.Series(
part.compute().memory_usage(index=index, deep=deep).sum()
for part in ddf.partitions
)
result = ddf.memory_usage_per_partition(index=index, deep=deep)
assert_eq(expected, result)
# Series.memory_usage_per_partition
expected = pd.Series(
part.x.compute().memory_usage(index=index, deep=deep) for part in ddf.partitions
)
result = ddf.x.memory_usage_per_partition(index=index, deep=deep)
assert_eq(expected, result)
@pytest.mark.parametrize(
"reduction",
[
"sum",
"mean",
"std",
"var",
"count",
"min",
"max",
"idxmin",
"idxmax",
"prod",
"all",
"sem",
],
)
def test_dataframe_reductions_arithmetic(reduction):
df = pd.DataFrame({"x": [1, 2, 3, 4, 5], "y": [1.1, 2.2, 3.3, 4.4, 5.5]})
ddf = dd.from_pandas(df, npartitions=3)
assert_eq(
ddf - (getattr(ddf, reduction)() + 1), df - (getattr(df, reduction)() + 1)
)
def test_dataframe_mode():
data = [["Tom", 10, 7], ["Farahn", 14, 7], ["Julie", 14, 5], ["Nick", 10, 10]]
df = pd.DataFrame(data, columns=["Name", "Num", "Num"])
ddf = dd.from_pandas(df, npartitions=3)
assert_eq(ddf.mode(), df.mode())
# name is not preserved in older pandas
assert_eq(ddf.Name.mode(), df.Name.mode(), check_names=PANDAS_GT_140)
# test empty
df = pd.DataFrame(columns=["a", "b"])
ddf = dd.from_pandas(df, npartitions=1)
# check_index=False should be removed once https://github.com/pandas-dev/pandas/issues/33321 is resolved.
assert_eq(ddf.mode(), df.mode(), check_index=False)
def test_datetime_loc_open_slicing():
dtRange = pd.date_range("01.01.2015", "05.05.2015")
df = pd.DataFrame(np.random.random((len(dtRange), 2)), index=dtRange)
ddf = dd.from_pandas(df, npartitions=5)
assert_eq(df.loc[:"02.02.2015"], ddf.loc[:"02.02.2015"])
assert_eq(df.loc["02.02.2015":], ddf.loc["02.02.2015":])
assert_eq(df[0].loc[:"02.02.2015"], ddf[0].loc[:"02.02.2015"])
assert_eq(df[0].loc["02.02.2015":], ddf[0].loc["02.02.2015":])
def test_to_datetime():
df = pd.DataFrame({"year": [2015, 2016], "month": [2, 3], "day": [4, 5]})
df.index.name = "ix"
ddf = dd.from_pandas(df, npartitions=2)
assert_eq(pd.to_datetime(df), dd.to_datetime(ddf))
s = pd.Series(["3/11/2000", "3/12/2000", "3/13/2000"] * 100)
s.index = s.values
ds = dd.from_pandas(s, npartitions=10, sort=False)
assert_eq(
pd.to_datetime(s, infer_datetime_format=True),
dd.to_datetime(ds, infer_datetime_format=True),
)
assert_eq(
pd.to_datetime(s.index, infer_datetime_format=True),
dd.to_datetime(ds.index, infer_datetime_format=True),
check_divisions=False,
)
assert_eq(
pd.to_datetime(s, utc=True),
dd.to_datetime(ds, utc=True),
)
for arg in ("2021-08-03", 2021):
with pytest.raises(NotImplementedError, match="non-index-able arguments"):
dd.to_datetime(arg)
def test_to_timedelta():
s = pd.Series(range(10))
ds = dd.from_pandas(s, npartitions=2)
assert_eq(pd.to_timedelta(s), dd.to_timedelta(ds))
assert_eq(pd.to_timedelta(s, unit="h"), dd.to_timedelta(ds, unit="h"))
s = pd.Series([1, 2, "this will error"])
ds = dd.from_pandas(s, npartitions=2)
assert_eq(pd.to_timedelta(s, errors="coerce"), dd.to_timedelta(ds, errors="coerce"))
@pytest.mark.parametrize("values", [[np.NaN, 0], [1, 1]])
def test_isna(values):
s = pd.Series(values)
ds = dd.from_pandas(s, npartitions=2)
assert_eq(pd.isna(s), dd.isna(ds))
@pytest.mark.parametrize("drop", [0, 9])
def test_slice_on_filtered_boundary(drop):
# https://github.com/dask/dask/issues/2211
x = np.arange(10)
x[[5, 6]] -= 2
df = pd.DataFrame({"A": x, "B": np.arange(len(x))})
pdf = df.set_index("A").query(f"B != {drop}")
ddf = dd.from_pandas(df, 1).set_index("A").query(f"B != {drop}")
result = dd.concat([ddf, ddf.rename(columns={"B": "C"})], axis=1)
expected = pd.concat([pdf, pdf.rename(columns={"B": "C"})], axis=1)
assert_eq(result, expected)
def test_boundary_slice_nonmonotonic():
x = np.array([-1, -2, 2, 4, 3])
df = pd.DataFrame({"B": range(len(x))}, index=x)
result = methods.boundary_slice(df, 0, 4)
expected = df.iloc[2:]
tm.assert_frame_equal(result, expected)
result = methods.boundary_slice(df, -1, 4)
expected = df.drop(-2)
tm.assert_frame_equal(result, expected)
result = methods.boundary_slice(df, -2, 3)
expected = df.drop(4)
tm.assert_frame_equal(result, expected)
result = methods.boundary_slice(df, -2, 3.5)
expected = df.drop(4)
tm.assert_frame_equal(result, expected)
result = methods.boundary_slice(df, -2, 4)
expected = df
tm.assert_frame_equal(result, expected)
def test_boundary_slice_empty():
df = pd.DataFrame()
result = methods.boundary_slice(df, 1, 4)
expected = pd.DataFrame()
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"start, stop, right_boundary, left_boundary, drop",
[
(-1, None, False, False, [-1, -2]),
(-1, None, False, True, [-2]),
(None, 3, False, False, [3, 4]),
(None, 3, True, False, [4]),
# Missing keys
(-0.5, None, False, False, [-1, -2]),
(-0.5, None, False, True, [-1, -2]),
(-1.5, None, False, True, [-2]),
(None, 3.5, False, False, [4]),
(None, 3.5, True, False, [4]),
(None, 2.5, False, False, [3, 4]),
],
)
def test_with_boundary(start, stop, right_boundary, left_boundary, drop):
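    # boundary_slice keeps rows whose index falls between `start` and `stop`
    # (None means unbounded on that side); the *_boundary flags control whether
    # rows exactly equal to an endpoint are kept. `drop` lists the index labels
    # expected to be excluded for each parameter combination.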
x = np.array([-1, -2, 2, 4, 3])
df = pd.DataFrame({"B": range(len(x))}, index=x)
result = methods.boundary_slice(df, start, stop, right_boundary, left_boundary)
expected = df.drop(drop)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"index, left, right",
[
(range(10), 0, 9),
(range(10), -1, None),
(range(10), None, 10),
([-1, 0, 2, 1], None, None),
([-1, 0, 2, 1], -1, None),
([-1, 0, 2, 1], None, 2),
([-1, 0, 2, 1], -2, 3),
(pd.date_range("2017", periods=10), None, None),
(pd.date_range("2017", periods=10), pd.Timestamp("2017"), None),
(pd.date_range("2017", periods=10), None, pd.Timestamp("2017-01-10")),
(pd.date_range("2017", periods=10), pd.Timestamp("2016"), None),
(pd.date_range("2017", periods=10), None, pd.Timestamp("2018")),
],
)
def test_boundary_slice_same(index, left, right):
df = pd.DataFrame({"A": range(len(index))}, index=index)
result = methods.boundary_slice(df, left, right)
tm.assert_frame_equal(result, df)
def test_better_errors_object_reductions():
# GH2452
s = pd.Series(["a", "b", "c", "d"])
ds = dd.from_pandas(s, npartitions=2)
with pytest.raises(ValueError) as err:
ds.mean()
assert str(err.value) == "`mean` not supported with object series"
def test_sample_empty_partitions():
@dask.delayed
def make_df(n):
return pd.DataFrame(np.zeros((n, 4)), columns=list("abcd"))
ddf = dd.from_delayed([make_df(0), make_df(100), make_df(0)])
ddf2 = ddf.sample(frac=0.2)
# smoke test sample on empty partitions
res = ddf2.compute()
assert res.dtypes.equals(ddf2.dtypes)
def test_coerce():
df = pd.DataFrame(np.arange(100).reshape((10, 10)))
ddf = dd.from_pandas(df, npartitions=2)
funcs = (int, float, complex)
for d, t in product(funcs, (ddf, ddf[0])):
        pytest.raises(TypeError, lambda: d(t))
def test_bool():
df = pd.DataFrame(np.arange(100).reshape((10, 10)))
ddf = dd.from_pandas(df, npartitions=2)
conditions = [ddf, ddf[0], ddf == ddf, ddf[0] == ddf[0]]
for cond in conditions:
with pytest.raises(ValueError):
bool(cond)
def test_cumulative_multiple_columns():
# GH 3037
df = pd.DataFrame(np.random.randn(100, 5), columns=list("abcde"))
ddf = dd.from_pandas(df, 5)
for d in [ddf, df]:
for c in df.columns:
d[c + "cs"] = d[c].cumsum()
d[c + "cmin"] = d[c].cummin()
d[c + "cmax"] = d[c].cummax()
d[c + "cp"] = d[c].cumprod()
assert_eq(ddf, df)
@pytest.mark.parametrize("func", [np.asarray, M.to_records])
def test_map_partition_array(func):
from dask.array.utils import assert_eq
df = pd.DataFrame(
{"x": [1, 2, 3, 4, 5], "y": [6.0, 7.0, 8.0, 9.0, 10.0]},
index=["a", "b", "c", "d", "e"],
)
ddf = dd.from_pandas(df, npartitions=2)
for pre in [lambda a: a, lambda a: a.x, lambda a: a.y, lambda a: a.index]:
try:
expected = func(pre(df))
except Exception:
continue
x = pre(ddf).map_partitions(func)
assert_eq(x, expected, check_type=False) # TODO: make check_type pass
assert isinstance(x, da.Array)
assert x.chunks[0] == (np.nan, np.nan)
def test_map_partition_sparse():
sparse = pytest.importorskip("sparse")
# Avoid searchsorted failure.
pytest.importorskip("numba", minversion="0.40.0")
df = pd.DataFrame(
{"x": [1, 2, 3, 4, 5], "y": [6.0, 7.0, 8.0, 9.0, 10.0]},
index=["a", "b", "c", "d", "e"],
)
ddf = dd.from_pandas(df, npartitions=2)
def f(d):
return sparse.COO(np.array(d))
for pre in [lambda a: a, lambda a: a.x]:
expected = f(pre(df))
result = pre(ddf).map_partitions(f)
assert isinstance(result, da.Array)
computed = result.compute()
assert (computed.data == expected.data).all()
assert (computed.coords == expected.coords).all()
def test_mixed_dask_array_operations():
df = pd.DataFrame({"x": [1, 2, 3]}, index=[4, 5, 6])
ddf = dd.from_pandas(df, npartitions=2)
assert_eq(df.x + df.x.values, ddf.x + ddf.x.values)
assert_eq(df.x.values + df.x, ddf.x.values + ddf.x)
assert_eq(df.x + df.index.values, ddf.x + ddf.index.values)
assert_eq(df.index.values + df.x, ddf.index.values + ddf.x)
assert_eq(df.x + df.x.values.sum(), ddf.x + ddf.x.values.sum())
def test_mixed_dask_array_operations_errors():
df = pd.DataFrame({"x": [1, 2, 3, 4, 5]}, index=[4, 5, 6, 7, 8])
ddf = dd.from_pandas(df, npartitions=2)
x = da.arange(5, chunks=((1, 4),))
x._chunks = ((np.nan, np.nan),)
with pytest.raises(ValueError):
(ddf.x + x).compute()
x = da.arange(5, chunks=((2, 2, 1),))
with pytest.raises(ValueError) as info:
ddf.x + x
assert "add" in str(info.value)
def test_mixed_dask_array_multi_dimensional():
df = pd.DataFrame(
{"x": [1, 2, 3, 4, 5], "y": [5.0, 6.0, 7.0, 8.0, 9.0]}, columns=["x", "y"]
)
ddf = dd.from_pandas(df, npartitions=2)
x = (df.values + 1).astype(float)
dx = (ddf.values + 1).astype(float)
assert_eq(ddf + dx + 1, df + x + 1)
assert_eq(ddf + dx.rechunk((None, 1)) + 1, df + x + 1)
assert_eq(ddf[["y", "x"]] + dx + 1, df[["y", "x"]] + x + 1)
def test_meta_raises():
# Raise when we use a user defined function
s = pd.Series(["abcd", "abcd"])
ds = dd.from_pandas(s, npartitions=2)
try:
ds.map(lambda x: x[3])
except ValueError as e:
assert "meta=" in str(e)
# But not otherwise
df = pd.DataFrame({"a": ["x", "y", "y"], "b": ["x", "y", "z"], "c": [1, 2, 3]})
ddf = dd.from_pandas(df, npartitions=1)
with pytest.raises(Exception) as info:
ddf.a + ddf.c
assert "meta=" not in str(info.value)
def test_meta_nonempty_uses_meta_value_if_provided():
# https://github.com/dask/dask/issues/6958
base = pd.Series([1, 2, 3], dtype="datetime64[ns]")
offsets = pd.Series([pd.offsets.DateOffset(years=o) for o in range(3)])
dask_base = dd.from_pandas(base, npartitions=1)
dask_offsets = dd.from_pandas(offsets, npartitions=1)
dask_offsets._meta = offsets.head()
with pytest.warns(None): # not vectorized performance warning
expected = base + offsets
actual = dask_base + dask_offsets
assert_eq(expected, actual)
def test_dask_dataframe_holds_scipy_sparse_containers():
sparse = pytest.importorskip("scipy.sparse")
da = pytest.importorskip("dask.array")
x = da.random.random((1000, 10), chunks=(100, 10))
x[x < 0.9] = 0
df = dd.from_dask_array(x)
y = df.map_partitions(sparse.csr_matrix)
assert isinstance(y, da.Array)
vs = y.to_delayed().flatten().tolist()
values = dask.compute(*vs, scheduler="single-threaded")
assert all(isinstance(v, sparse.csr_matrix) for v in values)
def test_map_partitions_delays_large_inputs():
df = pd.DataFrame({"x": [1, 2, 3, 4]})
ddf = dd.from_pandas(df, npartitions=2)
big = np.ones(1000000)
b = ddf.map_partitions(lambda x, y: x, y=big)
assert any(big is v for v in b.dask.values())
a = ddf.map_partitions(lambda x, y: x, big)
assert any(big is v for v in a.dask.values())
def test_partitions_indexer():
df = pd.DataFrame({"x": range(10)})
ddf = dd.from_pandas(df, npartitions=5)
assert_eq(ddf.partitions[0], ddf.get_partition(0))
assert_eq(ddf.partitions[3], ddf.get_partition(3))
assert_eq(ddf.partitions[-1], ddf.get_partition(4))
assert ddf.partitions[:3].npartitions == 3
assert ddf.x.partitions[:3].npartitions == 3
assert ddf.x.partitions[::2].compute().tolist() == [0, 1, 4, 5, 8, 9]
def test_mod_eq():
df = pd.DataFrame({"a": [1, 2, 3]})
ddf = dd.from_pandas(df, npartitions=1)
assert_eq(df, ddf)
assert_eq(df.a, ddf.a)
assert_eq(df.a + 2, ddf.a + 2)
assert_eq(df.a + 2 == 0, ddf.a + 2 == 0)
def test_setitem():
df = pd.DataFrame({"A": [1, 2], "B": [3, 4]})
ddf = dd.from_pandas(df.copy(), 2)
df[df.columns] = 1
ddf[ddf.columns] = 1
assert_eq(df, ddf)
def test_setitem_with_bool_dataframe_as_key():
df = pd.DataFrame({"A": [1, 4], "B": [3, 2]})
ddf = dd.from_pandas(df.copy(), 2)
df[df > 2] = 5
ddf[ddf > 2] = 5
assert_eq(df, ddf)
def test_setitem_with_bool_series_as_key():
df = pd.DataFrame({"A": [1, 4], "B": [3, 2]})
ddf = dd.from_pandas(df.copy(), 2)
df[df["A"] > 2] = 5
ddf[ddf["A"] > 2] = 5
assert_eq(df, ddf)
def test_setitem_with_numeric_column_name_raises_not_implemented():
df = pd.DataFrame({0: [1, 4], 1: [3, 2]})
ddf = dd.from_pandas(df.copy(), 2)
# works for pandas
df[0] = 5
# raises error for dask
with pytest.raises(NotImplementedError, match="not supported"):
ddf[0] = 5
def test_broadcast():
df = pd.DataFrame({"x": [1, 2, 3, 4, 5]})
ddf = dd.from_pandas(df, npartitions=2)
assert_eq(ddf - (ddf.sum() + 1), df - (df.sum() + 1))
def test_scalar_with_array():
df = pd.DataFrame({"x": [1, 2, 3, 4, 5]})
ddf = dd.from_pandas(df, npartitions=2)
da.utils.assert_eq(df.x.values + df.x.mean(), ddf.x.values + ddf.x.mean())
def test_has_parallel_type():
assert has_parallel_type(pd.DataFrame())
assert has_parallel_type(pd.Series(dtype=float))
assert not has_parallel_type(123)
def test_meta_error_message():
with pytest.raises(TypeError) as info:
dd.DataFrame({("x", 1): 123}, "x", pd.Series(dtype=float), [None, None])
assert "Series" in str(info.value)
assert "DataFrame" in str(info.value)
assert "pandas" in str(info.value)
def test_map_index():
df = pd.DataFrame({"x": [1, 2, 3, 4, 5]})
ddf = dd.from_pandas(df, npartitions=2)
assert ddf.known_divisions is True
cleared = ddf.index.map(lambda x: x * 10)
assert cleared.known_divisions is False
applied = ddf.index.map(lambda x: x * 10, is_monotonic=True)
assert applied.known_divisions is True
assert applied.divisions == tuple(x * 10 for x in ddf.divisions)
def test_assign_index():
df = pd.DataFrame({"x": [1, 2, 3, 4, 5]})
ddf = dd.from_pandas(df, npartitions=2)
ddf_copy = ddf.copy()
ddf.index = ddf.index * 10
expected = df.copy()
expected.index = expected.index * 10
assert_eq(ddf, expected)
assert_eq(ddf_copy, df)
def test_index_divisions():
df = pd.DataFrame({"x": [1, 2, 3, 4, 5]})
ddf = dd.from_pandas(df, npartitions=2)
assert_eq(ddf.index + 1, df.index + 1)
assert_eq(10 * ddf.index, 10 * df.index)
assert_eq(-ddf.index, -df.index)
def test_replace():
df = pd.DataFrame({"x": [1, 2, 3, 4, 5]})
ddf = dd.from_pandas(df, npartitions=2)
assert_eq(df.replace(1, 10), ddf.replace(1, 10))
assert_eq(df.replace({1: 10, 2: 20}), ddf.replace({1: 10, 2: 20}))
assert_eq(df.x.replace(1, 10), ddf.x.replace(1, 10))
assert_eq(df.x.replace({1: 10, 2: 20}), ddf.x.replace({1: 10, 2: 20}))
def test_map_partitions_delays_lists():
df = pd.DataFrame({"x": [1, 2, 3, 4, 5]})
ddf = dd.from_pandas(df, npartitions=2)
L = list(range(100))
out = ddf.map_partitions(lambda x, y: x + sum(y), y=L)
assert any(str(L) == str(v) for v in out.__dask_graph__().values())
out = ddf.map_partitions(lambda x, y: x + sum(y), L)
assert any(str(L) == str(v) for v in out.__dask_graph__().values())
def test_dtype_cast():
df = pd.DataFrame(
{
"A": np.arange(10, dtype=np.int32),
"B": np.arange(10, dtype=np.int64),
"C": np.arange(10, dtype=np.float32),
}
)
ddf = dd.from_pandas(df, npartitions=2)
assert ddf.A.dtype == np.int32
assert ddf.B.dtype == np.int64
assert ddf.C.dtype == np.float32
col = pd.Series(np.arange(10, dtype=np.float32)) / 2
assert col.dtype == np.float32
ddf = ddf.assign(D=col)
assert ddf.D.dtype == np.float32
assert ddf.C.dtype == np.float32
# fails
assert ddf.B.dtype == np.int64
# fails
assert ddf.A.dtype == np.int32
@pytest.mark.parametrize("base_npart", [1, 4])
@pytest.mark.parametrize("map_npart", [1, 3])
@pytest.mark.parametrize("sorted_index", [False, True])
@pytest.mark.parametrize("sorted_map_index", [False, True])
def test_series_map(base_npart, map_npart, sorted_index, sorted_map_index):
base = pd.Series(
["".join(np.random.choice(["a", "b", "c"], size=3)) for x in range(100)]
)
if not sorted_index:
index = np.arange(100)
np.random.shuffle(index)
base.index = index
map_index = ["".join(x) for x in product("abc", repeat=3)]
mapper = pd.Series(np.random.randint(50, size=len(map_index)), index=map_index)
if not sorted_map_index:
map_index = np.array(map_index)
np.random.shuffle(map_index)
mapper.index = map_index
expected = base.map(mapper)
dask_base = dd.from_pandas(base, npartitions=base_npart, sort=False)
dask_map = dd.from_pandas(mapper, npartitions=map_npart, sort=False)
result = dask_base.map(dask_map)
dd.utils.assert_eq(expected, result)
def test_dataframe_explode():
df = pd.DataFrame({"A": [[1, 2, 3], "foo", [3, 4]], "B": 1})
exploded_df = df.explode("A")
ddf = dd.from_pandas(df, npartitions=2)
exploded_ddf = ddf.explode("A")
assert ddf.divisions == exploded_ddf.divisions
assert_eq(exploded_ddf.compute(), exploded_df)
def test_series_explode():
s = pd.Series([[1, 2, 3], "foo", [3, 4]])
exploded_s = s.explode()
ds = dd.from_pandas(s, npartitions=2)
exploded_ds = ds.explode()
assert_eq(exploded_ds, exploded_s)
assert ds.divisions == exploded_ds.divisions
def test_pop():
df = pd.DataFrame({"x": range(10), "y": range(10)})
ddf = dd.from_pandas(df, npartitions=2)
s = ddf.pop("y")
assert s.name == "y"
assert ddf.columns == ["x"]
assert_eq(ddf, df[["x"]])
@pytest.mark.parametrize("dropna", [True, False])
@pytest.mark.parametrize("axis", [0, 1])
def test_nunique(dropna, axis):
df = pd.DataFrame(
{"x": ["a", "a", "c"], "y": [None, 1, 2], "c": np.arange(0, 1, 0.4)}
)
ddf = dd.from_pandas(df, npartitions=2)
assert_eq(ddf["y"].nunique(dropna=dropna), df["y"].nunique(dropna=dropna))
assert_eq(
ddf.nunique(dropna=dropna, axis=axis), df.nunique(dropna=dropna, axis=axis)
)
def test_view():
data = {
"x": pd.Series(range(5), dtype="int8"),
"y": pd.Series(
[
"2021-11-27 00:05:02.175274",
"2021-11-27 00:05:05.205596",
"2021-11-27 00:05:29.212572",
"2021-11-27 00:05:25.708343",
"2021-11-27 00:05:47.714958",
],
dtype="datetime64[ns]",
),
}
df = pd.DataFrame(data)
ddf = dd.from_pandas(df, npartitions=2)
assert_eq(ddf["x"].view("uint8"), df["x"].view("uint8"))
assert_eq(ddf["y"].view("int64"), df["y"].view("int64"))
def test_simple_map_partitions():
data = {"col_0": [9, -3, 0, -1, 5], "col_1": [-2, -7, 6, 8, -5]}
df = pd.DataFrame(data)
ddf = dd.from_pandas(df, npartitions=2)
ddf = ddf.clip(-4, 6)
task = ddf.__dask_graph__()[ddf.__dask_keys__()[0]]
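    # The task for the first partition wraps a subgraph with a single entry;
    # that entry should call the bound pandas method (M.clip), either directly
    # or via apply, which is what the assertion below checks.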
[v] = task[0].dsk.values()
assert v[0] == M.clip or v[1] == M.clip
def test_iter():
df = pd.DataFrame({"A": [1, 2, 3, 4], "B": [1, 2, 3, 4]})
ddf = dd.from_pandas(df, 2)
assert list(df) == list(ddf)
for col, expected in zip(ddf, ["A", "B"]):
assert col == expected
def test_dataframe_groupby_cumsum_agg_empty_partitions():
df = pd.DataFrame({"x": [1, 2, 3, 4, 5, 6, 7, 8]})
ddf = dd.from_pandas(df, npartitions=4)
assert_eq(ddf[ddf.x < 5].x.cumsum(), df[df.x < 5].x.cumsum())
assert_eq(ddf[ddf.x > 5].x.cumsum(), df[df.x > 5].x.cumsum())
def test_dataframe_groupby_cumprod_agg_empty_partitions():
df = pd.DataFrame({"x": [1, 2, 3, 4, 5, 6, 7, 8]})
ddf = dd.from_pandas(df, npartitions=4)
assert_eq(ddf[ddf.x < 5].x.cumprod(), df[df.x < 5].x.cumprod())
assert_eq(ddf[ddf.x > 5].x.cumprod(), df[df.x > 5].x.cumprod())
def test_fuse_roots():
pdf1 = pd.DataFrame(
{"a": [1, 2, 3, 4, 5, 6, 7, 8, 9], "b": [3, 5, 2, 5, 7, 2, 4, 2, 4]}
)
ddf1 = dd.from_pandas(pdf1, 2)
pdf2 = pd.DataFrame({"a": [True, False, True] * 3, "b": [False, False, True] * 3})
ddf2 = dd.from_pandas(pdf2, 2)
res = ddf1.where(ddf2)
hlg = fuse_roots(res.__dask_graph__(), keys=res.__dask_keys__())
hlg.validate()
def test_attrs_dataframe():
df = pd.DataFrame({"A": [1, 2], "B": [3, 4], "C": [5, 6]})
df.attrs = {"date": "2020-10-16"}
ddf = dd.from_pandas(df, 2)
assert df.attrs == ddf.attrs
assert df.abs().attrs == ddf.abs().attrs
def test_attrs_series():
s = pd.Series([1, 2], name="A")
s.attrs["unit"] = "kg"
ds = dd.from_pandas(s, 2)
assert s.attrs == ds.attrs
assert s.fillna(1).attrs == ds.fillna(1).attrs
@pytest.mark.xfail(reason="df.iloc[:0] does not keep the series attrs")
def test_attrs_series_in_dataframes():
df = pd.DataFrame({"A": [1, 2], "B": [3, 4], "C": [5, 6]})
df.A.attrs["unit"] = "kg"
ddf = dd.from_pandas(df, 2)
# Fails because the pandas iloc method doesn't currently persist
# the attrs dict for series in a dataframe. Dask uses df.iloc[:0]
# when creating the _meta dataframe in make_meta_pandas(x, index=None).
# Should start xpassing when df.iloc works. Remove the xfail then.
assert df.A.attrs == ddf.A.attrs
def test_join_series():
df = pd.DataFrame({"x": [1, 2, 3, 4, 5, 6, 7, 8]})
ddf = dd.from_pandas(df, npartitions=1)
expected_df = dd.from_pandas(df.join(df["x"], lsuffix="_"), npartitions=1)
actual_df = ddf.join(ddf["x"], lsuffix="_")
assert_eq(actual_df, expected_df)
def test_dask_layers():
df = pd.DataFrame({"x": [1, 2, 3, 4, 5, 6, 7, 8]})
ddf = dd.from_pandas(df, npartitions=2)
assert ddf.dask.layers.keys() == {ddf._name}
assert ddf.dask.dependencies == {ddf._name: set()}
assert ddf.__dask_layers__() == (ddf._name,)
dds = ddf["x"]
assert dds.dask.layers.keys() == {ddf._name, dds._name}
assert dds.dask.dependencies == {ddf._name: set(), dds._name: {ddf._name}}
assert dds.__dask_layers__() == (dds._name,)
ddi = dds.min()
assert ddi.key[1:] == (0,)
# Note that the `min` operation will use two layers
    # now that ACA uses HLG
assert {ddf._name, dds._name, ddi.key[0]}.issubset(ddi.dask.layers.keys())
assert len(ddi.dask.layers) == 4
assert ddi.dask.dependencies[ddf._name] == set()
assert ddi.dask.dependencies[dds._name] == {ddf._name}
assert len(ddi.dask.dependencies) == 4
assert ddi.__dask_layers__() == (ddi.key[0],)
def test_repr_html_dataframe_highlevelgraph():
pytest.importorskip("jinja2")
x = timeseries().shuffle("id", shuffle="tasks").head(compute=False)
hg = x.dask
assert xml.etree.ElementTree.fromstring(hg._repr_html_()) is not None
for layer in hg.layers.values():
assert xml.etree.ElementTree.fromstring(layer._repr_html_()) is not None
@pytest.mark.skipif(
not dd._compat.PANDAS_GT_120, reason="Float64 was introduced in pandas>=1.2"
)
def test_assign_na_float_columns():
# See https://github.com/dask/dask/issues/7156
df_pandas = pd.DataFrame({"a": [1.1]}, dtype="Float64")
df = dd.from_pandas(df_pandas, npartitions=1)
df = df.assign(new_col=df["a"])
assert df.compute()["a"].dtypes == "Float64"
assert df.compute()["new_col"].dtypes == "Float64"
def test_dot():
s1 = pd.Series([1, 2, 3, 4])
s2 = pd.Series([4, 5, 6, 6])
df = pd.DataFrame({"one": s1, "two": s2})
dask_s1 = dd.from_pandas(s1, npartitions=1)
dask_df = dd.from_pandas(df, npartitions=1)
dask_s2 = dd.from_pandas(s2, npartitions=1)
assert_eq(s1.dot(s2), dask_s1.dot(dask_s2))
assert_eq(s1.dot(df), dask_s1.dot(dask_df))
# With partitions
partitioned_s1 = dd.from_pandas(s1, npartitions=2)
partitioned_df = dd.from_pandas(df, npartitions=2)
partitioned_s2 = dd.from_pandas(s2, npartitions=2)
assert_eq(s1.dot(s2), partitioned_s1.dot(partitioned_s2))
assert_eq(s1.dot(df), partitioned_s1.dot(partitioned_df))
# Test passing meta kwarg
res = dask_s1.dot(dask_df, meta=pd.Series([1], name="test_series")).compute()
assert res.name == "test_series"
# Test validation of second operand
with pytest.raises(TypeError):
dask_s1.dot(da.array([1, 2, 3, 4]))
def test_dot_nan():
# Test that nan inputs match pandas' behavior
s1 = pd.Series([1, 2, 3, 4])
dask_s1 = dd.from_pandas(s1, npartitions=1)
s2 = pd.Series([np.nan, np.nan, np.nan, np.nan])
dask_s2 = dd.from_pandas(s2, npartitions=1)
df = pd.DataFrame({"one": s1, "two": s2})
dask_df = dd.from_pandas(df, npartitions=1)
assert_eq(s1.dot(s2), dask_s1.dot(dask_s2))
assert_eq(s2.dot(df), dask_s2.dot(dask_df))
def test_use_of_weakref_proxy():
"""Testing wrapping frames in proxy wrappers"""
df = pd.DataFrame({"data": [1, 2, 3]})
df_pxy = weakref.proxy(df)
ser = pd.Series({"data": [1, 2, 3]})
ser_pxy = weakref.proxy(ser)
assert is_dataframe_like(df_pxy)
assert is_series_like(ser_pxy)
assert dask.dataframe.groupby._cov_chunk(df_pxy, "data")
assert isinstance(
dask.dataframe.groupby._groupby_apply_funcs(df_pxy, "data", funcs=[]),
pd.DataFrame,
)
# Test wrapping each Dask dataframe chunk in a proxy
l = []
def f(x):
l.append(x) # Keep `x` alive
return weakref.proxy(x)
d = pd.DataFrame({"g": [0, 0, 1] * 3, "b": [1, 2, 3] * 3})
a = dd.from_pandas(d, npartitions=1)
a = a.map_partitions(f, meta=a._meta)
pxy = weakref.proxy(a)
res = pxy["b"].groupby(pxy["g"]).sum()
    assert isinstance(res.compute(), pd.Series)
@contextlib.contextmanager
def check_is_monotonic_warning():
# `is_monotonic` was deprecated starting in `pandas=1.5.0`
if not PANDAS_GT_150:
with contextlib.nullcontext() as ctx:
yield ctx
else:
with pytest.warns(FutureWarning, match="is_monotonic is deprecated") as ctx:
yield ctx
def test_is_monotonic_numeric():
s = pd.Series(range(20))
ds = dd.from_pandas(s, npartitions=5)
assert_eq(s.is_monotonic_increasing, ds.is_monotonic_increasing)
with check_is_monotonic_warning():
expected = s.is_monotonic
with check_is_monotonic_warning():
result = ds.is_monotonic
assert_eq(expected, result)
s_2 = pd.Series(range(20, 0, -1))
ds_2 = dd.from_pandas(s_2, npartitions=5)
assert_eq(s_2.is_monotonic_decreasing, ds_2.is_monotonic_decreasing)
s_3 = pd.Series(list(range(0, 5)) + list(range(0, 20)))
ds_3 = dd.from_pandas(s_3, npartitions=5)
assert_eq(s_3.is_monotonic_increasing, ds_3.is_monotonic_increasing)
assert_eq(s_3.is_monotonic_decreasing, ds_3.is_monotonic_decreasing)
def test_is_monotonic_dt64():
s = pd.Series(pd.date_range("20130101", periods=10))
ds = dd.from_pandas(s, npartitions=5)
assert_eq(s.is_monotonic_increasing, ds.is_monotonic_increasing)
s_2 = pd.Series(list(reversed(s)))
ds_2 = dd.from_pandas(s_2, npartitions=5)
assert_eq(s_2.is_monotonic_decreasing, ds_2.is_monotonic_decreasing)
def test_index_is_monotonic_numeric():
s = pd.Series(1, index=range(20))
ds = dd.from_pandas(s, npartitions=5, sort=False)
assert_eq(s.index.is_monotonic_increasing, ds.index.is_monotonic_increasing)
with check_is_monotonic_warning():
expected = s.index.is_monotonic
with check_is_monotonic_warning():
result = ds.index.is_monotonic
assert_eq(expected, result)
s_2 = pd.Series(1, index=range(20, 0, -1))
ds_2 = dd.from_pandas(s_2, npartitions=5, sort=False)
assert_eq(s_2.index.is_monotonic_decreasing, ds_2.index.is_monotonic_decreasing)
s_3 = pd.Series(1, index=list(range(0, 5)) + list(range(0, 20)))
ds_3 = dd.from_pandas(s_3, npartitions=5, sort=False)
assert_eq(s_3.index.is_monotonic_increasing, ds_3.index.is_monotonic_increasing)
assert_eq(s_3.index.is_monotonic_decreasing, ds_3.index.is_monotonic_decreasing)
def test_index_is_monotonic_dt64():
s = pd.Series(1, index=pd.date_range("20130101", periods=10))
ds = dd.from_pandas(s, npartitions=5, sort=False)
assert_eq(s.index.is_monotonic_increasing, ds.index.is_monotonic_increasing)
s_2 = pd.Series(1, index=list(reversed(s)))
ds_2 = dd.from_pandas(s_2, npartitions=5, sort=False)
assert_eq(s_2.index.is_monotonic_decreasing, ds_2.index.is_monotonic_decreasing)
def test_custom_map_reduce():
# Make sure custom map-reduce workflows can use
# the universal ACA code path with metadata
# that is not DataFrame-like.
# See: https://github.com/dask/dask/issues/8636
df = pd.DataFrame(columns=["a"], data=[[2], [4], [8]], index=[0, 1, 2])
ddf = dd.from_pandas(df, npartitions=2)
def map_fn(x):
return {"x": x, "y": x}
def reduce_fn(series):
merged = None
for mapped in series:
if merged is None:
merged = mapped.copy()
else:
merged["x"] += mapped["x"]
merged["y"] *= mapped["y"]
return merged
result = (
ddf["a"]
.map(map_fn, meta=("data", "object"))
.reduction(reduce_fn, aggregate=reduce_fn, meta=("data", "object"))
.compute()[0]
)
assert result == {"x": 14, "y": 64}
|
blaze/dask
|
dask/dataframe/tests/test_dataframe.py
|
Python
|
bsd-3-clause
| 159,857
|
# Copyright (C) 2015, Pauli Virtanen <pav@iki.fi>
# Distributed under the same license as SciPy.
import warnings
import numpy as np
from numpy.linalg import LinAlgError
from scipy.linalg import (get_blas_funcs, qr, solve, svd, qr_insert, lstsq)
from scipy.sparse.linalg.isolve.utils import make_system
__all__ = ['gcrotmk']
def _fgmres(matvec, v0, m, atol, lpsolve=None, rpsolve=None, cs=(), outer_v=(),
prepend_outer_v=False):
"""
FGMRES Arnoldi process, with optional projection or augmentation
Parameters
----------
matvec : callable
Operation A*x
v0 : ndarray
Initial vector, normalized to nrm2(v0) == 1
m : int
Number of GMRES rounds
atol : float
Absolute tolerance for early exit
lpsolve : callable
Left preconditioner L
rpsolve : callable
Right preconditioner R
CU : list of (ndarray, ndarray)
Columns of matrices C and U in GCROT
outer_v : list of ndarrays
Augmentation vectors in LGMRES
prepend_outer_v : bool, optional
Whether augmentation vectors come before or after
Krylov iterates
Raises
------
LinAlgError
If nans encountered
Returns
-------
Q, R : ndarray
QR decomposition of the upper Hessenberg H=QR
B : ndarray
Projections corresponding to matrix C
vs : list of ndarray
Columns of matrix V
zs : list of ndarray
Columns of matrix Z
y : ndarray
Solution to ||H y - e_1||_2 = min!
res : float
The final (preconditioned) residual norm
"""
if lpsolve is None:
lpsolve = lambda x: x
if rpsolve is None:
rpsolve = lambda x: x
axpy, dot, scal, nrm2 = get_blas_funcs(['axpy', 'dot', 'scal', 'nrm2'], (v0,))
vs = [v0]
zs = []
y = None
res = np.nan
m = m + len(outer_v)
# Orthogonal projection coefficients
B = np.zeros((len(cs), m), dtype=v0.dtype)
# H is stored in QR factorized form
Q = np.ones((1, 1), dtype=v0.dtype)
R = np.zeros((1, 0), dtype=v0.dtype)
eps = np.finfo(v0.dtype).eps
breakdown = False
# FGMRES Arnoldi process
for j in range(m):
# L A Z = C B + V H
if prepend_outer_v and j < len(outer_v):
z, w = outer_v[j]
elif prepend_outer_v and j == len(outer_v):
z = rpsolve(v0)
w = None
elif not prepend_outer_v and j >= m - len(outer_v):
z, w = outer_v[j - (m - len(outer_v))]
else:
z = rpsolve(vs[-1])
w = None
if w is None:
w = lpsolve(matvec(z))
else:
# w is clobbered below
w = w.copy()
w_norm = nrm2(w)
# GCROT projection: L A -> (1 - C C^H) L A
# i.e. orthogonalize against C
for i, c in enumerate(cs):
alpha = dot(c, w)
B[i,j] = alpha
w = axpy(c, w, c.shape[0], -alpha) # w -= alpha*c
# Orthogonalize against V
hcur = np.zeros(j+2, dtype=Q.dtype)
for i, v in enumerate(vs):
alpha = dot(v, w)
hcur[i] = alpha
w = axpy(v, w, v.shape[0], -alpha) # w -= alpha*v
hcur[i+1] = nrm2(w)
with np.errstate(over='ignore', divide='ignore'):
# Careful with denormals
alpha = 1/hcur[-1]
if np.isfinite(alpha):
w = scal(alpha, w)
if not (hcur[-1] > eps * w_norm):
# w essentially in the span of previous vectors,
# or we have nans. Bail out after updating the QR
# solution.
breakdown = True
vs.append(w)
zs.append(z)
# Arnoldi LSQ problem
# Add new column to H=Q@R, padding other columns with zeros
Q2 = np.zeros((j+2, j+2), dtype=Q.dtype, order='F')
Q2[:j+1,:j+1] = Q
Q2[j+1,j+1] = 1
R2 = np.zeros((j+2, j), dtype=R.dtype, order='F')
R2[:j+1,:] = R
Q, R = qr_insert(Q2, R2, hcur, j, which='col',
overwrite_qru=True, check_finite=False)
# Transformed least squares problem
# || Q R y - inner_res_0 * e_1 ||_2 = min!
# Since R = [R'; 0], solution is y = inner_res_0 (R')^{-1} (Q^H)[:j,0]
# Residual is immediately known
res = abs(Q[0,-1])
# Check for termination
if res < atol or breakdown:
break
if not np.isfinite(R[j,j]):
# nans encountered, bail out
raise LinAlgError()
# -- Get the LSQ problem solution
# The problem is triangular, but the condition number may be
# bad (or in case of breakdown the last diagonal entry may be
# zero), so use lstsq instead of trtrs.
y, _, _, _, = lstsq(R[:j+1,:j+1], Q[0,:j+1].conj())
B = B[:,:j+1]
return Q, R, B, vs, zs, y, res
def gcrotmk(A, b, x0=None, tol=1e-5, maxiter=1000, M=None, callback=None,
m=20, k=None, CU=None, discard_C=False, truncate='oldest',
atol=None):
"""
Solve a matrix equation using flexible GCROT(m,k) algorithm.
Parameters
----------
A : {sparse matrix, ndarray, LinearOperator}
The real or complex N-by-N matrix of the linear system.
Alternatively, ``A`` can be a linear operator which can
produce ``Ax`` using, e.g.,
``scipy.sparse.linalg.LinearOperator``.
b : ndarray
Right hand side of the linear system. Has shape (N,) or (N,1).
x0 : ndarray
Starting guess for the solution.
tol, atol : float, optional
Tolerances for convergence, ``norm(residual) <= max(tol*norm(b), atol)``.
The default for ``atol`` is `tol`.
.. warning::
The default value for `atol` will be changed in a future release.
For future compatibility, specify `atol` explicitly.
maxiter : int, optional
Maximum number of iterations. Iteration will stop after maxiter
steps even if the specified tolerance has not been achieved.
M : {sparse matrix, ndarray, LinearOperator}, optional
Preconditioner for A. The preconditioner should approximate the
inverse of A. gcrotmk is a 'flexible' algorithm and the preconditioner
can vary from iteration to iteration. Effective preconditioning
dramatically improves the rate of convergence, which implies that
fewer iterations are needed to reach a given error tolerance.
callback : function, optional
User-supplied function to call after each iteration. It is called
as callback(xk), where xk is the current solution vector.
m : int, optional
        Number of inner FGMRES iterations per outer iteration.
Default: 20
k : int, optional
Number of vectors to carry between inner FGMRES iterations.
According to [2]_, good values are around m.
Default: m
CU : list of tuples, optional
List of tuples ``(c, u)`` which contain the columns of the matrices
C and U in the GCROT(m,k) algorithm. For details, see [2]_.
The list given and vectors contained in it are modified in-place.
If not given, start from empty matrices. The ``c`` elements in the
tuples can be ``None``, in which case the vectors are recomputed
via ``c = A u`` on start and orthogonalized as described in [3]_.
discard_C : bool, optional
Discard the C-vectors at the end. Useful if recycling Krylov subspaces
for different linear systems.
truncate : {'oldest', 'smallest'}, optional
Truncation scheme to use. Drop: oldest vectors, or vectors with
smallest singular values using the scheme discussed in [1,2].
See [2]_ for detailed comparison.
Default: 'oldest'
Returns
-------
x : ndarray
The solution found.
info : int
Provides convergence information:
* 0 : successful exit
* >0 : convergence to tolerance not achieved, number of iterations
References
----------
.. [1] E. de Sturler, ''Truncation strategies for optimal Krylov subspace
methods'', SIAM J. Numer. Anal. 36, 864 (1999).
.. [2] J.E. Hicken and D.W. Zingg, ''A simplified and flexible variant
of GCROT for solving nonsymmetric linear systems'',
SIAM J. Sci. Comput. 32, 172 (2010).
.. [3] M.L. Parks, E. de Sturler, G. Mackey, D.D. Johnson, S. Maiti,
''Recycling Krylov subspaces for sequences of linear systems'',
SIAM J. Sci. Comput. 28, 1651 (2006).
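    Examples
    --------
    A minimal illustrative sketch: solve a small, nonsymmetric sparse system
    and check the returned exit code (0 means the requested tolerance was
    reached).
    >>> import numpy as np
    >>> from scipy.sparse import csr_matrix
    >>> from scipy.sparse.linalg import gcrotmk
    >>> A = csr_matrix([[3., 2., 0.], [1., -1., 0.], [0., 5., 1.]])
    >>> b = np.array([2., 4., -1.])
    >>> x, exit_code = gcrotmk(A, b, atol=1e-5)
    >>> exit_code
    0
    >>> np.allclose(A.dot(x), b)
    True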
"""
A,M,x,b,postprocess = make_system(A,M,x0,b)
if not np.isfinite(b).all():
raise ValueError("RHS must contain only finite numbers")
if truncate not in ('oldest', 'smallest'):
raise ValueError("Invalid value for 'truncate': %r" % (truncate,))
if atol is None:
warnings.warn("scipy.sparse.linalg.gcrotmk called without specifying `atol`. "
"The default value will change in the future. To preserve "
"current behavior, set ``atol=tol``.",
category=DeprecationWarning, stacklevel=2)
atol = tol
matvec = A.matvec
psolve = M.matvec
if CU is None:
CU = []
if k is None:
k = m
axpy, dot, scal = None, None, None
r = b - matvec(x)
axpy, dot, scal, nrm2 = get_blas_funcs(['axpy', 'dot', 'scal', 'nrm2'], (x, r))
b_norm = nrm2(b)
if b_norm == 0:
x = b
return (postprocess(x), 0)
if discard_C:
CU[:] = [(None, u) for c, u in CU]
# Reorthogonalize old vectors
if CU:
# Sort already existing vectors to the front
CU.sort(key=lambda cu: cu[0] is not None)
# Fill-in missing ones
C = np.empty((A.shape[0], len(CU)), dtype=r.dtype, order='F')
us = []
j = 0
while CU:
# More memory-efficient: throw away old vectors as we go
c, u = CU.pop(0)
if c is None:
c = matvec(u)
C[:,j] = c
j += 1
us.append(u)
# Orthogonalize
Q, R, P = qr(C, overwrite_a=True, mode='economic', pivoting=True)
del C
# C := Q
cs = list(Q.T)
# U := U P R^-1, back-substitution
new_us = []
for j in range(len(cs)):
u = us[P[j]]
for i in range(j):
u = axpy(us[P[i]], u, u.shape[0], -R[i,j])
if abs(R[j,j]) < 1e-12 * abs(R[0,0]):
# discard rest of the vectors
break
u = scal(1.0/R[j,j], u)
new_us.append(u)
# Form the new CU lists
CU[:] = list(zip(cs, new_us))[::-1]
if CU:
axpy, dot = get_blas_funcs(['axpy', 'dot'], (r,))
# Solve first the projection operation with respect to the CU
# vectors. This corresponds to modifying the initial guess to
# be
#
# x' = x + U y
# y = argmin_y || b - A (x + U y) ||^2
#
# The solution is y = C^H (b - A x)
for c, u in CU:
yc = dot(c, r)
x = axpy(u, x, x.shape[0], yc)
r = axpy(c, r, r.shape[0], -yc)
# GCROT main iteration
for j_outer in range(maxiter):
# -- callback
if callback is not None:
callback(x)
beta = nrm2(r)
# -- check stopping condition
beta_tol = max(atol, tol * b_norm)
if beta <= beta_tol and (j_outer > 0 or CU):
# recompute residual to avoid rounding error
r = b - matvec(x)
beta = nrm2(r)
if beta <= beta_tol:
j_outer = -1
break
ml = m + max(k - len(CU), 0)
cs = [c for c, u in CU]
try:
Q, R, B, vs, zs, y, pres = _fgmres(matvec,
r/beta,
ml,
rpsolve=psolve,
atol=max(atol, tol*b_norm)/beta,
cs=cs)
y *= beta
except LinAlgError:
# Floating point over/underflow, non-finite result from
# matmul etc. -- report failure.
break
#
# At this point,
#
# [A U, A Z] = [C, V] G; G = [ I B ]
# [ 0 H ]
#
# where [C, V] has orthonormal columns, and r = beta v_0. Moreover,
#
# || b - A (x + Z y + U q) ||_2 = || r - C B y - V H y - C q ||_2 = min!
#
# from which y = argmin_y || beta e_1 - H y ||_2, and q = -B y
#
#
# GCROT(m,k) update
#
# Define new outer vectors
# ux := (Z - U B) y
ux = zs[0]*y[0]
for z, yc in zip(zs[1:], y[1:]):
ux = axpy(z, ux, ux.shape[0], yc) # ux += z*yc
by = B.dot(y)
for cu, byc in zip(CU, by):
c, u = cu
ux = axpy(u, ux, ux.shape[0], -byc) # ux -= u*byc
# cx := V H y
hy = Q.dot(R.dot(y))
cx = vs[0] * hy[0]
for v, hyc in zip(vs[1:], hy[1:]):
cx = axpy(v, cx, cx.shape[0], hyc) # cx += v*hyc
# Normalize cx, maintaining cx = A ux
# This new cx is orthogonal to the previous C, by construction
try:
alpha = 1/nrm2(cx)
if not np.isfinite(alpha):
raise FloatingPointError()
except (FloatingPointError, ZeroDivisionError):
# Cannot update, so skip it
continue
cx = scal(alpha, cx)
ux = scal(alpha, ux)
# Update residual and solution
gamma = dot(cx, r)
r = axpy(cx, r, r.shape[0], -gamma) # r -= gamma*cx
x = axpy(ux, x, x.shape[0], gamma) # x += gamma*ux
# Truncate CU
if truncate == 'oldest':
while len(CU) >= k and CU:
del CU[0]
elif truncate == 'smallest':
if len(CU) >= k and CU:
# cf. [1,2]
D = solve(R[:-1,:].T, B.T).T
W, sigma, V = svd(D)
# C := C W[:,:k-1], U := U W[:,:k-1]
new_CU = []
for j, w in enumerate(W[:,:k-1].T):
c, u = CU[0]
c = c * w[0]
u = u * w[0]
for cup, wp in zip(CU[1:], w[1:]):
cp, up = cup
c = axpy(cp, c, c.shape[0], wp)
u = axpy(up, u, u.shape[0], wp)
# Reorthogonalize at the same time; not necessary
# in exact arithmetic, but floating point error
# tends to accumulate here
for cp, up in new_CU:
alpha = dot(cp, c)
c = axpy(cp, c, c.shape[0], -alpha)
u = axpy(up, u, u.shape[0], -alpha)
alpha = nrm2(c)
c = scal(1.0/alpha, c)
u = scal(1.0/alpha, u)
new_CU.append((c, u))
CU[:] = new_CU
# Add new vector to CU
CU.append((cx, ux))
# Include the solution vector to the span
CU.append((None, x.copy()))
if discard_C:
CU[:] = [(None, uz) for cz, uz in CU]
return postprocess(x), j_outer + 1
|
grlee77/scipy
|
scipy/sparse/linalg/isolve/_gcrotmk.py
|
Python
|
bsd-3-clause
| 15,556
|
#!/usr/bin/env python
# Copyright 2012 The Swarming Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0 that
# can be found in the LICENSE file.
"""Reads a .isolated, creates a tree of hardlinks and runs the test.
To improve performance, it keeps a local cache. The local cache can safely be
deleted.
Any ${ISOLATED_OUTDIR} on the command line will be replaced by the location of a
temporary directory upon execution of the command specified in the .isolated
file. All content written to this directory will be uploaded upon termination
and the .isolated file describing this directory will be printed to stdout.
"""
__version__ = '0.4.1'
import logging
import optparse
import os
import sys
import tempfile
from third_party.depot_tools import fix_encoding
from utils import file_path
from utils import on_error
from utils import subprocess42
from utils import tools
from utils import zip_package
import auth
import isolated_format
import isolateserver
# Absolute path to this file (can be None if running from zip on Mac).
THIS_FILE_PATH = os.path.abspath(__file__) if __file__ else None
# Directory that contains this file (might be inside zip package).
BASE_DIR = os.path.dirname(THIS_FILE_PATH) if __file__ else None
# Directory that contains currently running script file.
if zip_package.get_main_script_path():
MAIN_DIR = os.path.dirname(
os.path.abspath(zip_package.get_main_script_path()))
else:
# This happens when 'import run_isolated' is executed at the python
# interactive prompt, in that case __file__ is undefined.
MAIN_DIR = None
# The name of the log file to use.
RUN_ISOLATED_LOG_FILE = 'run_isolated.log'
# The name of the log to use for the run_test_cases.py command
RUN_TEST_CASES_LOG = 'run_test_cases.log'
def get_as_zip_package(executable=True):
"""Returns ZipPackage with this module and all its dependencies.
  If |executable| is True, stores run_isolated.py as __main__.py so that the
  zip package is directly executable by python.
"""
# Building a zip package when running from another zip package is
# unsupported and probably unneeded.
assert not zip_package.is_zipped_module(sys.modules[__name__])
assert THIS_FILE_PATH
assert BASE_DIR
package = zip_package.ZipPackage(root=BASE_DIR)
package.add_python_file(THIS_FILE_PATH, '__main__.py' if executable else None)
package.add_python_file(os.path.join(BASE_DIR, 'isolated_format.py'))
package.add_python_file(os.path.join(BASE_DIR, 'isolateserver.py'))
package.add_python_file(os.path.join(BASE_DIR, 'auth.py'))
package.add_directory(os.path.join(BASE_DIR, 'third_party'))
package.add_directory(os.path.join(BASE_DIR, 'utils'))
return package
def make_temp_dir(prefix, root_dir):
"""Returns a temporary directory on the same file system as root_dir."""
base_temp_dir = None
if (root_dir and
not file_path.is_same_filesystem(root_dir, tempfile.gettempdir())):
base_temp_dir = os.path.dirname(root_dir)
return tempfile.mkdtemp(prefix=prefix, dir=base_temp_dir)
def change_tree_read_only(rootdir, read_only):
"""Changes the tree read-only bits according to the read_only specification.
  The flag can be 0, 1 or 2, and controls whether files in the tree can be
  modified, created or deleted.
"""
if read_only == 2:
# Files and directories (except on Windows) are marked read only. This
# inhibits modifying, creating or deleting files in the test directory,
# except on Windows where creating and deleting files is still possible.
file_path.make_tree_read_only(rootdir)
elif read_only == 1:
# Files are marked read only but not the directories. This inhibits
# modifying files but creating or deleting files is still possible.
file_path.make_tree_files_read_only(rootdir)
elif read_only in (0, None):
# Anything can be modified.
# TODO(maruel): This is currently dangerous as long as DiskCache.touch()
# is not yet changed to verify the hash of the content of the files it is
# looking at, so that if a test modifies an input file, the file must be
# deleted.
file_path.make_tree_writeable(rootdir)
else:
raise ValueError(
'change_tree_read_only(%s, %s): Unknown flag %s' %
(rootdir, read_only, read_only))
def process_command(command, out_dir):
"""Replaces isolated specific variables in a command line."""
filtered = []
for arg in command:
if '${ISOLATED_OUTDIR}' in arg:
arg = arg.replace('${ISOLATED_OUTDIR}', out_dir).replace('/', os.sep)
filtered.append(arg)
return filtered
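# Hedged usage sketch (not part of the original file): illustrates the
# ${ISOLATED_OUTDIR} substitution described in the module docstring. The
# command line and output directory below are hypothetical.
def _demo_process_command():
  out_dir = os.path.join(tempfile.gettempdir(), 'isolated_out_demo')
  cmd = ['python', 'test.py', '--results=${ISOLATED_OUTDIR}/results.json']
  # Arguments containing ${ISOLATED_OUTDIR} have the placeholder replaced with
  # out_dir and their '/' separators normalized to os.sep; other arguments are
  # returned unchanged.
  return process_command(cmd, out_dir)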
def run_tha_test(isolated_hash, storage, cache, leak_temp_dir, extra_args):
"""Downloads the dependencies in the cache, hardlinks them into a temporary
directory and runs the executable from there.
A temporary directory is created to hold the output files. The content inside
this directory will be uploaded back to |storage| packaged as a .isolated
file.
Arguments:
isolated_hash: the SHA-1 of the .isolated file that must be retrieved to
recreate the tree of files to run the target executable.
storage: an isolateserver.Storage object to retrieve remote objects. This
object has a reference to an isolateserver.StorageApi, which does
the actual I/O.
cache: an isolateserver.LocalCache to keep from retrieving the same objects
constantly by caching the objects retrieved. Can be on-disk or
in-memory.
leak_temp_dir: if true, the temporary directory will be deliberately leaked
for later examination.
extra_args: optional arguments to add to the command stated in the .isolate
file.
"""
run_dir = make_temp_dir('run_tha_test', cache.cache_dir)
out_dir = unicode(make_temp_dir('isolated_out', cache.cache_dir))
result = 0
try:
try:
bundle = isolateserver.fetch_isolated(
isolated_hash=isolated_hash,
storage=storage,
cache=cache,
outdir=run_dir,
require_command=True)
except isolated_format.IsolatedError:
on_error.report(None)
return 1
change_tree_read_only(run_dir, bundle.read_only)
cwd = os.path.normpath(os.path.join(run_dir, bundle.relative_cwd))
command = bundle.command + extra_args
file_path.ensure_command_has_abs_path(command, cwd)
command = process_command(command, out_dir)
logging.info('Running %s, cwd=%s' % (command, cwd))
# TODO(csharp): This should be specified somewhere else.
# TODO(vadimsh): Pass it via 'env_vars' in manifest.
# Add a rotating log file if one doesn't already exist.
env = os.environ.copy()
if MAIN_DIR:
env.setdefault('RUN_TEST_CASES_LOG_FILE',
os.path.join(MAIN_DIR, RUN_TEST_CASES_LOG))
sys.stdout.flush()
with tools.Profiler('RunTest'):
try:
with subprocess42.Popen_with_handler(command, cwd=cwd, env=env) as p:
p.communicate()
result = p.returncode
except OSError:
on_error.report('Failed to run %s; cwd=%s' % (command, cwd))
result = 1
logging.info(
'Command finished with exit code %d (%s)',
result, hex(0xffffffff & result))
finally:
try:
if leak_temp_dir:
logging.warning('Deliberately leaking %s for later examination',
run_dir)
else:
try:
if not file_path.rmtree(run_dir):
print >> sys.stderr, (
'Failed to delete the temporary directory, forcibly failing\n'
'the task because of it. No zombie process can outlive a\n'
'successful task run and still be marked as successful.\n'
'Fix your stuff.')
result = result or 1
except OSError:
logging.warning('Leaking %s', run_dir)
result = 1
# HACK(vadimsh): On Windows rmtree(run_dir) call above has
# a synchronization effect: it finishes only when all task child processes
# terminate (since a running process locks *.exe file). Examine out_dir
# only after that call completes (since child processes may
# write to out_dir too and we need to wait for them to finish).
# Upload out_dir and generate a .isolated file out of this directory.
# It is only done if files were written in the directory.
if os.path.isdir(out_dir) and os.listdir(out_dir):
with tools.Profiler('ArchiveOutput'):
results = isolateserver.archive_files_to_storage(
storage, [out_dir], None)
# TODO(maruel): Implement side-channel to publish this information.
output_data = {
'hash': results[0][0],
'namespace': storage.namespace,
'storage': storage.location,
}
sys.stdout.flush()
print(
'[run_isolated_out_hack]%s[/run_isolated_out_hack]' %
tools.format_json(output_data, dense=True))
finally:
try:
if os.path.isdir(out_dir) and not file_path.rmtree(out_dir):
result = result or 1
except OSError:
# The error was already printed out. Report it but that's it. Only
# report on non-Windows or on Windows when the process had succeeded.
# Due to the way file sharing works on Windows, it's sadly expected that
# file deletion may fail when a test failed.
if sys.platform != 'win32' or not result:
on_error.report(None)
result = 1
return result
def main(args):
tools.disable_buffering()
parser = tools.OptionParserWithLogging(
usage='%prog <options>',
version=__version__,
log_file=RUN_ISOLATED_LOG_FILE)
data_group = optparse.OptionGroup(parser, 'Data source')
data_group.add_option(
'-s', '--isolated',
help='Hash of the .isolated to grab from the isolate server')
data_group.add_option(
'-H', dest='isolated', help=optparse.SUPPRESS_HELP)
isolateserver.add_isolate_server_options(data_group)
parser.add_option_group(data_group)
isolateserver.add_cache_options(parser)
parser.set_defaults(cache='cache')
debug_group = optparse.OptionGroup(parser, 'Debugging')
debug_group.add_option(
'--leak-temp-dir',
action='store_true',
help='Deliberately leak isolate\'s temp dir for later examination '
'[default: %default]')
parser.add_option_group(debug_group)
auth.add_auth_options(parser)
options, args = parser.parse_args(args)
if not options.isolated:
parser.error('--isolated is required.')
auth.process_auth_options(parser, options)
isolateserver.process_isolate_server_options(parser, options, True)
cache = isolateserver.process_cache_options(options)
with isolateserver.get_storage(
options.isolate_server, options.namespace) as storage:
# Hashing schemes used by |storage| and |cache| MUST match.
assert storage.hash_algo == cache.hash_algo
return run_tha_test(
options.isolated, storage, cache, options.leak_temp_dir, args)
if __name__ == '__main__':
# Ensure that we are always running with the correct encoding.
fix_encoding.fix_encoding()
sys.exit(main(sys.argv[1:]))
|
sgraham/nope
|
tools/swarming_client/run_isolated.py
|
Python
|
bsd-3-clause
| 11,215
|
# Copyright (c) 2020, Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can be
# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
import os
import shutil
import tempfile
import pytest
import coremltools.converters as converter
from coremltools.converters.mil.input_types import TensorType
from coremltools.converters.mil.frontend.tensorflow.test.testing_utils import (
get_tf_keras_io_names,
)
from coremltools.converters.mil.frontend.tensorflow.test import (
testing_utils as tf_testing_utils,
)
from coremltools.converters.mil.frontend.tensorflow2.test.testing_utils import (
make_tf2_graph
)
tf = pytest.importorskip("tensorflow", minversion="2.1.0")
# -----------------------------------------------------------------------------
# Overwrite utilities to enable different conversion / compare method
tf_testing_utils.frontend = "tensorflow"
tf_testing_utils.make_tf_graph = make_tf2_graph
# -----------------------------------------------------------------------------
# Import TF 2.x-compatible TF 1.x test cases
from coremltools.converters.mil.frontend.tensorflow.test.test_load import (
frontend,
TestTf1ModelInputsOutputs as TestTf2ModelInputsOutputs,
)
class TestTf2ModelFormats:
def setup(self):
self.saved_model_dir = tempfile.mkdtemp()
_, self.model_path_h5 = tempfile.mkstemp(
suffix=".h5", prefix=self.saved_model_dir
)
_, self.model_path_pb = tempfile.mkstemp(
suffix=".pb", prefix=self.saved_model_dir
)
def teardown(self):
if os.path.exists(self.saved_model_dir):
shutil.rmtree(self.saved_model_dir)
def test_keras_model(self):
keras_model = tf.keras.Sequential(
[tf.keras.layers.ReLU(input_shape=(4, 5), batch_size=3)]
)
input_names, output_names = get_tf_keras_io_names(keras_model)
mlmodel = converter.convert(
keras_model,
inputs=[TensorType(input_names[0], (3, 4, 5))],
outputs=["Identity"],
source=frontend,
)
assert mlmodel is not None
def test_keras_saved_model_file(self):
keras_model = tf.keras.Sequential(
[
tf.keras.layers.Flatten(input_shape=(28, 28), batch_size=1),
tf.keras.layers.Dense(10, activation=tf.nn.relu),
]
)
keras_model.save(self.saved_model_dir, save_format="tf")
mlmodel = converter.convert(
self.saved_model_dir, outputs="Identity", source=frontend
)
assert mlmodel is not None
def test_keras_h5_file(self):
keras_model = tf.keras.Sequential(
[tf.keras.layers.ReLU(input_shape=(4, 5), batch_size=3)]
)
input_names, output_names = get_tf_keras_io_names(keras_model)
keras_model.save(self.model_path_h5, save_format="h5")
mlmodel = converter.convert(
self.model_path_h5,
inputs=[TensorType(input_names[0], (3, 4, 5))],
outputs=["Identity"],
source=frontend,
)
assert mlmodel is not None
def test_concrete_function_list_from_tf_low_level_api(self):
root = tf.train.Checkpoint()
root.v1 = tf.Variable(3.0)
root.v2 = tf.Variable(2.0)
root.f = tf.function(lambda x: root.v1 * root.v2 * x)
input_data = tf.constant(1.0, shape=[1, 1])
to_save = root.f.get_concrete_function(input_data)
tf.saved_model.save(root, self.saved_model_dir, to_save)
tf_model = tf.saved_model.load(self.saved_model_dir)
concrete_func = tf_model.signatures[
tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY
]
mlmodel = converter.convert(
[concrete_func], outputs="Identity", source=frontend
)
assert mlmodel is not None
def test_saved_model_list_from_tf_function(self):
class build_model(tf.Module):
@tf.function(
input_signature=[tf.TensorSpec(shape=[3, 4, 5], dtype=tf.float32)]
)
def __call__(self, x):
return tf.nn.relu(x)
model = build_model()
tf.saved_model.save(model, self.saved_model_dir)
mlmodel = converter.convert(
self.saved_model_dir, outputs=["Identity"], source=frontend
)
assert mlmodel is not None
def test_concrete_function_list_from_tf_function(self):
class build_model(tf.Module):
@tf.function(
input_signature=[tf.TensorSpec(shape=[3, 4, 5], dtype=tf.float32)]
)
def __call__(self, x):
return tf.nn.relu(x)
model = build_model()
concrete_func = model.__call__.get_concrete_function()
mlmodel = converter.convert(
[concrete_func], outputs=["Identity"], source=frontend
)
assert mlmodel is not None
def test_model_metadata(self):
keras_model = tf.keras.Sequential(
[tf.keras.layers.ReLU(input_shape=(4, 5), batch_size=3)]
)
input_names, output_names = get_tf_keras_io_names(keras_model)
mlmodel = converter.convert(
keras_model,
inputs=[TensorType(input_names[0], (3, 4, 5))],
outputs=["Identity"],
source=frontend,
)
metadata_keys = mlmodel.get_spec().description.metadata.userDefined
assert "com.github.apple.coremltools.version" in metadata_keys
assert "com.github.apple.coremltools.source" in metadata_keys
assert "tensorflow==2." in metadata_keys["com.github.apple.coremltools.source"]
def test_invalid_format_none(self):
with pytest.raises(NotImplementedError) as e:
converter.convert(None, source=frontend)
e.match(r"Expected model format: .* .h5")
def test_invalid_format_invalid_extension(self):
_, invalid_filename = tempfile.mkstemp(
suffix=".invalid", prefix=self.saved_model_dir
)
with pytest.raises(NotImplementedError) as e:
converter.convert(invalid_filename, source=frontend)
e.match(r"Expected model format: .* .h5")
def test_invalid_format_multiple_concrete_functions(self):
class build_model(tf.Module):
@tf.function(
input_signature=[tf.TensorSpec(shape=[3, 4, 5], dtype=tf.float32)]
)
def __call__(self, x):
return tf.nn.relu(x)
model = build_model()
cf = model.__call__.get_concrete_function()
with pytest.raises(NotImplementedError) as e:
converter.convert([cf, cf, cf], source=frontend)
e.match(r"Only a single concrete function is supported")
def test_invalid_converter_type(self):
keras_model = tf.keras.Sequential(
[tf.keras.layers.ReLU(input_shape=(4, 5), batch_size=3)]
)
with pytest.raises(ValueError) as e:
converter.convert(keras_model, source="invalid")
expected_msg = r'Unrecognized value of argument "source": .*'
e.match(expected_msg)
with pytest.raises(NotImplementedError) as e:
converter.convert(keras_model, convert_to="invalid", source=frontend)
e.match(r"Backend converter .* not implemented")
def test_invalid_format_non_exist(self):
non_exist_filename = self.model_path_h5.replace(".h5", "_non_exist.h5")
with pytest.raises(ValueError) as e:
converter.convert(non_exist_filename, source=frontend)
e.match(r"Input model .* does not exist")
|
apple/coremltools
|
coremltools/converters/mil/frontend/tensorflow2/test/test_v2_load.py
|
Python
|
bsd-3-clause
| 7,681
|
# -*- coding: utf-8 -*-
from .cryptol import (Cryptol, Provers,
ProofResult, SatResult, AllSatResult,
CryptolError, CryptolServerError, ProverError)
|
GaloisInc/pycryptol
|
cryptol/__init__.py
|
Python
|
bsd-3-clause
| 194
|
VERSION = (0,1,4)
|
Axilent/Dox
|
dox/__init__.py
|
Python
|
bsd-3-clause
| 20
|
import template_to_pdf
def render(report):
template = template_to_pdf.Template('job_discovery/print.html')
context = {
"jobs": report.liked_jobs,
"job_pool_location": report.location.adzuna_locations,
}
return template.render(context)
|
lm-tools/situational
|
situational/apps/job_discovery/pdf.py
|
Python
|
bsd-3-clause
| 269
|
# _*_ coding: utf-8 _*_
"""
@copyright Copyright (c) 2014 Submit Consulting
@author Angel Sullon (@asullom)
@package sad
Description: Registers the sad app's models with the Django admin
"""
from django.contrib import admin
from apps.sad.models import Module, Menu, UserProfileEnterprise, UserProfileHeadquar,\
UserProfileAssociation
from apps.sad.models import Profile
admin.site.register(Module)
admin.site.register(Menu)
admin.site.register(UserProfileEnterprise)
admin.site.register(UserProfileHeadquar)
admin.site.register(UserProfileAssociation)
# Define an inline admin descriptor for UserProfile model
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.models import User
# which acts a bit like a singleton
class UserProfileInline(admin.StackedInline):
model = Profile
can_delete = False
verbose_name_plural = 'profile'
# Define a new User admin
class UserAdmin(UserAdmin):
inlines = (UserProfileInline,)
# Re-register UserAdmin
admin.site.unregister(User)
admin.site.register(User, UserAdmin)
|
submitconsulting/backenddj
|
apps/sad/admin.py
|
Python
|
bsd-3-clause
| 1,077
|
#!/usr/bin/env python
from setuptools import setup
from subprocess import call
def convert_readme():
try:
call(["pandoc", "-f", "markdown_github", "-t", "rst", "-o", "README.txt", "readme.md"])
except OSError:
pass
return open('README.txt').read()
setup(name='mongodbforms',
version='0.3',
description="An implementation of django forms using mongoengine.",
author='Jan Schrewe',
author_email='jan@schafproductions.com',
url='http://www.schafproductions.com/projects/django-mongodb-forms/',
packages=['mongodbforms',],
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Framework :: Django',
],
license='New BSD License',
long_description=convert_readme(),
include_package_data=True,
zip_safe=False,
install_requires=['setuptools', 'django>=1.4', 'mongoengine>=0.8.3',],
)
|
fmoro/django-mongodbforms
|
setup.py
|
Python
|
bsd-3-clause
| 1,096
|
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/CacheDir.py 2009/09/04 16:33:07 david"
__doc__ = """
CacheDir support
"""
import os.path
import stat
import string
import sys
import SCons.Action
cache_enabled = True
cache_debug = False
cache_force = False
cache_show = False
def CacheRetrieveFunc(target, source, env):
t = target[0]
fs = t.fs
cd = env.get_CacheDir()
cachedir, cachefile = cd.cachepath(t)
if not fs.exists(cachefile):
cd.CacheDebug('CacheRetrieve(%s): %s not in cache\n', t, cachefile)
return 1
cd.CacheDebug('CacheRetrieve(%s): retrieving from %s\n', t, cachefile)
if SCons.Action.execute_actions:
if fs.islink(cachefile):
fs.symlink(fs.readlink(cachefile), t.path)
else:
env.copy_from_cache(cachefile, t.path)
st = fs.stat(cachefile)
fs.chmod(t.path, stat.S_IMODE(st[stat.ST_MODE]) | stat.S_IWRITE)
return 0
def CacheRetrieveString(target, source, env):
t = target[0]
fs = t.fs
cd = env.get_CacheDir()
cachedir, cachefile = cd.cachepath(t)
if t.fs.exists(cachefile):
return "Retrieved `%s' from cache" % t.path
return None
CacheRetrieve = SCons.Action.Action(CacheRetrieveFunc, CacheRetrieveString)
CacheRetrieveSilent = SCons.Action.Action(CacheRetrieveFunc, None)
def CachePushFunc(target, source, env):
t = target[0]
if t.nocache:
return
fs = t.fs
cd = env.get_CacheDir()
cachedir, cachefile = cd.cachepath(t)
if fs.exists(cachefile):
# Don't bother copying it if it's already there. Note that
# usually this "shouldn't happen" because if the file already
# existed in cache, we'd have retrieved the file from there,
# not built it. This can happen, though, in a race, if some
# other person running the same build pushes their copy to
# the cache after we decide we need to build it but before our
# build completes.
cd.CacheDebug('CachePush(%s): %s already exists in cache\n', t, cachefile)
return
cd.CacheDebug('CachePush(%s): pushing to %s\n', t, cachefile)
tempfile = cachefile+'.tmp'+str(os.getpid())
errfmt = "Unable to copy %s to cache. Cache file is %s"
if not fs.isdir(cachedir):
try:
fs.makedirs(cachedir)
except EnvironmentError:
# We may have received an exception because another process
# has beaten us creating the directory.
if not fs.isdir(cachedir):
msg = errfmt % (str(target), cachefile)
raise SCons.Errors.EnvironmentError, msg
try:
if fs.islink(t.path):
fs.symlink(fs.readlink(t.path), tempfile)
else:
fs.copy2(t.path, tempfile)
fs.rename(tempfile, cachefile)
st = fs.stat(t.path)
fs.chmod(cachefile, stat.S_IMODE(st[stat.ST_MODE]) | stat.S_IWRITE)
except EnvironmentError:
# It's possible someone else tried writing the file at the
# same time we did, or else that there was some problem like
# the CacheDir being on a separate file system that's full.
# In any case, inability to push a file to cache doesn't affect
# the correctness of the build, so just print a warning.
msg = errfmt % (str(target), cachefile)
SCons.Warnings.warn(SCons.Warnings.CacheWriteErrorWarning, msg)
CachePush = SCons.Action.Action(CachePushFunc, None)
class CacheDir:
def __init__(self, path):
try:
import hashlib
except ImportError:
msg = "No hashlib or MD5 module available, CacheDir() not supported"
SCons.Warnings.warn(SCons.Warnings.NoMD5ModuleWarning, msg)
self.path = None
else:
self.path = path
self.current_cache_debug = None
self.debugFP = None
def CacheDebug(self, fmt, target, cachefile):
if cache_debug != self.current_cache_debug:
if cache_debug == '-':
self.debugFP = sys.stdout
elif cache_debug:
self.debugFP = open(cache_debug, 'w')
else:
self.debugFP = None
self.current_cache_debug = cache_debug
if self.debugFP:
self.debugFP.write(fmt % (target, os.path.split(cachefile)[1]))
def is_enabled(self):
return (cache_enabled and not self.path is None)
def cachepath(self, node):
"""
"""
if not self.is_enabled():
return None, None
sig = node.get_cachedir_bsig()
subdir = string.upper(sig[0])
dir = os.path.join(self.path, subdir)
return dir, os.path.join(dir, sig)
def retrieve(self, node):
"""
This method is called from multiple threads in a parallel build,
so only do thread safe stuff here. Do thread unsafe stuff in
built().
Note that there's a special trick here with the execute flag
(one that's not normally done for other actions). Basically
if the user requested a no_exec (-n) build, then
SCons.Action.execute_actions is set to 0 and when any action
is called, it does its showing but then just returns zero
instead of actually calling the action execution operation.
The problem for caching is that if the file does NOT exist in
cache then the CacheRetrieveString won't return anything to
show for the task, but the Action.__call__ won't call
CacheRetrieveFunc; instead it just returns zero, which makes
the code below think that the file *was* successfully
retrieved from the cache, therefore it doesn't do any
subsequent building. However, the CacheRetrieveString didn't
print anything because it didn't actually exist in the cache,
and no more build actions will be performed, so the user just
sees nothing. The fix is to tell Action.__call__ to always
execute the CacheRetrieveFunc and then have the latter
explicitly check SCons.Action.execute_actions itself.
"""
if not self.is_enabled():
return False
env = node.get_build_env()
if cache_show:
if CacheRetrieveSilent(node, [], env, execute=1) == 0:
node.build(presub=0, execute=0)
return True
else:
if CacheRetrieve(node, [], env, execute=1) == 0:
return True
return False
def push(self, node):
if not self.is_enabled():
return
return CachePush(node, [], node.get_build_env())
def push_if_forced(self, node):
if cache_force:
return self.push(node)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
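# Hedged illustration (not part of the original SCons source): mirrors the
# cache layout produced by CacheDir.cachepath() above -- the first character
# of a node's cache signature, uppercased, names the subdirectory that holds
# the cached file (POSIX paths shown).
def _demo_cachepath_layout(cache_root, signature):
    subdir = signature[0].upper()
    directory = os.path.join(cache_root, subdir)
    return directory, os.path.join(directory, signature)
# e.g. _demo_cachepath_layout('/cache', 'ab12cd')
# == ('/cache/A', '/cache/A/ab12cd')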
|
cournape/numscons
|
numscons/scons-local/scons-local-1.2.0/SCons/CacheDir.py
|
Python
|
bsd-3-clause
| 8,002
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/protobuf/unittest_import.proto
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.protobuf import unittest_import_public_pb2 as google_dot_protobuf_dot_unittest__import__public__pb2
from google.protobuf.unittest_import_public_pb2 import *
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/protobuf/unittest_import.proto',
package='protobuf_unittest_import',
syntax='proto2',
serialized_pb=b'\n%google/protobuf/unittest_import.proto\x12\x18protobuf_unittest_import\x1a,google/protobuf/unittest_import_public.proto\"\x1a\n\rImportMessage\x12\t\n\x01\x64\x18\x01 \x01(\x05*<\n\nImportEnum\x12\x0e\n\nIMPORT_FOO\x10\x07\x12\x0e\n\nIMPORT_BAR\x10\x08\x12\x0e\n\nIMPORT_BAZ\x10\tB\x1f\n\x18\x63om.google.protobuf.testH\x01\xf8\x01\x01P\x00'
,
dependencies=[google_dot_protobuf_dot_unittest__import__public__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_IMPORTENUM = _descriptor.EnumDescriptor(
name='ImportEnum',
full_name='protobuf_unittest_import.ImportEnum',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='IMPORT_FOO', index=0, number=7,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='IMPORT_BAR', index=1, number=8,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='IMPORT_BAZ', index=2, number=9,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=141,
serialized_end=201,
)
_sym_db.RegisterEnumDescriptor(_IMPORTENUM)
ImportEnum = enum_type_wrapper.EnumTypeWrapper(_IMPORTENUM)
IMPORT_FOO = 7
IMPORT_BAR = 8
IMPORT_BAZ = 9
_IMPORTMESSAGE = _descriptor.Descriptor(
name='ImportMessage',
full_name='protobuf_unittest_import.ImportMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='d', full_name='protobuf_unittest_import.ImportMessage.d', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=113,
serialized_end=139,
)
DESCRIPTOR.message_types_by_name['ImportMessage'] = _IMPORTMESSAGE
DESCRIPTOR.enum_types_by_name['ImportEnum'] = _IMPORTENUM
ImportMessage = _reflection.GeneratedProtocolMessageType('ImportMessage', (_message.Message,), dict(
DESCRIPTOR = _IMPORTMESSAGE,
__module__ = 'google.protobuf.unittest_import_pb2'
# @@protoc_insertion_point(class_scope:protobuf_unittest_import.ImportMessage)
))
_sym_db.RegisterMessage(ImportMessage)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), b'\n\030com.google.protobuf.testH\001\370\001\001')
# @@protoc_insertion_point(module_scope)
|
sklearn-theano/sklearn-theano
|
sklearn_theano/externals/google/protobuf/unittest_import_pb2.py
|
Python
|
bsd-3-clause
| 3,453
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#Author: Tim Henderson
#Email: tim.tadh@hackthology.com
#For licensing see the LICENSE file in the top level directory.
'''
A copy of recursive.py with a lot of debug information added. The debug information
should indicate how the parser works.
'''
import lexer as lx
from lexer import Lex
def parse(s, debug=False):
s = [x for x in Lex(s)] ## s now holds the (token, attribute) "stream"
## I suggest you start reading this code at "def Expr(...)"
## ## ## ## ## ## START EXPRESSION EVALUATOR ## ## ## ## ## ##
def evalop(op, a, b):
if debug: print 'evalop>', op, a, b
if op == '+': return a + b
if op == '-': return a - b
if op == '*': return a * b
if op == '/': return a / b
raise Exception
def collapse(a, b):
if debug: print 'collapse>', a, b
if b is not None:
a = evalop(b[0], a, b[1])
if debug: print ' '*4, 'evalop result>', a
if len(b) == 3:
if debug: print ' '*4, b[2]
return collapse(a, b[2])
if debug: print ' '*4, 'collapse result>', a
return a
def accumulate(op, b, extra):
if debug: print 'accumulate>', op, b, extra
if extra is not None:
if len(extra) == 2:
return op, b, (extra[0], extra[1])
return op, b, (extra[0], extra[1], extra[2])
return op, b
## ## ## ## ## ## END EXPRESSION EVALUATOR ## ## ## ## ## ##
## ## ## ## ## ## START PARSER ## ## ## ## ## ##
# # # # # # # # # # #
#
# Notes on the construction of the parser.
#
# Each function models a production of the formal grammar of the
    # language as found in grammar.md. The signature of each
    # function is the same, i.e.
#
# def ProductionName(i): returns i_out, r
# @i = the index of the next token to consider (before the
# production has been processed).
# @i_out = the index of the next token to consider (after
# the production has been processed) its value
# reflects the input consumed by the production.
# @r = the return value to be passed to the parent.
#
# This parser does not produce an AST or any intermediate language.
# Instead, it evaluates the language in place and produces the result
# of the arithmetic expression. It is not necessary to understand
# this process, but the interested can read the code contained in
# `evalop`, `collapse`, and `accumulate`.
#
# # # # # # # # # # #
    ## NB. Sorry about the gratuitous print statements messing up the
    ## readability of the code. I wanted to make the execution of the
    ## parser understandable at run time. Go to an earlier version of
    ## this file to see the "clean" version.
def Expr(i):
## Expr : Term Expr_
if debug: print 'Expr : . Term Expr_'
i, r0 = Term(i) # Expr : Term . Expr_
if debug: print 'Expr : Term . Expr_'
i, r1 = Expr_(i) # Expr : Term Expr_ .
if debug: print 'Expr : Term Expr_ .'
return i, collapse(r0, r1)
def Expr_(i):
## Expr_ : PLUS Term Expr_
## Expr_ : DASH Term Expr_
## Expr_ : e (the empty string)
if i >= len(s): # Expr_ : e .
if debug: print 'Expr_ : e .'
return i, None
a = s[i]
if a.type == lx.PLUS: # Expr_ : PLUS . Term Expr_
if debug: print 'Expr_ : PLUS . Term Expr_'
i += 1
op = '+'
elif a.type == lx.DASH: # Expr_ : DASH . Term Expr_
if debug: print 'Expr_ : DASH . Term Expr_'
i += 1
op = '-'
else: # Expr_ : e .
if debug: print 'Expr_ : e .'
return i, None
i, b = Term(i) # Expr_ : (PLUS|DASH) Term . Expr_
if debug: print 'Expr_ : (PLUS|DASH) Term . Expr_'
i, extra = Expr_(i) # Expr_ : (PLUS|DASH) Term Expr_ .
if debug: print 'Expr_ : (PLUS|DASH) Term Expr_ .'
return i, accumulate(op, b, extra)
def Term(i):
## Term : Factor Term_
if debug: print 'Term : . Factor Term_'
i, r0 = Factor(i) # Term : Factor . Term_
if debug: print 'Term : Factor . Term_'
i, r1 = Term_(i) # Term : Factor Term_ .
if debug: print 'Term : Factor Term_ .'
return i, collapse(r0, r1)
def Term_(i):
## Term_ : STAR Factor Term_
## Term_ : SLASH Factor Term_
## Term_ : e (the empty string)
if i >= len(s): # Term_ : e .
if debug: print 'Term_ : e .'
return i, None
a = s[i]
if a.type == lx.STAR: # Term_ : STAR . Factor Term_
if debug: print 'Term_ : STAR . Factor Term_'
i += 1
op = '*'
elif a.type == lx.SLASH: # Term_ : SLASH . Factor Term_
if debug: print 'Term_ : SLASH . Factor Term_'
i += 1
op = '/'
else: # Term_ : e .
if debug: print 'Term_ : e .'
return i, None
i, b = Factor(i) # Term_ : (STAR|SLASH) Factor . Term_
if debug: print 'Term_ : (STAR|SLASH) Factor . Term_'
i, extra = Term_(i) # Term_ : (STAR|SLASH) Factor Term_ .
if debug: print 'Term_ : (STAR|SLASH) Factor Term_ .'
return i, accumulate(op, b, extra)
def Factor(i):
## Factor : NUMBER
## Factor : DASH NUMBER
## Factor : LPAREN Expr RPAREN
a = s[i]
if a.type == lx.NUMBER: # Factor : NUMBER .
i += 1
r = a.value
if debug: print 'Factor : NUMBER .'
elif a.type == lx.DASH: # Factor : DASH . NUMBER
if debug: print 'Factor : DASH . NUMBER'
i += 1
a = s[i]
if a.type == lx.NUMBER: # Factor : DASH NUMBER .
if debug: print 'Factor : DASH NUMBER .'
i += 1
r = -1 * a.value
else:
raise SyntaxError
elif a.type == lx.LPAREN: # Factor : LPAREN . Expr RPAREN
i += 1
if debug: print 'Factor : LPAREN . Expr RPAREN'
i, r = Expr(i) # Factor : LPAREN Expr . RPAREN
if debug: print 'Factor : LPAREN Expr . RPAREN'
a = s[i]
if a.type != lx.RPAREN:
raise SyntaxError
i += 1 # Factor : LPAREN Expr RPAREN .
if debug: print 'Factor : LPAREN Expr RPAREN .'
else:
raise SyntaxError, "Unexpected token %s" % a
return i, r
## ## ## ## ## ## END PARSER ## ## ## ## ## ##
# This kicks off the parser.
i, r = Expr(0)
# If i (the next symbol indicator) does not equal the length of the
# input then there is unconsumed input.
if i != len(s):
raise SyntaxError, "Unconsumed input %s" % (s[i:])
return r
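# Hedged usage sketch (not part of the original module): the productions above
# give '*' and '/' higher precedence than '+' and '-', and collapse() folds
# operands from the left, so both levels end up left-associative.
def _demo_precedence():
    assert parse('1+2*3') == 7      # Term binds tighter: 1 + (2*3)
    assert parse('8-2-1') == 5      # left-associative: (8-2) - 1
    assert parse('(2+3)*4') == 20   # grouping via Factor : LPAREN Expr RPAREN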
if __name__ == '__main__':
print parse('(2+3)*4', True)
|
timtadh/PyOhio2011
|
recursive_verbose.py
|
Python
|
bsd-3-clause
| 7,503
|
# -*- encoding: utf-8 -*-
# pylint: skip-file
from __future__ import absolute_import
import logging
import time
import unittest
import warnings
import json
from requests.utils import quote
from jose.exceptions import JWTError
import six
import httmock
from esipy import EsiSecurity
from esipy.events import Signal
from esipy.exceptions import APIException
from .mock import _all_auth_mock_
from .mock import non_json_error
from .mock import oauth_token
from .mock import oauth_revoke
# set pyswagger logger to error, as it displays too many things for test needs
pyswagger_logger = logging.getLogger('pyswagger')
pyswagger_logger.setLevel(logging.ERROR)
class TestEsiSecurity(unittest.TestCase):
CALLBACK_URI = "https://foo.bar/baz/callback"
CLIENT_ID = 'foo'
SECRET_KEY = 'bar'
BASIC_TOKEN = six.u('Zm9vOmJhcg==')
SECURITY_NAME = 'evesso'
TOKEN_IDENTIFIER = 'ESIPY_TEST_TOKEN'
CODE_VERIFIER = "dBjftJeZ4CVP-mB92K27uhbUJU1p1r_wW1gFWFOEjXk"
CODE_CHALLENGE = "E9Melhoa2OwvFrEMTJguCHaoeK1t8URWbuGJSstw-cM"
RSC_SSO_ENDPOINTS = "test/resources/oauth-authorization-server.json"
RSC_JWKS = "test/resources/jwks.json"
def setUp(self):
warnings.simplefilter('ignore')
self.custom_refresh_token_signal = Signal()
with httmock.HTTMock(*_all_auth_mock_):
self.security = EsiSecurity(
redirect_uri=TestEsiSecurity.CALLBACK_URI,
client_id=TestEsiSecurity.CLIENT_ID,
secret_key=TestEsiSecurity.SECRET_KEY,
signal_token_updated=self.custom_refresh_token_signal,
token_identifier=TestEsiSecurity.TOKEN_IDENTIFIER
)
self.security_pkce = EsiSecurity(
redirect_uri=TestEsiSecurity.CALLBACK_URI,
client_id=TestEsiSecurity.CLIENT_ID,
code_verifier=TestEsiSecurity.CODE_VERIFIER,
)
with open(TestEsiSecurity.RSC_SSO_ENDPOINTS, 'r') as sso_endpoints:
self.sso_endpoints = json.load(sso_endpoints)
def test_esisecurity_init(self):
with httmock.HTTMock(*_all_auth_mock_):
with self.assertRaises(AttributeError):
EsiSecurity(
redirect_uri=TestEsiSecurity.CALLBACK_URI,
client_id=TestEsiSecurity.CLIENT_ID,
secret_key=TestEsiSecurity.SECRET_KEY,
sso_endpoints_url=""
)
with self.assertRaises(AttributeError):
EsiSecurity(
redirect_uri=TestEsiSecurity.CALLBACK_URI,
client_id=TestEsiSecurity.CLIENT_ID
)
with open(TestEsiSecurity.RSC_JWKS, 'r') as jwks:
jwks = json.load(jwks)
EsiSecurity(
redirect_uri=TestEsiSecurity.CALLBACK_URI,
client_id=TestEsiSecurity.CLIENT_ID,
secret_key=TestEsiSecurity.SECRET_KEY,
jwks_key=jwks['keys'][0]
)
self.assertEqual(
self.security.security_name,
TestEsiSecurity.SECURITY_NAME
)
self.assertEqual(
self.security.redirect_uri,
TestEsiSecurity.CALLBACK_URI
)
self.assertEqual(
self.security.client_id,
TestEsiSecurity.CLIENT_ID
)
self.assertEqual(
self.security.secret_key,
TestEsiSecurity.SECRET_KEY
)
self.assertEqual(
self.security.token_identifier,
TestEsiSecurity.TOKEN_IDENTIFIER
)
self.assertEqual(
self.security.oauth_issuer,
self.sso_endpoints['issuer']
)
self.assertEqual(
self.security.oauth_authorize,
self.sso_endpoints['authorization_endpoint']
)
self.assertEqual(
self.security.oauth_token,
self.sso_endpoints['token_endpoint']
)
self.assertEqual(
self.security.oauth_revoke,
self.sso_endpoints['revocation_endpoint']
)
def test_esisecurity_update_token(self):
self.security.update_token({
'access_token': 'access_token',
'refresh_token': 'refresh_token',
'expires_in': 60
})
self.assertEqual(self.security.access_token, 'access_token')
self.assertEqual(self.security.refresh_token, 'refresh_token')
self.assertEqual(self.security.token_expiry, int(time.time() + 60))
def test_esisecurity_get_auth_uri(self):
with self.assertRaises(AttributeError):
self.security.get_auth_uri(state="")
self.assertEqual(
self.security.get_auth_uri(state='teststate'),
("%s?response_type=code"
"&redirect_uri=%s&client_id=%s&state=teststate") % (
self.sso_endpoints['authorization_endpoint'],
quote(TestEsiSecurity.CALLBACK_URI, safe=''),
TestEsiSecurity.CLIENT_ID
)
)
self.assertEqual(
self.security.get_auth_uri(implicit=True, state='teststate'),
("%s?response_type=token"
"&redirect_uri=%s&client_id=%s&state=teststate") % (
self.sso_endpoints['authorization_endpoint'],
quote(TestEsiSecurity.CALLBACK_URI, safe=''),
TestEsiSecurity.CLIENT_ID
)
)
scopes = ["Scope1", "Scope2"]
self.assertEqual(
self.security.get_auth_uri(scopes=scopes, state='teststate'),
("%s?response_type=code&redirect_uri=%s"
"&client_id=%s&scope=Scope1+Scope2&state=teststate") % (
self.sso_endpoints['authorization_endpoint'],
quote(TestEsiSecurity.CALLBACK_URI, safe=''),
TestEsiSecurity.CLIENT_ID
)
)
def test_esisecurity_get_access_token_request_params(self):
params = self.security.get_access_token_params('foo')
self.assertEqual(
params['headers'],
{'Authorization': 'Basic %s' % TestEsiSecurity.BASIC_TOKEN}
)
self.assertEqual(
params['url'],
self.sso_endpoints['token_endpoint']
)
self.assertEqual(
params['data'],
{
'grant_type': 'authorization_code',
'code': 'foo',
}
)
def test_esisecurity_get_refresh_token_request_params(self):
with self.assertRaises(AttributeError):
self.security.get_refresh_token_params()
self.security.update_token({
'access_token': 'access_token',
'refresh_token': 'refresh_token',
'expires_in': 60
})
# refresh all scopes
params = self.security.get_refresh_token_params()
self.assertEqual(
params['headers'],
{'Authorization': 'Basic %s' % TestEsiSecurity.BASIC_TOKEN}
)
self.assertEqual(
params['url'],
self.sso_endpoints['token_endpoint']
)
self.assertEqual(
params['data'],
{
'grant_type': 'refresh_token',
'refresh_token': 'refresh_token',
}
)
# refresh specific scopes
params = self.security.get_refresh_token_params(scope_list=['a', 'b'])
self.assertEqual(
params['data'],
{
'grant_type': 'refresh_token',
'refresh_token': 'refresh_token',
'scope': 'a+b'
}
)
# refresh specific scopes exception
with self.assertRaises(AttributeError):
self.security.get_refresh_token_params(scope_list='notalist')
def test_esisecurity_token_expiry(self):
self.security.token_expiry = None
self.assertTrue(self.security.is_token_expired())
self.security.token_expiry = time.time() - 10
self.assertTrue(self.security.is_token_expired())
self.security.token_expiry = time.time() + 60
self.assertFalse(self.security.is_token_expired())
self.assertTrue(self.security.is_token_expired(offset=70))
def test_esisecurity_auth(self):
with httmock.HTTMock(oauth_token):
ret = self.security.auth('let it bee')
self.assertEqual(ret['access_token'], 'access_token')
self.assertEqual(ret['refresh_token'], 'refresh_token')
self.assertEqual(ret['expires_in'], 1200)
ret = self.security.auth('no_refresh')
self.assertEqual(ret['access_token'], 'access_token')
self.assertNotIn('refresh_token', ret)
self.assertEqual(ret['expires_in'], 1200)
with self.assertRaises(APIException):
self.security.auth('fail_test')
def test_esisecurity_refresh(self):
with httmock.HTTMock(oauth_token):
self.security.refresh_token = 'refresh_token'
ret = self.security.refresh()
self.assertEqual(ret['access_token'], 'access_token')
self.assertEqual(ret['refresh_token'], 'refresh_token')
self.assertEqual(ret['expires_in'], 1200)
with self.assertRaises(APIException):
self.security.refresh_token = 'fail_test_token'
self.security.refresh()
def test_esisecurity_revoke(self):
with httmock.HTTMock(oauth_revoke):
self.security.refresh_token = 'refresh_token'
self.security.revoke()
self.security.access_token = 'access_token'
self.security.revoke()
with self.assertRaises(AttributeError):
self.security.revoke()
def test_esisecurity_verify(self):
        # this is just for coverage purposes; it doesn't work without a valid
        # JWT token
with self.assertRaises(AttributeError):
self.security.verify()
self.security.update_token({
'access_token': 'access_token',
'refresh_token': 'refresh_token',
'expires_in': 60
})
with self.assertRaises(JWTError):
self.security.verify()
with httmock.HTTMock(*_all_auth_mock_):
with open(TestEsiSecurity.RSC_JWKS, 'r') as jwks:
jwks = json.load(jwks)
security_nojwks = EsiSecurity(
redirect_uri=TestEsiSecurity.CALLBACK_URI,
client_id=TestEsiSecurity.CLIENT_ID,
secret_key=TestEsiSecurity.SECRET_KEY,
jwks_key=jwks['keys'][0]
)
security_nojwks.update_token({
'access_token': 'access_token',
'refresh_token': 'refresh_token',
'expires_in': 60
})
with self.assertRaises(JWTError):
security_nojwks.verify()
def test_esisecurity_call(self):
class RequestTest(object):
def __init__(self):
self._security = []
self._p = {'header': {}}
self.security.update_token({
'access_token': 'access_token',
'refresh_token': 'refresh_token',
'expires_in': 60
})
req = RequestTest()
self.security(req)
self.assertNotIn('Authorization', req._p['header'])
req._security.append({
'unknown_security_name': {},
})
self.security(req)
self.assertNotIn('Authorization', req._p['header'])
req._security.append({
'evesso': {},
})
self.security(req)
self.assertIn('Authorization', req._p['header'])
self.assertEqual(
'Bearer access_token',
req._p['header']['Authorization']
)
def test_esisecurity_callback_refresh(self):
class RequestTest(object):
""" pyswagger Request object over simplified for test purpose"""
def __init__(self):
self._security = ['evesso']
self._p = {'header': {}}
def callback_function(**kwargs):
callback_function.count += 1
callback_function.count = 0
self.custom_refresh_token_signal.add_receiver(callback_function)
self.security.update_token({
'access_token': 'access_token',
'refresh_token': 'refresh_token',
'expires_in': -1
})
        # test the customized auto-refresh callback event
with httmock.HTTMock(oauth_token):
req = RequestTest()
self.security(req)
self.assertEqual(callback_function.count, 1)
def test_esisecurity_non_json_response(self):
self.security.update_token({
'access_token': 'access_token',
'refresh_token': 'refresh_token',
'expires_in': -1
})
with httmock.HTTMock(non_json_error):
try:
self.security.auth('somecode')
except APIException as exc:
self.assertEqual(exc.status_code, 502)
self.assertEqual(
exc.response,
six.b('<html><body>Some HTML Errors</body></html>')
)
try:
self.security.refresh()
except APIException as exc:
self.assertEqual(exc.status_code, 502)
self.assertEqual(
exc.response,
six.b('<html><body>Some HTML Errors</body></html>')
)
def test_esisecurity_pkce(self):
uri = self.security_pkce.get_auth_uri('test')
self.assertIn(
'code_challenge=%s' % TestEsiSecurity.CODE_CHALLENGE,
uri
)
params = self.security_pkce.get_access_token_params('test')
self.assertEqual(
params['data']['code_verifier'],
TestEsiSecurity.CODE_VERIFIER
)
self.assertEqual(
params['data']['client_id'],
TestEsiSecurity.CLIENT_ID
)
self.assertNotIn('Authorization', params['headers'])
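# Hedged sketch (not part of the original tests): CODE_VERIFIER and
# CODE_CHALLENGE above appear to be a standard RFC 7636 "S256" pair, i.e. the
# challenge is the unpadded base64url encoding of the SHA-256 digest of the
# verifier. A minimal derivation, for illustration only:
def _derive_s256_challenge(verifier):
    import base64
    import hashlib
    digest = hashlib.sha256(verifier.encode('ascii')).digest()
    return base64.urlsafe_b64encode(digest).rstrip(b'=').decode('ascii')
# _derive_s256_challenge(TestEsiSecurity.CODE_VERIFIER) should equal
# TestEsiSecurity.CODE_CHALLENGE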
|
Kyria/EsiPy
|
test/test_security.py
|
Python
|
bsd-3-clause
| 14,211
|
from flask_wtf import Form
from wtforms import TextField, PasswordField
from wtforms.validators import DataRequired, Email, EqualTo, Length
from .models import Meeting_Action_Item_User
|
phamtrisi/metapp2
|
metapp2/meeting_action_item_user/forms.py
|
Python
|
bsd-3-clause
| 185
|
import redis
server = redis.Redis(host='localhost', port=6379, db=0)
while True:
message = raw_input("What to say: ")
server.publish('messages', message)
if message == 'quit':
break
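# Hedged companion sketch (not part of the original script): a matching
# subscriber for the same 'messages' channel, stopping once the publisher
# sends 'quit'. It assumes a redis-py version that exposes the pubsub()
# helper; depending on the client version, payloads may come back as bytes,
# hence the tolerant comparison.
def _demo_subscriber():
    pubsub = server.pubsub()
    pubsub.subscribe('messages')
    for item in pubsub.listen():
        if item['type'] != 'message':
            continue
        print(item['data'])
        if item['data'] in ('quit', b'quit'):
            break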
|
toastdriven/wsgi_longpolling
|
messager.py
|
Python
|
bsd-3-clause
| 205
|
from djviewmodels.views import View, Redirect
from djviewmodels.viewmodels import Viewmodel
class Item(object):
def __init__(self, id):
self.cost = 40 * id
self.wh_markup = 10
self.retail_markup = 500
class ItemViewmodel(Viewmodel):
model = Item
wrap_each = True
class WholesalerItem(Viewmodel):
model = Item
wrap_each = True
fields = ['cost', 'wh_markup']
@property
def price(self):
return self.cost + self.wh_markup
class RetailItem(Viewmodel):
model = Item
wrap_each = True
exclude = ['wh_markup']
@property
def price(self):
return self.cost + self.retail_markup
class ItemAggregate(Viewmodel):
wrap_collection = True
model = Item
def total_price(self):
total = 0.0
for item in self.instances:
total += item.cost
return total
class ItemView(View):
viewmodels = { }
template_name = 'item.html'
def render_template(self, template_name, context, request):
return (template_name, context)
def init_request(self, request=None, item_id=None, *args, **kwargs):
if request.user.type == 'wholesaler':
self.viewmodels['item'] = WholesalerItem
else:
self.viewmodels['item'] = RetailItem
if not item_id:
raise Redirect("item_list")
item = Item(item_id)
return dict(item=item)
def get(self, request=None, item=None, *args, **kwargs):
return dict(item=item)
def post(self, request=None, item=None, *args, **kwargs):
request.item = item
raise Redirect("cart")
class CartView(View):
template_name = 'cart.html'
viewmodels = dict(
items = ItemViewmodel,
cart_total = ItemAggregate,
)
def get(self, request=None, *args, **kwargs):
items = request.session.get("cart")
return dict(
items=items,
cart_total=items,)
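# Hedged worked example (not part of the original module): with Item(2),
# cost = 40 * 2 = 80, so the WholesalerItem price property above yields
# 80 + 10 = 90 and the RetailItem price property 80 + 500 = 580. The raw
# attributes are checked directly to keep the sketch self-contained.
def _demo_item_prices():
    item = Item(2)
    assert item.cost == 80
    assert item.cost + item.wh_markup == 90        # wholesaler price
    assert item.cost + item.retail_markup == 580   # retail price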
|
freyley/django-viewmodels
|
djviewmodels/test_views.py
|
Python
|
bsd-3-clause
| 1,979
|
presets = {
'glider': {
'first_generation': set([(0, 0), (1, 0), (2, 0), (2, 1), (1, 2)]),
'board_size': (20,20)
},
'flasher': {
'first_generation': set([(5,5),(5,6),(5,7)]),
'board_size': (20,20)
},
'beacon': {
'first_generation': set([(5,1),(5,2),(6,1),(6,2),(7,3),(8,3),(7,4),(8,4)]),
'board_size': (20,20)
},
'clock': {
'first_generation': set([(5,2),(6,2),(6,4),(7,1),(7,3),(8,3)]),
'board_size': (20,20)
},
'frog': {
'first_generation': set([(5,1),(6,1),(7,1),(6,2),(7,2),(8,2)]),
'board_size': (20,20)
},
'glider_gun': {
'first_generation': set([(3,6),(4,6),(4,7),(13,6),(13,7),(13,8),(14,5),(14,9),(15,4),(15,10),(16,4),(16,10),(26,3),(26,7),(27,2),(27,3),(27,7),(27,8),(29,4),(29,5),(29,6),(30,4),(30,5),(30,6),(31,5),(37,4),(38,4),(38,5)]),
'board_size': (40,40)
}
}
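# Hedged usage sketch (not part of the original module): each preset pairs a
# set of live (x, y) cells with a two-element board_size, so a consumer might
# unpack one like this (the function name is illustrative only):
def _demo_load_preset(name):
    preset = presets[name]
    return preset['first_generation'], preset['board_size']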
|
missterr/gol
|
gol/presets.py
|
Python
|
bsd-3-clause
| 818
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
def update_artists(apps, schema_editor):
Artist = apps.get_model("fma", "Artist")
db_alias = schema_editor.connection.alias
for artist in Artist.objects.using(db_alias).all():
artist.save(using=db_alias)
class Migration(migrations.Migration):
dependencies = [
('fma', '0002_auto_20150105_0651'),
]
operations = [
migrations.RunPython(
update_artists,
),
]
|
FreeMusicNinja/api.freemusic.ninja
|
fma/migrations/0003_auto_20150203_0725.py
|
Python
|
bsd-3-clause
| 540
|
# This is your project's main settings file that can be committed to your
# repo. If you need to override a setting locally, use settings/local.py
import os
from bundles import PIPELINE_CSS, PIPELINE_JS
ROOT = os.path.abspath(
os.path.join(
os.path.dirname(__file__),
'..',
'..'
))
def path(*dirs):
return os.path.join(ROOT, *dirs)
SITE_ID = 1
LANGUAGE_CODE = 'en-US'
PROJECT_MODULE = 'airmozilla'
# Defines the views served for root URLs.
ROOT_URLCONF = '%s.urls' % PROJECT_MODULE
INSTALLED_APPS = (
'pipeline',
'django_browserid',
'django.contrib.contenttypes',
'django.contrib.auth',
'django.contrib.sessions',
'django.contrib.staticfiles',
'session_csrf',
# Application base, containing global templates.
'airmozilla.base',
'airmozilla.main',
'airmozilla.authentication',
'airmozilla.manage',
'airmozilla.suggest',
'airmozilla.search',
'airmozilla.comments',
'airmozilla.uploads',
'airmozilla.starred',
'airmozilla.subtitles',
'airmozilla.surveys',
'airmozilla.roku',
'airmozilla.cronlogger',
'airmozilla.staticpages',
'airmozilla.new',
'airmozilla.popcorn',
'airmozilla.chapters',
'airmozilla.closedcaptions',
'djcelery',
'kombu.transport.django',
'bootstrapform',
'sorl.thumbnail',
'django.contrib.messages',
'django.contrib.sites',
'django.contrib.flatpages', # this can be deleted later
'cronjobs',
'raven.contrib.django.raven_compat',
'django_jinja',
'django_nose', # deliberately making this the last one
)
# Absolute path to the directory that holds media.
MEDIA_ROOT = path('media')
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
MEDIA_URL = '/media/'
# Absolute path to the directory static files should be collected to.
STATIC_ROOT = path('static')
# URL prefix for static files
STATIC_URL = '/static/'
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'pipeline.finders.PipelineFinder',
)
STATICFILES_STORAGE = 'pipeline.storage.PipelineCachedStorage'
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
# Necessary so that test-utils doesn't try to execute some deprecated
# functionality on the database connection.
SQL_RESET_SEQUENCES = False
# We can use the simplest hasher because we never store usable passwords
# thanks to Persona.
PASSWORD_HASHERS = ('django.contrib.auth.hashers.UnsaltedMD5PasswordHasher',)
# Our session storage is all memcache, so use session-based message storage
# instead of the default FallbackStorage (which falls back to CookieStorage).
MESSAGE_STORAGE = 'django.contrib.messages.storage.session.SessionStorage'
# Note that this is different when running tests
# (useful to know if you're debugging tests).
AUTHENTICATION_BACKENDS = (
'%s.authentication.backend.AirmozillaBrowserIDBackend' % PROJECT_MODULE,
# but we're keeping this in case people still have sessions
# whose backend cookie points to this class path
'django_browserid.auth.BrowserIDBackend',
# Needed because the tests
# use self.client.login(username=..., password=...)
'django.contrib.auth.backends.ModelBackend',
)
# Domains allowed for log in
ALLOWED_BID = (
'mozilla.com',
'mozillafoundation.org',
'mozilla-japan.org',
)
LOGIN_URL = '/login/'
LOGIN_REDIRECT_URL = '/'
LOGIN_REDIRECT_URL_FAILURE = '/login-failure/'
_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.media',
'django.core.context_processors.request',
'session_csrf.context_processor',
'django.contrib.messages.context_processors.messages',
'airmozilla.manage.context_processors.badges',
'airmozilla.main.context_processors.base',
'airmozilla.main.context_processors.nav_bar',
'airmozilla.main.context_processors.search_form',
'airmozilla.main.context_processors.sidebar',
'airmozilla.main.context_processors.analytics',
'airmozilla.main.context_processors.dev',
'airmozilla.main.context_processors.browserid',
'airmozilla.main.context_processors.faux_i18n',
'airmozilla.main.context_processors.autocompeter',
'airmozilla.main.context_processors.fanout',
'airmozilla.starred.context_processors.stars',
)
TEMPLATES = [
{
'BACKEND': 'django_jinja.backend.Jinja2',
'APP_DIRS': True,
'OPTIONS': {
# Use jinja2/ for jinja templates
'app_dirname': 'jinja2',
# Don't figure out which template loader to use based on
# file extension
'match_extension': '',
# 'newstyle_gettext': True,
'context_processors': _CONTEXT_PROCESSORS,
'debug': False,
'undefined': 'jinja2.Undefined',
'extensions': [
'jinja2.ext.do',
'jinja2.ext.loopcontrols',
'jinja2.ext.with_',
'jinja2.ext.i18n', # needed to avoid errors in django_jinja
'jinja2.ext.autoescape',
'django_jinja.builtins.extensions.CsrfExtension',
'django_jinja.builtins.extensions.StaticFilesExtension',
'django_jinja.builtins.extensions.DjangoFiltersExtension',
'pipeline.jinja2.PipelineExtension',
],
'globals': {
'browserid_info': 'django_browserid.helpers.browserid_info',
'browserid_login': 'django_browserid.helpers.browserid_login',
'browserid_logout': 'django_browserid.helpers.browserid_logout'
}
}
},
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [], # what does this do?!
'APP_DIRS': True,
'OPTIONS': {
'debug': False,
'context_processors': _CONTEXT_PROCESSORS,
}
},
]
# Always generate a CSRF token for anonymous users.
ANON_ALWAYS = True
# Remove localization middleware
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.middleware.security.SecurityMiddleware',
'session_csrf.CsrfMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'csp.middleware.CSPMiddleware',
'airmozilla.authentication.middleware.ValidateIDToken',
'airmozilla.manage.middleware.CacheBustingMiddleware',
'airmozilla.staticpages.middleware.StaticPageFallbackMiddleware',
)
X_FRAME_OPTIONS = 'DENY'
# Enable timezone support for Django TZ-aware datetime objects
# Times stored in the db as UTC; forms/templates as Pacific time
USE_TZ = True
TIME_ZONE = 'UTC'
# Configuration for live/archiving events treatment
# How much time, in minutes, an event shows as "live" before its start time.
LIVE_MARGIN = 10
# Default amount of time, in minutes, an event spends in the "archiving" state.
ARCHIVING_MARGIN = 60
# How many events in the past (and future) should the calendar system
# return. E.g. if CALENDAR_SIZE=30, up to 60 events (half from the past
# and half from the future) will be output.
CALENDAR_SIZE = 30
# How many events should appear in the syndication feeds
FEED_SIZE = 20
# Number of upcoming events to display in the sidebar
UPCOMING_SIDEBAR_COUNT = 5
# Number of featured/trending events to display in the sidebar
FEATURED_SIDEBAR_COUNT = 5
# Number of trending events to display in the Roku feed
TRENDING_ROKU_COUNT = 20
# Use memcached for session storage with fallback on the database
SESSION_ENGINE = 'django.contrib.sessions.backends.cached_db'
# Always use secure cookies
COOKIES_SECURE = True
# URL to connect to ElasticSearch
ELASTICSEARCH_URL = 'http://localhost:9200/'
# Number of related events to display (max)
RELATED_CONTENT_SIZE = 4
# Boosting of title and tags makes them matter more.
RELATED_CONTENT_BOOST_TITLE = 1.0
RELATED_CONTENT_BOOST_TAGS = -0.5
# Defaults for Mozillians
MOZILLIANS_API_BASE = 'https://mozillians.org'
# API base URL
VIDLY_BASE_URL = 'https://vid.ly'
VIDLY_API_URL = 'https://m.vid.ly/api/'
# Name of the default Channel
DEFAULT_CHANNEL_SLUG = 'main'
DEFAULT_CHANNEL_NAME = 'Main'
# Name of the default channel for Mozillians
MOZILLIANS_CHANNEL_SLUG = 'mozillians'
MOZILLIANS_CHANNEL_NAME = 'Mozillians'
# How often, at most, approval pester emails are sent
PESTER_INTERVAL_DAYS = 3 # days
# Where you put secure username+password combinations for example
URL_TRANSFORM_PASSWORDS = {}
# Bit.ly URL shortener access token
# See README about how to generate one
BITLY_ACCESS_TOKEN = None
BITLY_URL = 'https://api-ssl.bitly.com/v3/shorten'
# Overridden so we can depend on more complex checking
BROWSERID_VERIFY_CLASS = (
'%s.authentication.views.CustomBrowserIDVerify' % PROJECT_MODULE
)
BROWSERID_REQUEST_ARGS = {'siteName': 'Air Mozilla'}
# Name of the bucket where you upload all large videos
S3_UPLOAD_BUCKET = 'air-mozilla-uploads'
# See http://amara.org/en/profiles/account/
# (THESE MIGHT BE OBSOLETE!)
AMARA_BASE_URL = 'https://www.amara.org/api2/partners'
AMARA_API_USERNAME = ''
AMARA_API_KEY = ''
# The new settings
AMARA_BASE_URL = 'https://amara.org'
AMARA_TEAM = 'mozilla'
AMARA_PROJECT = 'airmozilla'
AMARA_USERNAME = ''
AMARA_API_KEY = ''
SCRAPE_CREDENTIALS = {
# ('username', 'password'): ['intranet.mozilla.org'],
}
# If true, every search is logged and recorded
LOG_SEARCHES = True
try:
# ujson is a much faster json serializer
# We tell the django-jsonview decorator to use it only if the ujson
# package is installed and can be imported
import ujson # NOQA
JSON_MODULE = 'ujson'
JSON_USE_DJANGO_SERIALIZER = False
except ImportError:
pass
# When extracting screen captures, how many do we want to extract
# for each video. This number is static independent of the length
# of the video.
SCREENCAPTURES_NO_PICTURES = 12
# Name of the directory that gets created in the temp directory
# that we fill with screencaps, and that gets later picked up
# by another job that imports the JPEGs created there.
SCREENCAPTURES_TEMP_DIRECTORY_NAME = 'airmozilla-screencaps'
# Usernames of people who have contributed to Air Mozilla (as a contributor).
# This list is ordered! The first contributor comes first and the most
# recent contributor comes last.
# These usernames must exist in the
# https://mozillians.org/en-US/group/air-mozilla-contributors/ group.
CONTRIBUTORS = (
'onceuponatimeforever',
'bugZPDX',
'lcamacho',
'quentinp',
'leo',
'koddsson',
'KrystalYu',
'anuragchaudhury',
'gloriadwomoh',
'a-buck',
'anjalymehla',
'julian.alexander.murillo',
)
# Override this if you want to run the selenium based tests
RUN_SELENIUM_TESTS = False
# When enabled, together with DEBUG==True, by visiting /god-mode/ you
# can become anybody.
# This is a good tool for doing testing without doing any Persona auth.
GOD_MODE = False
# If you want to disable all of the browser ID stuff, set this to True.
# That means you won't be able to sign in at all. Or sign out.
BROWSERID_DISABLED = False
# How many times to try sending out an event tweet.
MAX_TWEET_ATTEMPTS = 3
# Where do we store jobs for the celery message queue
BROKER_URL = 'django://'
CELERY_ALWAYS_EAGER = False
BROKER_CONNECTION_TIMEOUT = 0.1
CELERYD_CONCURRENCY = 2
CELERY_IGNORE_RESULT = True
THUMBNAIL_BACKEND = 'optisorl.backend.OptimizingThumbnailBackend'
# This turns off the thumbnail optimizer's use of pngquant, so it's
# not used unless you explicitly turn it on.
PNGQUANT_LOCATION = None
# The user group where being a member means you get an email about
# all new event requests
NOTIFICATIONS_GROUP_NAME = 'Event Notifications'
# Adding prefix to airmozilla events index
ELASTICSEARCH_PREFIX = 'airmozilla'
ELASTICSEARCH_INDEX = 'events'
# legacy junk in settings/local.py on production deployments
BASE_PASSWORD_HASHERS = HMAC_KEYS = []
YOUTUBE_API_KEY = None
# You have to run `npm install` for this to be installed in `./node_modules`
PIPELINE = {
'STYLESHEETS': PIPELINE_CSS,
'JAVASCRIPT': PIPELINE_JS,
'JS_COMPRESSOR': 'pipeline.compressors.uglifyjs.UglifyJSCompressor',
'UGLIFYJS_BINARY': path('node_modules/.bin/uglifyjs'),
'UGLIFYJS_ARGUMENTS': '--mangle',
'CSS_COMPRESSOR': 'pipeline.compressors.cssmin.CSSMinCompressor',
'CSSMIN_BINARY': path('node_modules/.bin/cssmin'),
# Don't wrap javascript code in... `(...code...)();`
# because possibly much code has been built with the assumption that
# things will be made available globally.
'DISABLE_WRAPPER': True,
# The pipeline.jinja2.PipelineExtension extension doesn't support
    # automatically rendering any potential compilation errors into
# the rendered HTML, so just let it raise plain python exceptions.
'SHOW_ERRORS_INLINE': False,
}
POPCORN_EDITOR_CDN_URL = "//d2edlhmcxlovf.cloudfront.net"
# Fanout.io account defaults for a realm.
FANOUT_REALM_ID = FANOUT_REALM_KEY = None
# Content Security Policies
CSP_DEFAULT_SRC = (
"'self'",
)
CSP_FONT_SRC = (
"'self'",
'data:',
'air.cdn.mozilla.net',
'cdn.jsdelivr.net',
'ssl.p.jwpcdn.com',
)
CSP_IMG_SRC = (
"'self'",
'data:',
'vid.ly',
'air.cdn.mozilla.net',
'secure.gravatar.com',
'jwpltx.com', # used by JWPlayer
'ssl.google-analytics.com',
'mozillians.org',
'https://i0.wp.com', # used by mozillians.org
)
CSP_SCRIPT_SRC = (
"'self'",
"'unsafe-inline'", # needed for video templates
"'unsafe-eval'", # needed for hls.js
'cdn.jsdelivr.net',
'air.cdn.mozilla.net',
'vid.ly',
's.vid.ly',
'cf.cdn.vid.ly',
'ssl.p.jwpcdn.com', # Where vidly hosts the JWPlayer
'ssl.google-analytics.com',
'd132d9vcg4o0oh.cloudfront.net', # vid.ly's jwplayer.js
)
CSP_CONNECT_SRC = (
"'self'",
'vid.ly',
'mozillalives-i.akamaihd.net',
'autocompeter.com',
)
CSP_MEDIA_SRC = (
"'self'",
'blob:',
'vid.ly',
'mozillalives-i.akamaihd.net'
)
CSP_STYLE_SRC = (
"'self'",
"'unsafe-inline'",
'air.cdn.mozilla.net',
)
CSP_CHILD_SRC = (
"'self'",
'blob:',
'vid.ly',
'www.youtube-nocookie.com',
'login.persona.org',
)
# See https://bugzilla.mozilla.org/show_bug.cgi?id=1310807
SECURE_HSTS_SECONDS = 31536000
# See
# https://wiki.mozilla.org/Security/Guidelines/Web_Security#X-XSS-Protection
# Sets the 'X-XSS-Protection' header to '1; mode=block'
SECURE_BROWSER_XSS_FILTER = True
# See
# https://wiki.mozilla.org/Security/Guidelines/Web_Security#X-Content-Type-Options # noqa
SECURE_CONTENT_TYPE_NOSNIFF = True
REV_CLIENT_API_KEY = ''
REV_USER_API_KEY = ''
REV_BASE_URL = 'https://www.rev.com'
AUTH0_DOMAIN = 'auth.mozilla.auth0.com'
AUTH0_CLIENT_ID = 'YOUR_CLIENT_ID'
AUTH0_SECRET = 'YOUR_SECRET'
AUTH0_CALLBACK_URL = 'https://air.mozilla.org/authentication/callback/'
AUTH0_SUCCESS_URL = '/?logged=in'
AUTH0_PATIENCE_TIMEOUT = 5 # seconds
AUTH_SIGNOUT_URL = 'https://air.mozilla.org/?logged=out'
# This is the number of seconds that defines how often we check the
# logged-in users' id_token (if they have one).
# If this number is too low, too many requests on this site are blocked
# on talking to Auth0. If it's too high, we're not being secure enough.
RENEW_ID_TOKEN_EXPIRY_SECONDS = 60 * 15 # 15 min
VIDLY_VIDEO_URL_FORMAT = 'https://vid.ly/{}?content=video&format=webm'
VIDLY_POSTER_URL_FORMAT = 'https://vid.ly/{}/poster'
|
blossomica/airmozilla
|
airmozilla/settings/base.py
|
Python
|
bsd-3-clause
| 15,787
|
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Implements commands for running and interacting with Fuchsia on QEMU."""
import boot_data
import common
import emu_target
import hashlib
import logging
import os
import platform
import qemu_image
import shutil
import subprocess
import sys
import tempfile
from common import GetHostArchFromPlatform, GetEmuRootForPlatform
from common import EnsurePathExists
from qemu_image import ExecQemuImgWithRetry
from target import FuchsiaTargetException
# Virtual networking configuration data for QEMU.
HOST_IP_ADDRESS = '10.0.2.2'
GUEST_MAC_ADDRESS = '52:54:00:63:5e:7b'
# Capacity of the system's blobstore volume.
EXTENDED_BLOBSTORE_SIZE = 2147483648 # 2GB
def GetTargetType():
return QemuTarget
class QemuTarget(emu_target.EmuTarget):
EMULATOR_NAME = 'qemu'
def __init__(self, out_dir, target_cpu, cpu_cores, require_kvm, ram_size_mb,
logs_dir):
super(QemuTarget, self).__init__(out_dir, target_cpu, logs_dir)
    self._cpu_cores = cpu_cores
    self._require_kvm = require_kvm
    self._ram_size_mb = ram_size_mb
@staticmethod
def CreateFromArgs(args):
return QemuTarget(args.out_dir, args.target_cpu, args.cpu_cores,
args.require_kvm, args.ram_size_mb, args.logs_dir)
def _IsKvmEnabled(self):
kvm_supported = sys.platform.startswith('linux') and \
os.access('/dev/kvm', os.R_OK | os.W_OK)
same_arch = \
(self._target_cpu == 'arm64' and platform.machine() == 'aarch64') or \
(self._target_cpu == 'x64' and platform.machine() == 'x86_64')
if kvm_supported and same_arch:
return True
elif self._require_kvm:
if same_arch:
if not os.path.exists('/dev/kvm'):
kvm_error = 'File /dev/kvm does not exist. Please install KVM first.'
else:
kvm_error = 'To use KVM acceleration, add user to the kvm group '\
'with "sudo usermod -a -G kvm $USER". Log out and back '\
'in for the change to take effect.'
raise FuchsiaTargetException(kvm_error)
else:
raise FuchsiaTargetException('KVM unavailable when CPU architecture '\
'of host is different from that of'\
' target. See --allow-no-kvm.')
else:
return False
def _BuildQemuConfig(self):
boot_data.AssertBootImagesExist(self._GetTargetSdkArch(), 'qemu')
emu_command = [
'-kernel',
EnsurePathExists(
boot_data.GetTargetFile('qemu-kernel.kernel',
self._GetTargetSdkArch(),
boot_data.TARGET_TYPE_QEMU)),
'-initrd',
EnsurePathExists(
boot_data.GetBootImage(self._out_dir, self._GetTargetSdkArch(),
boot_data.TARGET_TYPE_QEMU)),
'-m',
str(self._ram_size_mb),
'-smp',
str(self._cpu_cores),
# Attach the blobstore and data volumes. Use snapshot mode to discard
# any changes.
'-snapshot',
'-drive',
'file=%s,format=qcow2,if=none,id=blobstore,snapshot=on' %
_EnsureBlobstoreQcowAndReturnPath(self._out_dir,
self._GetTargetSdkArch()),
'-object',
'iothread,id=iothread0',
'-device',
'virtio-blk-pci,drive=blobstore,iothread=iothread0',
# Use stdio for the guest OS only; don't attach the QEMU interactive
# monitor.
'-serial',
'stdio',
'-monitor',
'none',
]
# Configure the machine to emulate, based on the target architecture.
if self._target_cpu == 'arm64':
emu_command.extend([
          '-machine', 'virt,gic-version=3',
])
else:
emu_command.extend([
'-machine', 'q35',
])
# Configure virtual network.
netdev_type = 'virtio-net-pci'
netdev_config = 'type=user,id=net0,restrict=off'
self._host_ssh_port = common.GetAvailableTcpPort()
netdev_config += ",hostfwd=tcp::%s-:22" % self._host_ssh_port
emu_command.extend([
'-netdev', netdev_config,
'-device', '%s,netdev=net0,mac=%s' % (netdev_type, GUEST_MAC_ADDRESS),
])
# Configure the CPU to emulate.
# On Linux, we can enable lightweight virtualization (KVM) if the host and
# guest architectures are the same.
if self._IsKvmEnabled():
kvm_command = ['-enable-kvm', '-cpu']
if self._target_cpu == 'arm64':
kvm_command.append('host')
else:
kvm_command.append('host,migratable=no,+invtsc')
else:
logging.warning('Unable to launch %s with KVM acceleration. '
'The guest VM will be slow.' % (self.EMULATOR_NAME))
if self._target_cpu == 'arm64':
kvm_command = ['-cpu', 'cortex-a53']
else:
kvm_command = ['-cpu', 'Haswell,+smap,-check,-fsgsbase']
emu_command.extend(kvm_command)
kernel_args = boot_data.GetKernelArgs()
# TERM=dumb tells the guest OS to not emit ANSI commands that trigger
# noisy ANSI spew from the user's terminal emulator.
kernel_args.append('TERM=dumb')
# Construct kernel cmd line
kernel_args.append('kernel.serial=legacy')
# Don't 'reboot' the emulator if the kernel crashes
kernel_args.append('kernel.halt-on-panic=true')
emu_command.extend(['-append', ' '.join(kernel_args)])
return emu_command
def _BuildCommand(self):
if self._target_cpu == 'arm64':
qemu_exec = 'qemu-system-' + 'aarch64'
elif self._target_cpu == 'x64':
qemu_exec = 'qemu-system-' + 'x86_64'
else:
      raise Exception('Unknown target_cpu: %s' % self._target_cpu)
qemu_command = [
os.path.join(GetEmuRootForPlatform(self.EMULATOR_NAME), 'bin',
qemu_exec)
]
qemu_command.extend(self._BuildQemuConfig())
qemu_command.append('-nographic')
return qemu_command
def _ComputeFileHash(filename):
hasher = hashlib.md5()
with open(filename, 'rb') as f:
buf = f.read(4096)
while buf:
hasher.update(buf)
buf = f.read(4096)
return hasher.hexdigest()
def _EnsureBlobstoreQcowAndReturnPath(out_dir, target_arch):
"""Returns a file containing the Fuchsia blobstore in a QCOW format,
with extra buffer space added for growth."""
qimg_tool = os.path.join(common.GetEmuRootForPlatform('qemu'),
'bin', 'qemu-img')
fvm_tool = common.GetHostToolPathFromPlatform('fvm')
blobstore_path = boot_data.GetTargetFile('storage-full.blk', target_arch,
'qemu')
qcow_path = os.path.join(out_dir, 'gen', 'blobstore.qcow')
# Check a hash of the blobstore to determine if we can re-use an existing
# extended version of it.
blobstore_hash_path = os.path.join(out_dir, 'gen', 'blobstore.hash')
current_blobstore_hash = _ComputeFileHash(blobstore_path)
if os.path.exists(blobstore_hash_path) and os.path.exists(qcow_path):
if current_blobstore_hash == open(blobstore_hash_path, 'r').read():
return qcow_path
# Add some extra room for growth to the Blobstore volume.
# Fuchsia is unable to automatically extend FVM volumes at runtime so the
# volume enlargement must be performed prior to QEMU startup.
# The 'fvm' tool only supports extending volumes in-place, so make a
# temporary copy of 'blobstore.bin' before it's mutated.
extended_blobstore = tempfile.NamedTemporaryFile()
shutil.copyfile(blobstore_path, extended_blobstore.name)
subprocess.check_call([fvm_tool, extended_blobstore.name, 'extend',
'--length', str(EXTENDED_BLOBSTORE_SIZE),
blobstore_path])
# Construct a QCOW image from the extended, temporary FVM volume.
# The result will be retained in the build output directory for re-use.
qemu_img_cmd = [qimg_tool, 'convert', '-f', 'raw', '-O', 'qcow2',
'-c', extended_blobstore.name, qcow_path]
# TODO(crbug.com/1046861): Remove arm64 call with retries when bug is fixed.
if common.GetHostArchFromPlatform() == 'arm64':
qemu_image.ExecQemuImgWithRetry(qemu_img_cmd)
else:
subprocess.check_call(qemu_img_cmd)
# Write out a hash of the original blobstore file, so that subsequent runs
# can trivially check if a cached extended FVM volume is available for reuse.
with open(blobstore_hash_path, 'w') as blobstore_hash_file:
blobstore_hash_file.write(current_blobstore_hash)
return qcow_path
|
ric2b/Vivaldi-browser
|
chromium/build/fuchsia/qemu_target.py
|
Python
|
bsd-3-clause
| 8,672
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# TODO(nduca): Rewrite some of these tests to use mocks instead of
# actually talking to the device. This would improve our coverage quite
# a bit.
import unittest
from chrome_remote_control import cros_interface
from chrome_remote_control import options_for_unittests
from chrome_remote_control import run_tests
class CrOSInterfaceTest(unittest.TestCase):
@run_tests.RequiresBrowserOfType('cros-chrome')
def testDeviceSideProcessFailureToLaunch(self):
remote = options_for_unittests.Get().cros_remote
cri = cros_interface.CrOSInterface(
remote,
options_for_unittests.Get().cros_ssh_identity)
def WillFail():
dsp = cros_interface.DeviceSideProcess(
cri,
['sfsdfskjflwejfweoij'])
dsp.Close()
self.assertRaises(OSError, WillFail)
@run_tests.RequiresBrowserOfType('cros-chrome')
def testDeviceSideProcessCloseDoesClose(self):
remote = options_for_unittests.Get().cros_remote
cri = cros_interface.CrOSInterface(
remote,
options_for_unittests.Get().cros_ssh_identity)
with cros_interface.DeviceSideProcess(
cri,
['sleep', '111']) as dsp:
procs = cri.ListProcesses()
sleeps = [x for x in procs
if x[1] == 'sleep 111']
assert dsp.IsAlive()
procs = cri.ListProcesses()
sleeps = [x for x in procs
if x[1] == 'sleep 111']
self.assertEquals(len(sleeps), 0)
@run_tests.RequiresBrowserOfType('cros-chrome')
def testPushContents(self):
remote = options_for_unittests.Get().cros_remote
cri = cros_interface.CrOSInterface(
remote,
options_for_unittests.Get().cros_ssh_identity)
cri.GetCmdOutput(['rm', '-rf', '/tmp/testPushContents'])
cri.PushContents('hello world', '/tmp/testPushContents')
contents = cri.GetFileContents('/tmp/testPushContents')
self.assertEquals(contents, 'hello world')
@run_tests.RequiresBrowserOfType('cros-chrome')
def testExists(self):
remote = options_for_unittests.Get().cros_remote
cri = cros_interface.CrOSInterface(
remote,
options_for_unittests.Get().cros_ssh_identity)
self.assertTrue(cri.FileExistsOnDevice('/proc/cpuinfo'))
self.assertTrue(cri.FileExistsOnDevice('/etc/passwd'))
self.assertFalse(cri.FileExistsOnDevice('/etc/sdlfsdjflskfjsflj'))
@run_tests.RequiresBrowserOfType('cros-chrome')
def testGetFileContents(self): # pylint: disable=R0201
remote = options_for_unittests.Get().cros_remote
cri = cros_interface.CrOSInterface(
remote,
options_for_unittests.Get().cros_ssh_identity)
hosts = cri.GetFileContents('/etc/hosts')
assert hosts.startswith('# /etc/hosts')
@run_tests.RequiresBrowserOfType('cros-chrome')
def testGetFileContentsForSomethingThatDoesntExist(self):
remote = options_for_unittests.Get().cros_remote
cri = cros_interface.CrOSInterface(
remote,
options_for_unittests.Get().cros_ssh_identity)
self.assertRaises(
OSError,
lambda: cri.GetFileContents('/tmp/209fuslfskjf/dfsfsf'))
@run_tests.RequiresBrowserOfType('cros-chrome')
def testListProcesses(self): # pylint: disable=R0201
remote = options_for_unittests.Get().cros_remote
cri = cros_interface.CrOSInterface(
remote,
options_for_unittests.Get().cros_ssh_identity)
with cros_interface.DeviceSideProcess(
cri,
['sleep', '11']):
procs = cri.ListProcesses()
sleeps = [x for x in procs
if x[1] == 'sleep 11']
assert len(sleeps) == 1
@run_tests.RequiresBrowserOfType('cros-chrome')
def testIsServiceRunning(self):
remote = options_for_unittests.Get().cros_remote
cri = cros_interface.CrOSInterface(
remote,
options_for_unittests.Get().cros_ssh_identity)
self.assertTrue(cri.IsServiceRunning('openssh-server'))
|
junmin-zhu/chromium-rivertrail
|
tools/chrome_remote_control/chrome_remote_control/cros_interface_unittest.py
|
Python
|
bsd-3-clause
| 4,005
|
'''Extron USB Switcher - see https://www.extron.com/download/files/userman/68-1517-01_F_SW_USB_UG_f.pdf'''
# <!-- parameters
param_disabled = Parameter({'desc': 'Disables this node', 'schema': {'type': 'boolean'}})
param_ipAddress = Parameter({ 'title': 'IP Address (normally of serial bridge)', 'schema': {'type': 'string' }})
DEFAULT_PORT = 4999
param_port = Parameter({'schema': {'type': 'integer', 'hint': '%s (default)' % DEFAULT_PORT}})
# -->
InputCount_byPartNum = {
'60-954-02': 4, # SW4 USB Plus
'60-952-02': 2, # SW2
'60-953-02': 4, # SW4
}
# <!-- main entry-point
def main():
console.info("Recipe has started!")
# -->
# <!-- Extron protocol
def checkForErrors(resp, onSuccess):
if resp.startswith('E'):
raise Exception('Got error code [%s]' % resp)
lastReceive[0] = system_clock()
onSuccess(resp)
local_event_Firmware = LocalEvent({'group': 'Device Info', 'order': next_seq(), 'schema': {'type': 'string'}})
@local_action({'group': 'Device Info', 'order': next_seq()})
def pollFirmware():
tcp.request('q', lambda raw: checkForErrors(raw, lambda resp: local_event_Firmware.emit(resp)))
# e.g. select input
# >> 2!
# << Chn2
local_event_Input = LocalEvent({'group': 'Switching', 'order': next_seq(), 'schema': {'type': 'integer'}})
def handleSwitchResp(resp):
# e.g. "Chn2" or "Chn3" OR
# "Chn4 InACT0110 OutACT0000 Emul11" from poll input
if not resp.startswith('Chn'):
raise Warning('Unexpected switch resp [%s]' % resp)
i = int(resp[3])
local_event_Input.emit(i)
@local_action({'group': 'Switching', 'order': next_seq(), 'schema': {'type': 'integer'}})
def selectInput(arg):
tcp.request('%s!' % arg, lambda raw: checkForErrors(raw, handleSwitchResp))
# e.g. request information:
# >> I
# << Chn4 InACT0110 OutACT0000 Emul11
@local_action({'group': 'Switching', 'order': next_seq()})
def pollInput():
tcp.request('I', lambda raw: checkForErrors(raw, handleSwitchResp))
poller = Timer(lambda: pollInput.call(), 10, 5)
local_event_PartNumber = LocalEvent({'group': 'Device Info', 'order': next_seq(), 'schema': {'type': 'string'}})
@local_action({'group': 'Device Info', 'order': next_seq()})
def pollPartNumber():
def handler(arg):
local_event_PartNumber.emit(arg)
tcp.request('N', lambda raw: checkForErrors(raw, handler))
part_poller = Timer(lambda: pollPartNumber.call(), 60, 6)
# dynamically created discrete input switching actions and signals
@after_main
def handleDynamicConfig():
def handler(value):
# check if already completed
if lookup_local_action('Input 1'):
return
inCount = InputCount_byPartNum.get(value)
if not inCount:
console.warn('Unknown model; assuming has 4 inputs')
inCount = 4
for i in range(1, inCount+1):
bindDynamicInput(i)
local_event_PartNumber.addEmitHandler(handler)
def bindDynamicInput(i):
switchAction = Action('Input %s' % i, lambda ignore: selectInput.call(i), {'title': 'Input %s' % i, 'group': 'Switching', 'order': next_seq()})
switchedSignal = Event('Input %s' % i, {'group': 'Switching', 'order': next_seq(), 'schema': {'type': 'boolean'}})
def onSwitch(switchedInput):
switchedSignal.emitIfDifferent(i == switchedInput)
local_event_Input.addEmitHandler(onSwitch)
def slaveSwitch(arg):
if arg == None or arg: # handle both state and stateless arg
switchAction.call()
# optional slave switching
remoteSwitching = create_remote_event('Slave Switch Input %s' % i,
slaveSwitch,
{'group': 'Switching', 'order': next_seq()})
# -->
# <!-- TCP: this section demonstrates some TCP functions
def tcp_connected():
console.info('tcp_connected')
tcp.clearQueue()
def tcp_disconnected():
console.warn('tcp_disconnected')
def tcp_timeout():
console.warn('tcp_timeout')
def tcp_sent(data):
log(1, "tcp_sent [%s]" % data)
def tcp_received(data):
log(1, "tcp_received [%s]" % data)
tcp = TCP(connected=tcp_connected,
disconnected=tcp_disconnected,
sent=tcp_sent,
received=tcp_received,
timeout=tcp_timeout,
sendDelimiters=None,
receiveDelimiters='\r\n')
@after_main # another main entry-point
def setup_tcp():
if param_disabled:
console.warn('Node is disabled; will not connect TCP')
return
if not param_ipAddress:
console.warn('IP address has not been specified')
return
dest = '%s:%s' % (param_ipAddress, param_port or DEFAULT_PORT)
console.info('Will connect to TCP %s' % dest)
tcp.setDest(dest)
# <status and error reporting ---
# for comms drop-out
lastReceive = [0]
# roughly, the last contact
local_event_LastContactDetect = LocalEvent({'group': 'Status', 'order': 99999+next_seq(), 'title': 'Last contact detect', 'schema': {'type': 'string'}})
local_event_Status = LocalEvent({'group': 'Status', 'order': 99999+next_seq(), 'schema': {'type': 'object', 'properties': {
'level': {'type': 'integer', 'order': 1},
'message': {'type': 'string', 'order': 2}}}})
def statusCheck():
diff = (system_clock() - lastReceive[0])/1000.0 # (in secs)
now = date_now()
if diff > status_check_interval+15:
previousContactValue = local_event_LastContactDetect.getArg()
if previousContactValue == None:
message = 'Always been missing.'
else:
previousContact = date_parse(previousContactValue)
roughDiff = (now.getMillis() - previousContact.getMillis())/1000/60
if roughDiff < 60:
message = 'Missing for approx. %s mins' % roughDiff
elif roughDiff < (60*24):
message = 'Missing since %s' % previousContact.toString('h:mm:ss a')
else:
message = 'Missing since %s' % previousContact.toString('h:mm:ss a, E d-MMM')
local_event_Status.emit({'level': 2, 'message': message})
else:
# update contact info
local_event_LastContactDetect.emit(str(now))
# TODO: check internal device status if possible
local_event_LastContactDetect.emit(str(now))
local_event_Status.emit({'level': 0, 'message': 'OK'})
status_check_interval = 75
status_timer = Timer(statusCheck, status_check_interval)
# --!>
# <!-- logging
local_event_LogLevel = LocalEvent({'group': 'Debug', 'order': 10000+next_seq(), 'desc': 'Use this to ramp up the logging (with indentation)',
'schema': {'type': 'integer'}})
def warn(level, msg):
if local_event_LogLevel.getArg() >= level:
console.warn((' ' * level) + msg)
def log(level, msg):
if local_event_LogLevel.getArg() >= level:
console.log((' ' * level) + msg)
# --!>
|
museumvictoria/nodel-recipes
|
Extron USB Series USB Switcher/script.py
|
Python
|
mit
| 6,816
|
#!/usr/bin/env python
"""
Created on Fri Jun 28 14:48:56 2013
@author: jb
"""
def sqlQueryMetatags(style,f):
import sqlalchemy
orcl_engine = sqlalchemy.create_engine('oracle+cx_oracle://jbragato:Blu3f!y@192.168.30.66:1531/dssprd1')
connection = orcl_engine.connect()
querymake_metatags="""SELECT DISTINCT
POMGR_SNP.PRODUCT_COLOR.ID AS colorstyle,
POMGR_SNP.BRAND.NAME AS brand,
POMGR_SNP.COLOR_GROUP.DESCRIPTION AS color_group,
POMGR_SNP.PRODUCT_FOLDER_DENORMALIZED.LABEL AS category_parent,
POMGR_SNP.PRODUCT_FOLDER.LABEL AS category_sub,
MAX(ATG_SNP.EVENT.ID) AS event_id,
ATG_SNP.EVENT.EVENT_DESCRIPTION AS event_title,
POMGR_SNP.PRODUCT_FOLDER_DENORMALIZED.PATH AS product_path,
ATG_SNP.EVENT.SHOT_LIST_DATE AS shot_list_dt,
ATG_SNP.EVENT.BRAND_EDITORIAL AS brand_editorial,
ATG_SNP.EVENT.CATEGORY AS cat_id,
POMGR_SNP.PRODUCT_COLOR.VENDOR_STYLE AS vendor_style,
POMGR_SNP.LK_PRODUCT_STATUS.NAME AS production_status
FROM
POMGR_SNP.PRODUCT_COLOR
LEFT JOIN ATG_SNP.EVENT_PRODUCT_COLOR
ON
POMGR_SNP.PRODUCT_COLOR.ID = ATG_SNP.EVENT_PRODUCT_COLOR.PRODUCT_COLOR_ID
LEFT JOIN POMGR_SNP.LK_PRODUCT_STATUS
ON
POMGR_SNP.PRODUCT_COLOR.PRODUCTION_STATUS_ID = POMGR_SNP.LK_PRODUCT_STATUS.ID
LEFT JOIN ATG_SNP.EVENT
ON
ATG_SNP.EVENT_PRODUCT_COLOR.EVENT_ID = ATG_SNP.EVENT.ID
LEFT JOIN POMGR_SNP.PRODUCT
ON
POMGR_SNP.PRODUCT_COLOR.PRODUCT_ID = POMGR_SNP.PRODUCT.ID
LEFT JOIN POMGR_SNP.PRODUCT_FOLDER
ON
POMGR_SNP.PRODUCT.PRODUCT_FOLDER_ID = POMGR_SNP.PRODUCT_FOLDER.ID
LEFT JOIN POMGR_SNP.BRAND
ON
POMGR_SNP.PRODUCT.BRAND_ID = POMGR_SNP.BRAND.ID
LEFT JOIN POMGR_SNP.PRODUCT_FOLDER_DENORMALIZED
ON
POMGR_SNP.PRODUCT_FOLDER.PARENT_PRODUCT_FOLDER_ID =
POMGR_SNP.PRODUCT_FOLDER_DENORMALIZED.ID
LEFT JOIN POMGR_SNP.COLOR_GROUP
ON
POMGR_SNP.PRODUCT_COLOR.COLOR_GROUP_ID = POMGR_SNP.COLOR_GROUP.ID
WHERE
POMGR_SNP.PRODUCT_COLOR.ID = COLORSTYLESEARCH
GROUP BY
POMGR_SNP.PRODUCT_COLOR.ID,
POMGR_SNP.BRAND.NAME,
POMGR_SNP.PRODUCT_FOLDER_DENORMALIZED.LABEL,
POMGR_SNP.PRODUCT_FOLDER.LABEL,
ATG_SNP.EVENT.EVENT_DESCRIPTION,
POMGR_SNP.COLOR_GROUP.DESCRIPTION,
POMGR_SNP.PRODUCT_FOLDER_DENORMALIZED.PATH,
POMGR_SNP.PRODUCT_COLOR.VENDOR_STYLE,
ATG_SNP.EVENT.SHOT_LIST_DATE,
ATG_SNP.EVENT.BRAND_EDITORIAL,
ATG_SNP.EVENT.CATEGORY,
POMGR_SNP.LK_PRODUCT_STATUS.NAME
ORDER BY
POMGR_SNP.PRODUCT_COLOR.ID DESC"""
## --POMGR_SNP.PRODUCT_COLOR.MODIFIED_DATE >= TRUNC(SysDate - 365)
## --RENAME INPUT VARIABLE PRIOR TO QUERY
querymake_metatags = querymake_metatags.replace('COLORSTYLESEARCH', str(style))
result = connection.execute(querymake_metatags)
metatags = {}
for row in result:
metatag = {}
# metatag['colorstyle'] = row['colorstyle']
# metatag['IPTC:PONumber'] = row['po_num']
metatag['IPTC:VendorStyle'] = row['vendor_style']
metatag['IPTC:Brand'] = row['brand']
metatag['XMP:Genre'] = row['color_group']
metatag['IPTC:ProductType'] = row['category_sub']
metatag['EventID'] = row['event_id']
try:
metatag['XMP:Album'] = "EventID " + str(row['event_id'])
except:
pass
metatag['IPTC:Credit'] = row['product_path']
metatag['IPTC:CopyrightNotice'] = row['brand']
metatag['IPTC:SpecialInstructions'] = row['production_status']
metatag['Keywords'] = row['category_parent']
metatag['IPTC:Source'] = row['shot_list_dt']
# metatag['IPTC:SpecialInstructions'] = '{:%Y-%m-%d}'.format(metatag['brand_editorial'])
# metatag['IPTC:SampleStatusDate'] = '{:%Y-%m-%d}'.format(row['sample_dt'])
# metatag['IPTC:Source'] = '{:%Y-%m-%d}'.format(row['sample_dt'])
# metatag['IPTC:Source'] = row['sample_dt']
# metatag['SourceFile'] = f
## file path as dict KEY
metatags[f] = metatag
## colorstyle as dict KEY
#metatags[row['colorstyle']] = metatag
connection.close()
return metatags
def get_dbinfo_for_metatags_singlefile(f):
import os
metafield_dict = {}
listed = []
stylefile = os.path.basename(f)
style = stylefile.split('_')[0]
#print style, f
### string = key/val as k=filepath, val=all metadata as k/v pairs
exiftoolstring = sqlQueryMetatags(style,f)
#pairs = zip(exiftoolstring.values(), exiftoolstring.keys())
for k,v in exiftoolstring.iteritems():
tmpd = {}
for val in v:
tmpd[val] = v[val]
listed.append(tmpd)
metafield_dict[k] = tmpd
return metafield_dict
#return listed
def embed_exif_metadata(image_filepath, exiftag=None, exifvalue=None):
from PIL import Image
import pyexiv2
# Read EXIF data to initialize
image_metadata = pyexiv2.ImageMetadata(image_filepath)
image_metadata.read()
# Add and Write new Tag to File
image_metadata[exiftag] = exifvalue
image_metadata.write()
return image_filepath
def get_exif_metadata_value(image_filepath, exiftag=None, exifvalue=None):
from PIL import Image
import pyexiv2
if exifvalue:
pass
else:
# Read EXIF data to initialize
image_metadata = pyexiv2.ImageMetadata(image_filepath)
metadata = image_metadata.read()
# Add and Write new Tag to File
exifvalue = metadata[exiftag]
# image_metadata[exiftag] = exifvalue
# image_metadata.write()
# else:
# print "Not Yet Built"
return image_filepath
def write_metadata_file(filename):
import sys
import os
import glob
import sqlalchemy
metadict = get_dbinfo_for_metatags_singlefile(filename)
exiftags = []
exifdict = {}
for k,v in metadict.items():
metatags = []
for val in v:
#m = []
filename = str(k)
exiftag = val
exifvalue = v[val]
#exifpart = str(' -' + "'" + str(exiftag) + "=" + str(exifvalue) + "'" + ''),
exifpart = "-'{exiftag}'='{exifvalue}'".format(exiftag=exiftag,exifvalue=exifvalue)
metatags.append(exifpart)
#print metatags
#m.append(exifpart)
#print val,v[val]
#exifdict[filename] = [x for x in metatags]
#metatags = (str(tag) for tag in metatags)
exifdict[filename] = " ".join(metatags)
execlist = []
for key,value in exifdict.iteritems():
execstring = "exiftool -m -overwrite_original_in_place -fast2 -q {0} {1}".format(value,key)
execlist.append(execstring)
for line in execlist:
try:
os.system(line)
print line
except:
pass
def recursive_dirlist(rootdir):
import os, re
regex_raw = re.compile(r'.+?.CR2$')
regex_jpg = re.compile(r'.+?.jpg$')
walkedlist = []
for dirname, subdirnames, filenames in os.walk(rootdir):
# append path of all filenames to walkedlist
for filename in filenames:
file_path = os.path.abspath(os.path.join(dirname, filename))
if os.path.isfile(file_path):
if re.findall(regex_raw, file_path):
walkedlist.append(file_path)
# Advanced usage:
# editing the 'dirnames' list will stop os.walk() from recursing into there.
#if '.git' in dirnames:
# don't go into any .git directories.
# dirnames.remove('.git')
walkedset = list(set(sorted(walkedlist)))
return walkedset
##################### Begin CMDS ##############
import sys
import os
import glob
import sqlalchemy
if sys.argv[1]:
rootdir = sys.argv[1]
else:
rootdir = '.'
files_to_tag = recursive_dirlist(rootdir)
for filename in files_to_tag:
try:
write_metadata_file(filename)
except:
print "Failed to Write to {}".format(filename)
|
relic7/prodimages
|
python/mtags_multifile_RAWJPG.py
|
Python
|
mit
| 8,244
|
"""
Lesson 5 - fly at given height
"""
import sys
sys.path.append('..') # access to drone source without installation
from ardrone2 import ARDrone2, ManualControlException
def testLesson5( drone ):
desiredSpeed = 0.8 # in meters per second
desiredHeight = 1.5
try:
drone.takeoff()
startTime = drone.time
while drone.time - startTime < 10.0:
altitude = desiredHeight
if drone.altitudeData != None:
altVision = drone.altitudeData[0]/1000.0
altSonar = drone.altitudeData[3]/1000.0
altitude = (altSonar+altVision)/2.0
if abs(altSonar-altVision) > 0.5:
print altSonar, altVision
altitude = max( altSonar, altVision ) # sonar is 0.0 sometimes (no ECHO)
sz = max( -0.2, min( 0.2, desiredHeight - altitude ))
if altitude > 2.5:
# wind and "out of control"
sz = max( -0.5, min( 0.5, desiredHeight - altitude ))
sx = max( 0, min( drone.speed, desiredSpeed - drone.vx ))
sy, sa = 0.0, 0.0
drone.moveXYZA( sx, sy, sz, sa )
except ManualControlException, e:
print "ManualControlException"
if drone.ctrlState == 3: # CTRL_FLYING=3 ... i.e. stop the current motion
drone.hover(0.1)
drone.land()
print "Battery", drone.battery
if __name__ == "__main__":
import launcher
launcher.launch( sys.argv, ARDrone2, testLesson5 )
# vim: expandtab sw=4 ts=4
|
TeaPackCZ/heidi
|
guide/lesson5.py
|
Python
|
mit
| 1,545
|
#!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Copyright (c) 2015-2019 The Mincoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test multiple rpc user config option rpcauth
#
from test_framework.test_framework import MincoinTestFramework
from test_framework.util import str_to_b64str, assert_equal
import os
import http.client
import urllib.parse
class HTTPBasicsTest (MincoinTestFramework):
def __init__(self):
super().__init__()
self.setup_clean_chain = False
self.num_nodes = 1
def setup_chain(self):
super().setup_chain()
#Append rpcauth to mincoin.conf before initialization
rpcauth = "rpcauth=rt:93648e835a54c573682c2eb19f882535$7681e9c5b74bdd85e78166031d2058e1069b3ed7ed967c93fc63abba06f31144"
rpcauth2 = "rpcauth=rt2:f8607b1a88861fac29dfccf9b52ff9f$ff36a0c23c8c62b4846112e50fa888416e94c17bfd4c42f88fd8f55ec6a3137e"
with open(os.path.join(self.options.tmpdir+"/node0", "mincoin.conf"), 'a', encoding='utf8') as f:
f.write(rpcauth+"\n")
f.write(rpcauth2+"\n")
def setup_network(self):
self.nodes = self.setup_nodes()
def run_test(self):
##################################################
# Check correctness of the rpcauth config option #
##################################################
url = urllib.parse.urlparse(self.nodes[0].url)
#Old authpair
authpair = url.username + ':' + url.password
#New authpair generated via share/rpcuser tool
rpcauth = "rpcauth=rt:93648e835a54c573682c2eb19f882535$7681e9c5b74bdd85e78166031d2058e1069b3ed7ed967c93fc63abba06f31144"
password = "cA773lm788buwYe4g4WT+05pKyNruVKjQ25x3n0DQcM="
#Second authpair with different username
rpcauth2 = "rpcauth=rt2:f8607b1a88861fac29dfccf9b52ff9f$ff36a0c23c8c62b4846112e50fa888416e94c17bfd4c42f88fd8f55ec6a3137e"
password2 = "8/F3uMDw4KSEbw96U3CA1C4X05dkHDN2BPFjTgZW4KI="
authpairnew = "rt:"+password
headers = {"Authorization": "Basic " + str_to_b64str(authpair)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status==401, False)
conn.close()
#Use new authpair to confirm both work
headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status==401, False)
conn.close()
#Wrong login name with rt's password
authpairnew = "rtwrong:"+password
headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status==401, True)
conn.close()
#Wrong password for rt
authpairnew = "rt:"+password+"wrong"
headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status==401, True)
conn.close()
#Correct for rt2
authpairnew = "rt2:"+password2
headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status==401, False)
conn.close()
#Wrong password for rt2
authpairnew = "rt2:"+password2+"wrong"
headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status==401, True)
conn.close()
if __name__ == '__main__':
HTTPBasicsTest ().main ()
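# --- Illustrative sketch (not part of the original test) ---
# The rpcauth lines used above have the form "rpcauth=<user>:<salt>$<hmac>".
# To my understanding of the share/rpcuser tool, the hex digest is an
# HMAC-SHA256 of the plaintext password keyed with the salt; roughly:
#
# import hmac, hashlib, os
# def make_rpcauth_line(user, password):
#     salt = os.urandom(16).hex()
#     digest = hmac.new(salt.encode(), password.encode(), hashlib.sha256).hexdigest()
#     return "rpcauth=%s:%s$%s" % (user, salt, digest)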
|
xieta/mincoin
|
qa/rpc-tests/multi_rpc.py
|
Python
|
mit
| 4,629
|
# Copyright (c) 2021, Frappe Technologies and contributors
# License: MIT. See LICENSE
import hashlib
import frappe
from frappe import _
from frappe.model.document import Document
from frappe.query_builder import DocType
from frappe.utils import cint, now_datetime
class TransactionLog(Document):
def before_insert(self):
index = get_current_index()
self.row_index = index
self.timestamp = now_datetime()
if index != 1:
prev_hash = frappe.get_all("Transaction Log", filters={"row_index":str(index-1)}, pluck="chaining_hash", limit=1)
if prev_hash:
self.previous_hash = prev_hash[0]
else:
self.previous_hash = "Indexing broken"
else:
self.previous_hash = self.hash_line()
self.transaction_hash = self.hash_line()
self.chaining_hash = self.hash_chain()
self.checksum_version = "v1.0.1"
def hash_line(self):
sha = hashlib.sha256()
sha.update(
frappe.safe_encode(str(self.row_index))
+ frappe.safe_encode(str(self.timestamp))
+ frappe.safe_encode(str(self.data))
)
return sha.hexdigest()
def hash_chain(self):
sha = hashlib.sha256()
sha.update(frappe.safe_encode(str(self.transaction_hash)) + frappe.safe_encode(str(self.previous_hash)))
return sha.hexdigest()
def get_current_index():
series = DocType("Series")
current = (
frappe.qb.from_(series)
.where(series.name == "TRANSACTLOG")
.for_update()
.select("current")
).run()
if current and current[0][0] is not None:
current = current[0][0]
frappe.db.sql("""UPDATE `tabSeries`
SET `current` = `current` + 1
where `name` = 'TRANSACTLOG'""")
current = cint(current) + 1
else:
frappe.db.sql("INSERT INTO `tabSeries` (name, current) VALUES ('TRANSACTLOG', 1)")
current = 1
return current
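# --- Illustrative sketch (not part of the original module) ---
# A minimal, hypothetical helper showing how the chaining written by
# before_insert() could be verified offline: transaction_hash is SHA-256 over
# (row_index, timestamp, data), and chaining_hash ties it to previous_hash,
# which in turn should equal the previous row's chaining_hash.
def verify_chain_sketch(rows):
    """rows: dicts ordered by row_index, each holding row_index, timestamp,
    data, previous_hash, transaction_hash and chaining_hash."""
    prev_chaining_hash = None
    for row in rows:
        line = hashlib.sha256()
        line.update(
            frappe.safe_encode(str(row["row_index"]))
            + frappe.safe_encode(str(row["timestamp"]))
            + frappe.safe_encode(str(row["data"]))
        )
        if line.hexdigest() != row["transaction_hash"]:
            return False
        chain = hashlib.sha256()
        chain.update(
            frappe.safe_encode(str(row["transaction_hash"]))
            + frappe.safe_encode(str(row["previous_hash"]))
        )
        if chain.hexdigest() != row["chaining_hash"]:
            return False
        if prev_chaining_hash is not None and row["previous_hash"] != prev_chaining_hash:
            return False
        prev_chaining_hash = row["chaining_hash"]
    return True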
|
almeidapaulopt/frappe
|
frappe/core/doctype/transaction_log/transaction_log.py
|
Python
|
mit
| 1,734
|
import os
import sys
import re
import types
import itertools
import math
import numpy
from PeakcallingReport import *
class PeakCallingStatus(DefaultTracker, Status):
'''status information for mapping stage.'''
def testCalling(self, track):
'''number of peaks called. The number of peaks expected in a sample
will of course vary wildly.
PASS : >1000 peaks called
WARN : >100 peaks called
FAIL : <100 peaks called
'''
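        # Derive the SQL table suffix (e.g. "macs_regions") from the tracker
        # pattern by dropping the trailing "$" and the leading "(.*)_" part.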
suffix = re.sub("^[^_]*_", "", self.pattern[:-1])
value = self.getValue(
"""SELECT COUNT(*) FROM %(track)s_%(suffix)s""" )
if value >= 1000:
status = "PASS"
elif value >= 100:
status = "WARNING"
else:
status = "FAIL"
return status, "%i" % value
class PeakCallingStatusMACS(PeakCallingStatus):
pattern = ("(.*)_macs_regions$")
class PeakCallingStatusMACS2(PeakCallingStatus):
pattern = ("(.*)_macs2_regions$")
class PeakCallingStatusSPP(PeakCallingStatus):
pattern = ("(.*)_spp_regions$")
class PeakCallingStatusSICER(PeakCallingStatus):
pattern = ("(.*)_sicer_regions$")
class PeakCallingStatusZinba(PeakCallingStatus):
pattern = ("(.*)_zinba_regions$")
class PeakCallingStatusPeakRanger(PeakCallingStatus):
pattern = ("(.*)_peakranger_peaks$")
class PeakCallingStatusCCAT(PeakCallingStatus):
pattern = ("(.*)_ccat_peaks$")
class EncodeQualityMetrics(Status):
'''
See http://code.google.com/p/phantompeakqualtools/ and :pmid:`22955991`
'''
tablename = "spp_quality"
def getTracks(self):
return self.getValues("SELECT DISTINCT track FROM %(tablename)s")
def testNSC(self, track):
'''Normalized strand correlation coefficient (NSC)
PASS : >=1.1
WARN : >=1.05
FAIL : < 1.05
'''
value = self.getValue(
"""SELECT nsc FROM %(tablename)s WHERE track = '%(track)s'""" )
if value >= 1.1:
status = "PASS"
elif value >= 1.05:
status = "WARNING"
else:
status = "FAIL"
return status, "%f" % value
def testRSC(self, track):
'''Relative strand cross-correlation coefficient (RSC)
PASS : >=1.0
WARN : >= 0.8
FAIL : <0.8
'''
value = self.getValue(
"""SELECT rsc FROM %(tablename)s WHERE track = '%(track)s'""" )
if value >= 1.0:
status = "PASS"
elif value >= 0.8:
status = "WARNING"
else:
status = "FAIL"
return status, "%f" % value
def testMappedReads(self, track):
'''ENCODE recommends 10Mio uniquely mapped reads for mammalian genomes
for point-binding intervals and at least 20Mio mapped reads for broad peaks.
PASS : >= 20000000
WARN : >= 10000000
FAIL : <10000000
'''
value = self.getValue(
"""SELECT mapped_reads FROM %(tablename)s WHERE track = '%(track)s'""" )
if value >= 2e7:
status = "PASS"
elif value >= 1e7:
status = "WARNING"
else:
status = "FAIL"
return status, "%i" % value
|
CGATOxford/CGATPipelines
|
obsolete/reports/pipeline_peakcalling_obsolete/trackers/Status.py
|
Python
|
mit
| 3,244
|
"""
WSGI config for odm2testsite project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "templatesAndSettings.settings.development") # noqa
application = get_wsgi_application()
|
ocefpaf/ODM2-Admin
|
templatesAndSettings/wsgi.py
|
Python
|
mit
| 428
|
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Intangible()
result.template = "object/draft_schematic/structure/component/shared_structure_light_ore_mining_unit.iff"
result.attribute_template_id = -1
result.stfName("string_id_table","")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
anhstudios/swganh
|
data/scripts/templates/object/draft_schematic/structure/component/shared_structure_light_ore_mining_unit.py
|
Python
|
mit
| 480
|
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Creature()
result.template = "object/mobile/shared_dressed_ewok_m_12.iff"
result.attribute_template_id = 9
result.stfName("npc_name","ewok_base_male")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
anhstudios/swganh
|
data/scripts/templates/object/mobile/shared_dressed_ewok_m_12.py
|
Python
|
mit
| 441
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "stuff.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
hnaoto/CS4501-ISA-Models
|
manage.py
|
Python
|
mit
| 248
|
##############################################################################################
# FIXME List:
#
##############################################################################################
import logging
import warnings
from enum import Enum
from dpl.core.things import Actuator
logger = logging.getLogger(__name__)
class Trigger(Actuator):
"""
    An object with two states: on and off
"""
class States(Enum):
"""
        Possible trigger states
"""
on = True
off = False
unknown = None
__COMMAND_LIST = ("toggle", "activate", "deactivate", "on", "off")
def __init__(self, con_instance, con_params, metadata=None):
"""
        Constructor, a copy of the base class constructor
        :param con_instance: connection instance
        :param con_params: connection access parameters
        :param metadata: object metadata (see the metadata property)
"""
super().__init__(con_instance, con_params, metadata)
@property
def actions(self) -> tuple:
"""
        Returns a list of all available commands
:return: tuple
"""
return self.__COMMAND_LIST
@property
def is_active(self) -> bool:
"""
        Whether the object is in one of the active states
        :return: bool, True in the on state, False in other states
"""
return self.state == self.States.on
def on(self) -> Actuator.ExecutionResult:
"""
        Immediately sets the trigger to the "on" state
"""
raise NotImplementedError
def off(self) -> Actuator.ExecutionResult:
"""
        Immediately sets the trigger to the "off" state
"""
raise NotImplementedError
def set_on(self) -> Actuator.ExecutionResult:
"""
        Immediately sets the trigger to the "on" state
"""
warnings.warn("Deprecated, use on method instead", DeprecationWarning)
return self.on()
def set_off(self) -> Actuator.ExecutionResult:
"""
        Immediately sets the trigger to the "off" state
"""
warnings.warn("Deprecated, use off method instead", DeprecationWarning)
return self.off()
def activate(self) -> Actuator.ExecutionResult:
"""
        Switches the Trigger to the on state
:return: Actuator.ExecutionResult
"""
return self.on()
def deactivate(self) -> Actuator.ExecutionResult:
"""
        Switches the Trigger to the off state
:return: Actuator.ExecutionResult
"""
return self.off()
def toggle(self) -> Actuator.ExecutionResult: # Fixme: CC1
"""
        Toggle from the current state to the opposite one
:return: Actuator.ExecutionResult
"""
if self.state == self.States.unknown:
warnings.warn("Unknown state handling may be deleted", FutureWarning)
logger.debug(
"Unable to toggle %s object from %s state",
self,
self.state
)
return Actuator.ExecutionResult.IGNORED_BAD_STATE
return super().toggle()
|
dot-cat/dotcat_platform
|
dpl/core/things/trigger.py
|
Python
|
mit
| 3,799
|
from sympy.core import S, sympify, Lambda, Dummy, Integer, Rational, oo, Float, pi
from sympy.functions import sqrt, exp, erf
from sympy.printing import sstr
import random
class Sample(tuple):
"""
Sample([x1, x2, x3, ...]) represents a collection of samples.
Sample parameters like mean, variance and stddev can be accessed as
properties.
The sample will be sorted.
"""
def __new__(cls, sample):
s = tuple.__new__(cls, sorted(sample))
s.mean = mean = sum(s) / Integer(len(s))
s.variance = sum([(x-mean)**2 for x in s]) / Integer(len(s))
s.stddev = sqrt(s.variance)
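        # median: the middle element for an odd-length sample, otherwise the
        # mean of the two middle elements (the sample is already sorted)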
if len(s) % 2:
s.median = s[len(s)//2]
else:
s.median = sum(s[len(s)//2-1:len(s)//2+1]) / Integer(2)
return s
def __repr__(self):
return sstr(self)
def __str__(self):
return sstr(self)
class ContinuousProbability(object):
"""Base class for continuous probability distributions"""
def probability(s, a, b):
"""Calculate the probability that a random number x generated
from the distribution satisfies a <= x <= b """
return s.cdf(b) - s.cdf(a)
def random(s, n=None):
"""
random() -- generate a random number from the distribution.
random(n) -- generate a Sample of n random numbers.
"""
if n is None:
return s._random()
else:
return Sample([s._random() for i in xrange(n)])
def __repr__(self):
return sstr(self)
def __str__(self):
return sstr(self)
class Normal(ContinuousProbability):
"""
Normal(mu, sigma) represents the normal or Gaussian distribution
with mean value mu and standard deviation sigma.
Example usage:
>>> from sympy.statistics import Normal
>>> from sympy import oo
>>> N = Normal(1, 2)
>>> N.mean
1
>>> N.variance
4
>>> N.probability(-oo, 1) # probability on an interval
1/2
>>> N.probability(1, oo)
1/2
>>> N.probability(-oo, oo)
1
>>> N.probability(-1, 3)
erf(sqrt(2)/2)
>>> _.evalf()
0.682689492137086
"""
def __init__(self, mu, sigma):
self.mu = sympify(mu)
self.sigma = sympify(sigma)
mean = property(lambda s: s.mu)
median = property(lambda s: s.mu)
mode = property(lambda s: s.mu)
stddev = property(lambda s: s.sigma)
variance = property(lambda s: s.sigma**2)
def pdf(s, x):
"""Return the probability density function as an expression in x"""
x = sympify(x)
return 1/(s.sigma*sqrt(2*pi)) * exp(-(x-s.mu)**2 / (2*s.sigma**2))
def cdf(s, x):
"""Return the cumulative density function as an expression in x"""
x = sympify(x)
return (1+erf((x-s.mu)/(s.sigma*sqrt(2))))/2
def _random(s):
return random.gauss(float(s.mu), float(s.sigma))
def confidence(s, p):
"""Return a symmetric (p*100)% confidence interval. For example,
p=0.95 gives a 95% confidence interval. Currently this function
only handles numerical values except in the trivial case p=1.
Examples usage:
# One standard deviation
>>> from sympy.statistics import Normal
>>> N = Normal(0, 1)
>>> N.confidence(0.68)
(-0.994457883209753, 0.994457883209753)
>>> N.probability(*_).evalf()
0.680000000000000
# Two standard deviations
>>> N = Normal(0, 1)
>>> N.confidence(0.95)
(-1.95996398454005, 1.95996398454005)
>>> N.probability(*_).evalf()
0.950000000000000
"""
if p == 1:
return (-oo, oo)
assert p <= 1
# In terms of n*sigma, we have n = sqrt(2)*ierf(p). The inverse
# error function is not yet implemented in SymPy but can easily be
# computed numerically
from sympy.mpmath import mpf, erfinv
# calculate y = ierf(p) by solving erf(y) - p = 0
y = erfinv(mpf(p))
t = Float(str(mpf(float(s.sigma)) * mpf(2)**0.5 * y))
mu = s.mu.evalf()
return (mu-t, mu+t)
@staticmethod
def fit(sample):
"""Create a normal distribution fit to the mean and standard
deviation of the given distribution or sample."""
if not hasattr(sample, "stddev"):
sample = Sample(sample)
return Normal(sample.mean, sample.stddev)
class Uniform(ContinuousProbability):
"""
Uniform(a, b) represents a probability distribution with uniform
probability density on the interval [a, b] and zero density
everywhere else.
"""
def __init__(self, a, b):
self.a = sympify(a)
self.b = sympify(b)
mean = property(lambda s: (s.a+s.b)/2)
median = property(lambda s: (s.a+s.b)/2)
mode = property(lambda s: (s.a+s.b)/2) # arbitrary
variance = property(lambda s: (s.b-s.a)**2 / 12)
stddev = property(lambda s: sqrt(s.variance))
def pdf(s, x):
"""Return the probability density function as an expression in x"""
x = sympify(x)
if not x.is_Number:
raise NotImplementedError("SymPy does not yet support"
"piecewise functions")
if x < s.a or x > s.b:
return Rational(0)
return 1/(s.b-s.a)
def cdf(s, x):
"""Return the cumulative density function as an expression in x"""
x = sympify(x)
if not x.is_Number:
raise NotImplementedError("SymPy does not yet support"
"piecewise functions")
if x <= s.a:
return Rational(0)
if x >= s.b:
return Rational(1)
return (x-s.a)/(s.b-s.a)
def _random(s):
return Float(random.uniform(float(s.a), float(s.b)))
def confidence(s, p):
"""Generate a symmetric (p*100)% confidence interval.
>>> from sympy import Rational
>>> from sympy.statistics import Uniform
>>> U = Uniform(1, 2)
>>> U.confidence(1)
(1, 2)
>>> U.confidence(Rational(1,2))
(5/4, 7/4)
"""
p = sympify(p)
assert p <= 1
d = (s.b-s.a)*p / 2
return (s.mean - d, s.mean + d)
@staticmethod
def fit(sample):
"""Create a uniform distribution fit to the mean and standard
deviation of the given distribution or sample."""
if not hasattr(sample, "stddev"):
sample = Sample(sample)
m = sample.mean
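        # For Uniform(a, b) the variance is (b-a)**2/12, so the half-width
        # of the fitted interval is sqrt(12*variance)/2.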
d = sqrt(12*sample.variance)/2
return Uniform(m-d, m+d)
class PDF(ContinuousProbability):
"""
PDF(func, (x, a, b)) represents continuous probability distribution
with probability distribution function func(x) on interval (a, b)
If func is not normalized so that integrate(func, (x, a, b)) == 1,
it can be normalized using PDF.normalize() method
Example usage:
>>> from sympy import Symbol, exp, oo
>>> from sympy.statistics.distributions import PDF
>>> from sympy.abc import x
>>> a = Symbol('a', positive=True)
>>> exponential = PDF(exp(-x/a)/a, (x,0,oo))
>>> exponential.pdf(x)
exp(-x/a)/a
>>> exponential.cdf(x)
1 - exp(-x/a)
>>> exponential.mean
a
>>> exponential.variance
a**2
"""
def __init__(self, pdf, var):
#XXX maybe add some checking of parameters
if isinstance(var, (tuple, list)):
self.pdf = Lambda(var[0], pdf)
self.domain = tuple(var[1:])
else:
self.pdf = Lambda(var, pdf)
self.domain = (-oo, oo)
self._cdf = None
self._mean = None
self._variance = None
self._stddev = None
def normalize(self):
"""
Normalize the probability distribution function so that
integrate(self.pdf(x), (x, a, b)) == 1
Example usage:
>>> from sympy import Symbol, exp, oo
>>> from sympy.statistics.distributions import PDF
>>> from sympy.abc import x
>>> a = Symbol('a', positive=True)
>>> exponential = PDF(exp(-x/a), (x,0,oo))
>>> exponential.normalize().pdf(x)
exp(-x/a)/a
"""
norm = self.probability(*self.domain)
if norm != 1:
w = Dummy('w', real=True)
return self.__class__(self.pdf(w)/norm, (w, self.domain[0], self.domain[1]))
#self._cdf = Lambda(w, (self.cdf(w) - self.cdf(self.domain[0]))/norm)
#if self._mean is not None:
# self._mean /= norm
#if self._variance is not None:
# self._variance = (self._variance + (self._mean*norm)**2)/norm - self.mean**2
#if self._stddev is not None:
# self._stddev = sqrt(self._variance)
else:
return self
def cdf(self, x):
x = sympify(x)
if self._cdf is not None:
return self._cdf(x)
else:
from sympy import integrate
w = Dummy('w', real=True)
self._cdf = integrate(self.pdf(w), w)
self._cdf = Lambda(w, self._cdf - self._cdf.subs(w, self.domain[0]))
return self._cdf(x)
def _get_mean(self):
if self._mean is not None:
return self._mean
else:
from sympy import integrate
w = Dummy('w', real=True)
self._mean = integrate(self.pdf(w)*w,(w,self.domain[0],self.domain[1]))
return self._mean
def _get_variance(self):
if self._variance is not None:
return self._variance
else:
from sympy import integrate, simplify
w = Dummy('w', real=True)
self._variance = integrate(self.pdf(w)*w**2,(w,self.domain[0],self.domain[1])) - self.mean**2
self._variance = simplify(self._variance)
return self._variance
def _get_stddev(self):
if self._stddev is not None:
return self._stddev
else:
self._stddev = sqrt(self.variance)
return self._stddev
mean = property(_get_mean)
variance = property(_get_variance)
stddev = property(_get_stddev)
def _random(s):
raise NotImplementedError
def transform(self,func,var):
"""Return a probability distribution of random variable func(x)
currently only some simple injective functions are supported"""
w = Dummy('w', real=True)
from sympy import solve
inverse = solve(func-w, var)
newPdf = S.Zero
funcdiff = func.diff(var)
#TODO check if x is in domain
for x in inverse:
# this assignment holds only for x in domain
# in general it would require implementing
# piecewise defined functions in sympy
newPdf += (self.pdf(var)/abs(funcdiff)).subs(var,x)
return PDF(newPdf, (w, func.subs(var, self.domain[0]), func.subs(var, self.domain[1])))
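# --- Illustrative usage sketch (not part of the original module) ---
# PDF.transform applies the change-of-variables rule used above,
# p_Y(w) = sum over inverse branches x of p_X(x) / |f'(x)|.
# A rough doctest-style example, assuming the old sympy API in this file
# (exact printing may differ between versions):
#
# >>> from sympy import Symbol
# >>> x = Symbol('x')
# >>> u = PDF(1, (x, 0, 1))          # uniform density on (0, 1)
# >>> doubled = u.transform(2*x, x)  # distribution of y = 2*x
# >>> doubled.domain
# (0, 2)
# >>> doubled.pdf(x)
# 1/2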
|
Cuuuurzel/KiPyCalc
|
sympy_old/statistics/distributions.py
|
Python
|
mit
| 11,156
|
from . ops import Ops
from . editor import Editor
from . receiver import Receiver
from .. util import deprecated
class Action(Receiver):
"""
An Action takes an incoming message, applies Ops to it, and then
uses it to set a value on a Editor.
"""
def __init__(self, address, ops=()):
self.address = Editor(address)
self.ops = Ops(*ops)
def set_project(self, project):
self.address.set_project(project)
def receive(self, values):
if self.ops:
if len(values) == 1:
values = [self.ops(values[0])]
else:
# TODO: They specified ops, but we can't use it.
# Should we warn here? Can we use the ops somehow?
pass
return self.address.receive(values)
def __bool__(self):
return bool(self.address or self.ops)
def __str__(self):
if self.ops:
            return '%s->%s' % (self.address, self.ops)
return str(self.address)
@classmethod
def make(cls, action):
if isinstance(action, str):
return cls(action)
if isinstance(action, dict):
return cls(**action)
return cls(*action)
class ActionList(Receiver):
"""A list of Actions."""
def __init__(self, actions=None):
if isinstance(actions, (str, dict)):
actions = [actions]
self.actions = tuple(Action.make(a) for a in actions or ())
def set_project(self, project):
for a in self.actions:
a.set_project(project)
def receive(self, msg):
values = tuple(msg.values())
for action in self.actions:
action.receive(values)
def __bool__(self):
return bool(self.actions)
def __str__(self):
return ' + '.join(str(a) for a in self.actions)
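# --- Illustrative sketch (not part of the original module) ---
# Action.make accepts the three shapes checked above; the address strings
# below are made-up placeholders, not real project addresses.
#
# Action.make('animation.speed')                          # plain address string
# Action.make({'address': 'animation.speed', 'ops': ()})  # keyword form
# Action.make(('animation.speed', ()))                    # positional form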
|
rec/BiblioPixel
|
bibliopixel/control/action.py
|
Python
|
mit
| 1,836
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('twilio', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='sms',
name='pub_date',
field=models.DateTimeField(default=datetime.datetime(2015, 4, 3, 14, 28, 3, 711647, tzinfo=utc), verbose_name=b'date published'),
preserve_default=False,
),
]
|
oscar8771p/python1987
|
PySms/twilio/migrations/0002_sms_pub_date.py
|
Python
|
mit
| 568
|
# This file is part of Indico.
# Copyright (C) 2002 - 2019 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from __future__ import unicode_literals
from flask import request
from werkzeug.exceptions import BadRequest
from indico.core.db import db
from indico.modules.events.abstracts.controllers.base import RHManageAbstractsBase
from indico.modules.events.abstracts.forms import (CreateEmailTemplateForm, EditEmailTemplateRuleForm,
EditEmailTemplateTextForm)
from indico.modules.events.abstracts.models.abstracts import AbstractState
from indico.modules.events.abstracts.models.email_templates import AbstractEmailTemplate
from indico.modules.events.abstracts.notifications import get_abstract_notification_tpl_module
from indico.modules.events.abstracts.util import build_default_email_template, create_mock_abstract
from indico.web.flask.templating import get_template_module
from indico.web.util import jsonify_data, jsonify_template
class RHEmailTemplateList(RHManageAbstractsBase):
"""Display list of e-mail templates."""
def _process(self):
return jsonify_template('events/abstracts/management/notification_tpl_list.html', event=self.event,
**_get_rules_fields(self.event))
class RHAddEmailTemplate(RHManageAbstractsBase):
"""Add a new e-mail template."""
def _process(self):
form = CreateEmailTemplateForm(event=self.event)
if form.validate_on_submit():
new_tpl = build_default_email_template(self.event, form.default_tpl.data)
form.populate_obj(new_tpl)
self.event.abstract_email_templates.append(new_tpl)
db.session.flush()
return _render_notification_list(self.event)
return jsonify_template('events/abstracts/management/notification_tpl_form.html', form=form)
class RHSortEmailTemplates(RHManageAbstractsBase):
"""Sort e-mail templates according to the order provided by the client."""
def _process(self):
sort_order = request.json['sort_order']
tpls = {s.id: s for s in self.event.abstract_email_templates}
for position, tpl_id in enumerate(sort_order, 1):
if tpl_id in tpls:
tpls[tpl_id].position = position
class RHEditEmailTemplateBase(RHManageAbstractsBase):
"""Base class for operations that involve editing an e-mail template."""
normalize_url_spec = {
'locators': {
lambda self: self.email_tpl
}
}
def _process_args(self):
RHManageAbstractsBase._process_args(self)
self.email_tpl = AbstractEmailTemplate.get_one(request.view_args['email_tpl_id'])
class RHEditEmailTemplateRules(RHEditEmailTemplateBase):
"""Edit the rules of a notification template."""
def _process(self):
form = EditEmailTemplateRuleForm(obj=self.email_tpl, event=self.event)
if form.validate_on_submit():
form.populate_obj(self.email_tpl)
return _render_notification_list(self.event)
return jsonify_template('events/abstracts/management/notification_tpl_form.html', form=form, is_edit=True)
class RHEditEmailTemplateText(RHEditEmailTemplateBase):
"""Edit the e-mail text of a notification template."""
def _process(self):
form = EditEmailTemplateTextForm(obj=self.email_tpl, event=self.event)
if form.validate_on_submit():
form.populate_obj(self.email_tpl)
return _render_notification_list(self.event)
return jsonify_template('events/abstracts/management/notification_tpl_text_form.html', form=form, is_edit=True)
class RHDeleteEmailTemplate(RHEditEmailTemplateBase):
"""Delete an e-mail template."""
def _process(self):
db.session.delete(self.email_tpl)
return _render_notification_list(self.event, flash=False)
class RHEmailTemplateREST(RHEditEmailTemplateBase):
"""Perform RESTful actions on an email template."""
def _process_PATCH(self):
if request.json is None:
raise BadRequest('Expected JSON payload')
invalid_fields = request.json.viewkeys() - {'stop_on_match'}
if invalid_fields:
raise BadRequest("Invalid fields: {}".format(', '.join(invalid_fields)))
if 'stop_on_match' in request.json:
self.email_tpl.stop_on_match = request.json['stop_on_match']
return jsonify_data(flash=False)
def _get_rules_fields(event):
abstract = create_mock_abstract(event)
email_tpl_dict = {et.id: get_abstract_notification_tpl_module(et, abstract)
for et in event.abstract_email_templates}
return {'tracks': {track.id: track for track in event.tracks},
'states': AbstractState.__titles__, 'contrib_types': {ct.id: ct for ct in event.contribution_types},
'email_tpls': email_tpl_dict}
def _render_notification_list(event, flash=True):
tpl = get_template_module('events/abstracts/management/_notification_tpl_list.html')
return jsonify_data(html=tpl.render_notification_list(event, **_get_rules_fields(event)), flash=flash)
|
mvidalgarcia/indico
|
indico/modules/events/abstracts/controllers/email_templates.py
|
Python
|
mit
| 5,238
|
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test running dashd with the -rpcbind and -rpcallowip options."""
import sys
from test_framework.test_framework import BitcoinTestFramework, SkipTest
from test_framework.util import *
from test_framework.netutil import *
class RPCBindTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.bind_to_localhost_only = False
self.num_nodes = 1
def setup_network(self):
self.add_nodes(self.num_nodes, None)
def add_options(self, parser):
parser.add_option("--ipv4", action='store_true', dest="run_ipv4", help="Run ipv4 tests only", default=False)
parser.add_option("--ipv6", action='store_true', dest="run_ipv6", help="Run ipv6 tests only", default=False)
parser.add_option("--nonloopback", action='store_true', dest="run_nonloopback", help="Run non-loopback tests only", default=False)
def run_bind_test(self, allow_ips, connect_to, addresses, expected):
'''
Start a node with requested rpcallowip and rpcbind parameters,
then try to connect, and check if the set of bound addresses
matches the expected set.
'''
self.log.info("Bind test for %s" % str(addresses))
expected = [(addr_to_hex(addr), port) for (addr, port) in expected]
base_args = ['-disablewallet', '-nolisten']
if allow_ips:
base_args += ['-rpcallowip=' + x for x in allow_ips]
binds = ['-rpcbind='+addr for addr in addresses]
self.nodes[0].rpchost = connect_to
self.start_node(0, base_args + binds)
pid = self.nodes[0].process.pid
assert_equal(set(get_bind_addrs(pid)), set(expected))
self.stop_nodes()
def run_allowip_test(self, allow_ips, rpchost, rpcport):
'''
Start a node with rpcallow IP, and request getnetworkinfo
at a non-localhost IP.
'''
self.log.info("Allow IP test for %s:%d" % (rpchost, rpcport))
node_args = \
['-disablewallet', '-nolisten'] + \
['-rpcallowip='+x for x in allow_ips] + \
['-rpcbind='+addr for addr in ['127.0.0.1', "%s:%d" % (rpchost, rpcport)]] # Bind to localhost as well so start_nodes doesn't hang
self.nodes[0].rpchost = None
self.start_nodes([node_args])
# connect to node through non-loopback interface
node = get_rpc_proxy(rpc_url(self.nodes[0].datadir, 0, self.chain, "%s:%d" % (rpchost, rpcport)), 0, coveragedir=self.options.coveragedir)
node.getnetworkinfo()
self.stop_nodes()
def run_test(self):
# due to OS-specific network stats queries, this test works only on Linux
if sum([self.options.run_ipv4, self.options.run_ipv6, self.options.run_nonloopback]) > 1:
raise AssertionError("Only one of --ipv4, --ipv6 and --nonloopback can be set")
self.log.info("Check for linux")
if not sys.platform.startswith('linux'):
raise SkipTest("This test can only be run on linux.")
self.log.info("Check for ipv6")
have_ipv6 = test_ipv6_local()
if not have_ipv6 and not self.options.run_ipv4:
raise SkipTest("This test requires ipv6 support.")
self.log.info("Check for non-loopback interface")
self.non_loopback_ip = None
for name,ip in all_interfaces():
if ip != '127.0.0.1':
self.non_loopback_ip = ip
break
if self.non_loopback_ip is None and self.options.run_nonloopback:
raise SkipTest("This test requires a non-loopback ip address.")
self.defaultport = rpc_port(0)
if not self.options.run_nonloopback:
self._run_loopback_tests()
if not self.options.run_ipv4 and not self.options.run_ipv6:
self._run_nonloopback_tests()
def _run_loopback_tests(self):
if self.options.run_ipv4:
# check only IPv4 localhost (explicit)
self.run_bind_test(['127.0.0.1'], '127.0.0.1', ['127.0.0.1'],
[('127.0.0.1', self.defaultport)])
# check only IPv4 localhost (explicit) with alternative port
self.run_bind_test(['127.0.0.1'], '127.0.0.1:32171', ['127.0.0.1:32171'],
[('127.0.0.1', 32171)])
# check only IPv4 localhost (explicit) with multiple alternative ports on same host
self.run_bind_test(['127.0.0.1'], '127.0.0.1:32171', ['127.0.0.1:32171', '127.0.0.1:32172'],
[('127.0.0.1', 32171), ('127.0.0.1', 32172)])
else:
# check default without rpcallowip (IPv4 and IPv6 localhost)
self.run_bind_test(None, '127.0.0.1', [],
[('127.0.0.1', self.defaultport), ('::1', self.defaultport)])
# check default with rpcallowip (IPv4 and IPv6 localhost)
self.run_bind_test(['127.0.0.1'], '127.0.0.1', [],
[('127.0.0.1', self.defaultport), ('::1', self.defaultport)])
# check only IPv6 localhost (explicit)
self.run_bind_test(['[::1]'], '[::1]', ['[::1]'],
[('::1', self.defaultport)])
# check both IPv4 and IPv6 localhost (explicit)
self.run_bind_test(['127.0.0.1'], '127.0.0.1', ['127.0.0.1', '[::1]'],
[('127.0.0.1', self.defaultport), ('::1', self.defaultport)])
def _run_nonloopback_tests(self):
self.log.info("Using interface %s for testing" % self.non_loopback_ip)
# check only non-loopback interface
self.run_bind_test([self.non_loopback_ip], self.non_loopback_ip, [self.non_loopback_ip],
[(self.non_loopback_ip, self.defaultport)])
# Check that with invalid rpcallowip, we are denied
self.run_allowip_test([self.non_loopback_ip], self.non_loopback_ip, self.defaultport)
assert_raises_rpc_error(-342, "non-JSON HTTP response with '403 Forbidden' from server", self.run_allowip_test, ['1.1.1.1'], self.non_loopback_ip, self.defaultport)
if __name__ == '__main__':
RPCBindTest().main()
|
dashpay/dash
|
test/functional/rpc_bind.py
|
Python
|
mit
| 6,265
|
# coding=utf-8
# Copyright 2018 Google LLC & Hwalsuk Lee.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Customized TensorFlow operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from compare_gan.tpu import tpu_random
random_uniform = tpu_random.uniform
random_normal = tpu_random.normal
|
ifding/ifding.github.io
|
gans/ops.py
|
Python
|
mit
| 866
|
"""
Python Interchangeable Virtual Instrument Library
Copyright (c) 2016-2017 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from .tektronixMDO4000 import *
class tektronixMDO4014B(tektronixMDO4000):
"Tektronix MDO4014B IVI oscilloscope driver"
def __init__(self, *args, **kwargs):
self.__dict__.setdefault('_instrument_id', 'MDO4014B')
super(tektronixMDO4014B, self).__init__(*args, **kwargs)
self._analog_channel_count = 4
self._digital_channel_count = 16
self._bandwidth = 100e6
self._init_channels()
|
alexforencich/python-ivi
|
ivi/tektronix/tektronixMDO4014B.py
|
Python
|
mit
| 1,564
|
# $Id: octopus_conf_handler.py 2016-12-17 $
# Author: Coen Meerbeek <coen@buzzardlabs.com>
# Copyright: BuzzardLabs 2016
import splunk.admin as admin
import splunk.entity as en
# import your required python modules
'''
Copyright (C) 2005 - 2010 Splunk Inc. All Rights Reserved.
Description: This skeleton python script handles the parameters in the configuration page.
handleList method: lists configurable parameters in the configuration page
corresponds to handleractions = list in restmap.conf
handleEdit method: controls the parameters and saves the values
corresponds to handleractions = edit in restmap.conf
'''
class ConfigApp(admin.MConfigHandler):
'''
Set up supported arguments
'''
def setup(self):
if self.requestedAction == admin.ACTION_EDIT:
for arg in ['hostname', 'protocol', 'apikey']:
self.supportedArgs.addOptArg(arg)
'''
Read the initial values of the parameters from the custom file
myappsetup.conf, and write them to the setup page.
If the app has never been set up,
uses .../app_name/default/myappsetup.conf.
If app has been set up, looks at
.../local/myappsetup.conf first, then looks at
.../default/myappsetup.conf only if there is no value for a field in
.../local/myappsetup.conf
For boolean fields, may need to switch the true/false setting.
For text fields, if the conf file says None, set to the empty string.
'''
def handleList(self, confInfo):
confDict = self.readConf("octopus")
if None != confDict:
for stanza, settings in confDict.items():
for key, val in settings.items():
if key in ['hostname'] and val in [None, '']:
val = ''
elif key in ['protocol'] and val in [None, '']:
val = ''
elif key in ['apikey'] and val in [None, '']:
val = ''
confInfo[stanza].append(key, val)
'''
After user clicks Save on setup page, take updated parameters,
normalize them, and save them somewhere
'''
def handleEdit(self, confInfo):
name = self.callerArgs.id
args = self.callerArgs
if self.callerArgs.data['hostname'][0] in [None, '']:
self.callerArgs.data['hostname'][0] = ''
if self.callerArgs.data['protocol'][0] in [None, '']:
self.callerArgs.data['protocol'][0] = ''
if self.callerArgs.data['apikey'][0] in [None, '']:
self.callerArgs.data['apikey'][0] = ''
'''
Since we are using a conf file to store parameters,
write them to the [octopus] stanza
in app_name/local/octopus.conf
'''
self.writeConf('octopus', 'octopus', self.callerArgs.data)
# initialize the handler
admin.init(ConfigApp, admin.CONTEXT_NONE)
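
# A hedged sketch of the stanza the handler above ends up persisting via
# writeConf('octopus', 'octopus', ...); the keys mirror the three supported
# arguments, but the values below are placeholders and the exact on-disk
# formatting is Splunk's, not taken from this file:
#
#   [octopus]
#   hostname = octopus.example.com
#   protocol = https
#   apikey = API-0123456789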
|
cmeerbeek/splunk-addon-octopus-deploy
|
TA-OctopusNT-Fwd/bin/octopus_conf_handler.py
|
Python
|
mit
| 2,769
|
from django.contrib.syndication.views import Feed
from .models import Post
from .settings import (
SYNDICATION_FEED_TITLE, SYNDICATION_FEED_LINK,
SYNDICATION_FEED_DESCRIPTION, SYNDICATION_FEED_TYPE
)
class LatestPostFeed(Feed):
title = SYNDICATION_FEED_TITLE
link = SYNDICATION_FEED_LINK
description = SYNDICATION_FEED_DESCRIPTION
feed_type = SYNDICATION_FEED_TYPE
def items(self):
return Post.objects.recent()
def item_title(self, item):
return item.subject
def item_description(self, item):
return item.short
def item_pubdate(self, item):
return item.created_on
def item_updateddate(self, item):
return item.modified_on
def item_categories(self, item):
return [category.title for category in item.category.hierarchy()]
def item_author_name(self, item):
return "{first_name} {last_name}".format(
first_name=item.author.first_name,
last_name=item.author.last_name,
)
def item_author_email(self, item):
return item.author.email
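
# A hedged sketch of wiring the feed above into a project's URLconf; this would
# normally live in the project's urls.py, and the URL pattern and view name are
# assumptions rather than anything shipped with django-hermes.
from django.conf.urls import url

urlpatterns = [
    url(r'^feed/$', LatestPostFeed(), name='latest-post-feed'),
]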
|
bunchesofdonald/django-hermes
|
hermes/feeds.py
|
Python
|
mit
| 1,090
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
import sys
import warnings
from string import Template
os.environ['DJANGO_SETTINGS_MODULE'] = 'settings'
import django
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.core.management import call_command
from django.template import RequestContext
from django.test import TestCase
from django.test.client import Client
from django.test.utils import override_settings
from django.utils import unittest
from django.utils.encoding import smart_str
from selenium.webdriver.phantomjs.webdriver import WebDriver
from utils import script_prefix
# Raise errors on DeprecationWarnings
warnings.simplefilter('error', DeprecationWarning)
class AbstractJSReverseTestCase(object):
client = None
urls = 'tests.test_urls'
@classmethod
def setUpClass(cls):
if hasattr(django, 'setup'):
# for django >= 1.7
django.setup()
cls.selenium = WebDriver()
super(AbstractJSReverseTestCase, cls).setUpClass()
@classmethod
def tearDownClass(cls):
cls.selenium.quit()
super(AbstractJSReverseTestCase, cls).tearDownClass()
def setUp(self):
self.client = Client()
def assertEqualJSUrlEval(self, url_call, expected_url):
response = self.client.post('/jsreverse/')
self.assertEqual(self.selenium.execute_script('%s return %s;' % (smart_str(response.content), url_call)),
expected_url)
class JSReverseViewTestCaseMinified(AbstractJSReverseTestCase, TestCase):
def test_view_no_url_args(self):
self.assertEqualJSUrlEval('Urls.test_no_url_args()', '/test_no_url_args/')
def test_view_one_url_arg(self):
self.assertEqualJSUrlEval('Urls.test_one_url_args("arg_one")', '/test_one_url_args/arg_one/')
def test_view_two_url_args(self):
self.assertEqualJSUrlEval('Urls.test_two_url_args("arg_one", "arg_two")', '/test_two_url_args/arg_one-arg_two/')
def test_view_optional_url_arg(self):
self.assertEqualJSUrlEval('Urls.test_optional_url_arg("arg_two")',
'/test_optional_url_arg/2_arg_two/')
self.assertEqualJSUrlEval('Urls.test_optional_url_arg("arg_one", "arg_two")',
'/test_optional_url_arg/1_arg_one-2_arg_two/')
def test_unicode_url_name(self):
self.assertEqualJSUrlEval('Urls.test_unicode_url_name()', '/test_unicode_url_name/')
@override_settings(JS_REVERSE_JS_VAR_NAME='Foo')
def test_js_var_name_changed_valid(self):
self.assertEqualJSUrlEval('Foo.test_no_url_args()', '/test_no_url_args/')
@override_settings(JS_REVERSE_JS_VAR_NAME='1test')
def test_js_var_name_changed_to_invalid(self):
with self.assertRaises(ImproperlyConfigured):
self.client.post('/jsreverse/')
def test_namespaces(self):
self.assertEqualJSUrlEval('Urls["ns1:test_two_url_args"]("arg_one", "arg_two")',
'/ns1/test_two_url_args/arg_one-arg_two/')
self.assertEqualJSUrlEval('Urls["ns2:test_two_url_args"]("arg_one", "arg_two")',
'/ns2/test_two_url_args/arg_one-arg_two/')
def test_namespaces_with_args(self):
self.assertEqualJSUrlEval('Urls["ns_arg:test_two_url_args"]("arg_one", "arg_two", "arg_three")',
'/nsarg_one/test_two_url_args/arg_two-arg_three/')
def test_namespaces_nested(self):
self.assertEqualJSUrlEval('Urls["nestedns:ns1:test_two_url_args"]("arg_one", "arg_two")',
'/nestedns/ns1/test_two_url_args/arg_one-arg_two/')
def test_content_type(self):
response = self.client.post('/jsreverse/')
self.assertEqual(response['Content-Type'], 'application/javascript')
@override_settings(JS_REVERSE_JS_MINIFY='invalid')
def test_js_minfiy_changed_to_invalid(self):
with self.assertRaises(ImproperlyConfigured):
self.client.post('/jsreverse/')
def test_namespace_in_urls(self):
response = self.client.get('/jsreverse/')
self.assertContains(response, 'exclude_namespace', status_code=200)
@override_settings(JS_REVERSE_EXCLUDE_NAMESPACES=['exclude_namespace'])
def test_namespace_not_in_response(self):
response = self.client.get('/jsreverse/')
self.assertNotContains(response, 'exclude_namespace', status_code=200)
def test_script_prefix(self):
with script_prefix('/foobarlala/'):
self.assertEqualJSUrlEval('Urls["nestedns:ns1:test_two_url_args"]("arg_one", "arg_two")',
'/foobarlala/nestedns/ns1/test_two_url_args/arg_one-arg_two/')
def test_duplicate_name(self):
self.assertEqualJSUrlEval('Urls.test_duplicate_name("arg_one")',
'/test_duplicate_name/arg_one/')
self.assertEqualJSUrlEval('Urls.test_duplicate_name("arg_one", "arg_two")',
'/test_duplicate_name/arg_one-arg_two/')
def test_duplicate_argcount(self):
self.assertEqualJSUrlEval('Urls.test_duplicate_argcount ({arg_one: "arg_one"})',
'/test_duplicate_argcount/arg_one-/')
self.assertEqualJSUrlEval('Urls.test_duplicate_argcount ({arg_two: "arg_two"})',
'/test_duplicate_argcount/-arg_two/')
self.assertEqualJSUrlEval('Urls.test_duplicate_argcount ({arg_one: "arg_one", arg_two: "arg_two"})',
'/test_duplicate_argcount/arg_one-arg_two/')
@override_settings(JS_REVERSE_JS_MINIFY=False)
class JSReverseViewTestCaseNotMinified(JSReverseViewTestCaseMinified):
def test_minification(self):
js_not_minified = smart_str(self.client.post('/jsreverse/').content)
with override_settings(JS_REVERSE_JS_MINIFY=True):
js_minified = smart_str(self.client.post('/jsreverse/').content)
self.assertTrue(len(js_minified) < len(js_not_minified))
class JSReverseViewTestCaseGlobalObjectName(JSReverseViewTestCaseMinified):
def test_global_object_name_default(self):
js_content = smart_str(self.client.post('/jsreverse/').content)
self.assertTrue(js_content.startswith('this.'))
@override_settings(JS_REVERSE_JS_GLOBAL_OBJECT_NAME='window')
def test_global_object_name_change(self):
js_content = smart_str(self.client.post('/jsreverse/').content)
self.assertTrue(js_content.startswith('window.'))
@override_settings(JS_REVERSE_JS_GLOBAL_OBJECT_NAME='1test')
def test_global_object_name_change_invalid_identifier(self):
with self.assertRaises(ImproperlyConfigured):
self.client.post('/jsreverse/')
class JSReverseStaticFileSaveTest(AbstractJSReverseTestCase, TestCase):
def test_reverse_js_file_save(self):
call_command('collectstatic_js_reverse')
path = os.path.join(settings.STATIC_ROOT, 'django_js_reverse', 'js', 'reverse.js')
f = open(path)
content1 = f.read()
if hasattr(content1, 'decode'):
content1 = content1.decode()
r2 = self.client.get('/jsreverse/')
content2 = r2.content
if hasattr(content2, 'decode'):
content2 = content2.decode()
        self.assertEqual(len(content1), len(content2), 'Static file doesn\'t match http response content_1')
        self.assertEqual(content1, content2, 'Static file doesn\'t match http response content_2')
        # test for exception if STATIC_ROOT is not set
with override_settings(STATIC_ROOT=None):
with self.assertRaises(ImproperlyConfigured):
call_command('collectstatic_js_reverse')
def test_script_prefix(self):
script_prefix = '/test/foo/bar/'
with override_settings(JS_REVERSE_SCRIPT_PREFIX=script_prefix):
self.assertEqualJSUrlEval('Urls.test_no_url_args()', '{0}test_no_url_args/'.format(script_prefix))
class JSReverseTemplateTagTest(AbstractJSReverseTestCase, TestCase):
def test_tpl_tag_with_request_in_contect(self):
from django_js_reverse.templatetags.js_reverse import js_reverse_inline
context_instance = RequestContext(self.client.request)
Template("{%% load %s %%}{%% %s %%}" % ('js_reverse', js_reverse_inline(context_instance)))
def test_tpl_tag_without_request_in_contect(self):
from django_js_reverse.templatetags.js_reverse import js_reverse_inline
context_instance = RequestContext(None)
Template("{%% load %s %%}{%% %s %%}" % ('js_reverse', js_reverse_inline(context_instance)))
if __name__ == '__main__':
sys.path.insert(0, os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..') + os.sep)
unittest.main()
|
PokaInc/django-js-reverse
|
tests/unit_tests.py
|
Python
|
mit
| 8,888
|
#!/usr/bin/env python3
# This script is executed by Jasy during creating new applications
# Test for custom questions
config.set("custom", [1,2,3])
# config.ask("What's your name", "user.name", "String")
# config.ask("How old are you", "user.age", "Integer")
# config.ask("What's PI", "pi", "Float", default=3.14, required=False)
# config.ask("Increment from 1..3", "incr", "List")
# Test for io features
# file.mv("jasyscript.py", "foobarbaz.py")
|
dadicool/jasy-skeleton
|
skeleton/test/test-yaml-project/jasycreate.py
|
Python
|
mit
| 451
|
import datetime
import unittest
from twilio.base import serialize, values
class Iso8601DateTestCase(unittest.TestCase):
def test_unset(self):
value = values.unset
actual = serialize.iso8601_date(value)
self.assertEqual(values.unset, actual)
def test_datetime(self):
value = datetime.datetime(2015, 1, 2, 12, 0, 0, 0)
actual = serialize.iso8601_date(value)
self.assertEqual('2015-01-02', actual)
def test_datetime_without_time(self):
value = datetime.datetime(2015, 1, 2)
actual = serialize.iso8601_date(value)
self.assertEqual('2015-01-02', actual)
def test_date(self):
value = datetime.date(2015, 1, 2)
actual = serialize.iso8601_date(value)
self.assertEqual('2015-01-02', actual)
def test_str(self):
actual = serialize.iso8601_date('2015-01-02')
self.assertEqual('2015-01-02', actual)
class Iso8601DateTimeTestCase(unittest.TestCase):
def test_unset(self):
value = values.unset
actual = serialize.iso8601_datetime(value)
self.assertEqual(values.unset, actual)
def test_datetime(self):
value = datetime.datetime(2015, 1, 2, 3, 4, 5, 6)
actual = serialize.iso8601_datetime(value)
self.assertEqual('2015-01-02T03:04:05Z', actual)
def test_datetime_without_time(self):
value = datetime.datetime(2015, 1, 2)
actual = serialize.iso8601_datetime(value)
self.assertEqual('2015-01-02T00:00:00Z', actual)
def test_date(self):
value = datetime.date(2015, 1, 2)
actual = serialize.iso8601_datetime(value)
self.assertEqual('2015-01-02T00:00:00Z', actual)
def test_str(self):
actual = serialize.iso8601_datetime('2015-01-02T03:04:05Z')
self.assertEqual('2015-01-02T03:04:05Z', actual)
class PrefixedCollapsibleMapTestCase(unittest.TestCase):
def test_unset(self):
value = values.unset
actual = serialize.prefixed_collapsible_map(value, 'Prefix')
self.assertEqual({}, actual)
def test_single_key(self):
value = {
'foo': 'bar'
}
actual = serialize.prefixed_collapsible_map(value, 'Prefix')
self.assertEqual({
'Prefix.foo': 'bar'
}, actual)
def test_nested_key(self):
value = {
'foo': {
'bar': 'baz'
}
}
actual = serialize.prefixed_collapsible_map(value, 'Prefix')
self.assertEqual({
'Prefix.foo.bar': 'baz'
}, actual)
def test_multiple_keys(self):
value = {
'watson': {
'language': 'en',
'alice': 'bob'
},
'foo': 'bar'
}
actual = serialize.prefixed_collapsible_map(value, 'Prefix')
self.assertEqual({
'Prefix.watson.language': 'en',
'Prefix.watson.alice': 'bob',
'Prefix.foo': 'bar'
}, actual)
def test_list(self):
value = [
'foo',
'bar'
]
actual = serialize.prefixed_collapsible_map(value, 'Prefix')
self.assertEqual({}, actual)
class ObjectTestCase(unittest.TestCase):
def test_object(self):
actual = serialize.object({'twilio': 'rocks'})
self.assertEqual('{"twilio": "rocks"}', actual)
def test_list(self):
actual = serialize.object(['twilio', 'rocks'])
self.assertEqual('["twilio", "rocks"]', actual)
def test_does_not_change_other_types(self):
actual = serialize.object('{"attribute":"value"}')
self.assertEqual('{"attribute":"value"}', actual)
class MapTestCase(unittest.TestCase):
def test_maps_func_to_list(self):
actual = serialize.map([1, 2, 3], lambda e: e * 2)
self.assertEqual([2, 4, 6], actual)
def test_does_not_change_other_types(self):
actual = serialize.map("abc", lambda e: e * 2)
self.assertEqual("abc", actual)
actual = serialize.map(123, lambda e: e * 2)
self.assertEqual(123, actual)
actual = serialize.map({'some': 'val'}, lambda e: e * 2)
self.assertEqual({'some': 'val'}, actual)
|
tysonholub/twilio-python
|
tests/unit/base/test_serialize.py
|
Python
|
mit
| 4,219
|
# Copyright (c) 2013, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
from six import iteritems
import frappe
from frappe import _
field_map = {
"Contact": ["first_name", "last_name", "address", "phone", "mobile_no", "email_id", "is_primary_contact"],
"Address": ["address_line1", "address_line2", "city", "state", "pincode", "country", "is_primary_address"]
}
def execute(filters=None):
columns, data = get_columns(filters), get_data(filters)
return columns, data
def get_columns(filters):
return [
"{reference_doctype}:Link/{reference_doctype}".format(reference_doctype=filters.get("reference_doctype")),
"Address Line 1",
"Address Line 2",
"City",
"State",
"Postal Code",
"Country",
"Is Primary Address:Check",
"First Name",
"Last Name",
"Address",
"Phone",
"Email Id",
"Is Primary Contact:Check"
]
def get_data(filters):
data = []
reference_doctype = filters.get("reference_doctype")
reference_name = filters.get("reference_name")
return get_reference_addresses_and_contact(reference_doctype, reference_name)
def get_reference_addresses_and_contact(reference_doctype, reference_name):
data = []
filters = None
reference_details = frappe._dict()
if not reference_doctype:
return []
if reference_name:
filters = {"name": reference_name}
reference_list = [d[0] for d in frappe.get_list(reference_doctype, filters=filters, fields=["name"], as_list=True)]
for d in reference_list:
reference_details.setdefault(d, frappe._dict())
reference_details = get_reference_details(reference_doctype, "Address", reference_list, reference_details)
reference_details = get_reference_details(reference_doctype, "Contact", reference_list, reference_details)
for reference_name, details in iteritems(reference_details):
addresses = details.get("address", [])
contacts = details.get("contact", [])
if not any([addresses, contacts]):
result = [reference_name]
result.extend(add_blank_columns_for("Address"))
result.extend(add_blank_columns_for("Contact"))
data.append(result)
else:
addresses = list(map(list, addresses))
contacts = list(map(list, contacts))
max_length = max(len(addresses), len(contacts))
for idx in range(0, max_length):
result = [reference_name]
result.extend(addresses[idx] if idx < len(addresses) else add_blank_columns_for("Address"))
result.extend(contacts[idx] if idx < len(contacts) else add_blank_columns_for("Contact"))
data.append(result)
return data
def get_reference_details(reference_doctype, doctype, reference_list, reference_details):
filters = [
["Dynamic Link", "link_doctype", "=", reference_doctype],
["Dynamic Link", "link_name", "in", reference_list]
]
fields = ["`tabDynamic Link`.link_name"] + field_map.get(doctype, [])
records = frappe.get_list(doctype, filters=filters, fields=fields, as_list=True)
temp_records = list()
for d in records:
temp_records.append(d[1:])
if not reference_list:
frappe.throw(_("No records present in {0}").format(reference_doctype))
reference_details[reference_list[0]][frappe.scrub(doctype)] = temp_records
return reference_details
def add_blank_columns_for(doctype):
return ["" for field in field_map.get(doctype, [])]
|
adityahase/frappe
|
frappe/contacts/report/addresses_and_contacts/addresses_and_contacts.py
|
Python
|
mit
| 3,308
|
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from setuptools import Extension
from setuptools import setup
setup(
name=str('project_with_c'),
version='0.1.0',
url='example.com',
author='nobody',
author_email='nobody@example.com',
ext_modules=[Extension(str('project_with_c'), [str('project_with_c.c')])],
entry_points={
'console_scripts': [
'c-extension-script = project_with_c:hello_world',
],
},
)
|
Yelp/venv-update
|
tests/testing/packages/project_with_c/setup.py
|
Python
|
mit
| 535
|
#!/usr/bin/env python2
# coding: utf-8
# vim: set ts=4 sw=4 expandtab sts=4:
# Copyright (c) 2011-2014 Christian Geier & contributors
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
syncs the remote database to the local db
"""
from pycarddav import carddav
from pycarddav import backend
import logging
__all__ = ['sync']
def sync(conf):
"""this should probably be seperated from the class definitions"""
syncer = carddav.PyCardDAV(conf.account.resource,
user=conf.account.user,
passwd=conf.account.passwd,
write_support=conf.account.write_support,
verify=conf.account.verify,
auth=conf.account.auth)
my_dbtool = backend.SQLiteDb(db_path=conf.sqlite.path,
encoding="utf-8",
errors="stricts",
debug=conf.debug)
# sync:
abook = syncer.get_abook() # type(abook): dict
my_dbtool.check_account_table(conf.account.name, conf.account.resource)
for href, etag in abook.iteritems():
if my_dbtool.needs_update(href, conf.account.name, etag=etag):
logging.debug("getting %s etag: %s", href, etag)
vcard = syncer.get_vcard(href)
my_dbtool.update(vcard, conf.account.name, href=href, etag=etag)
remote_changed = False
# for now local changes overwritten by remote changes
logging.debug("looking for locally changed vcards...")
hrefs = my_dbtool.get_changed(conf.account.name)
for href in hrefs:
try:
logging.debug("trying to update %s", href)
card = my_dbtool.get_vcard_from_db(href, conf.account.name)
logging.debug("%s", my_dbtool.get_etag(href, conf.account.name))
syncer.update_vcard(card.vcf, href, None)
my_dbtool.reset_flag(href, conf.account.name)
remote_changed = True
except carddav.NoWriteSupport:
logging.info('failed to upload changed card {0}, '
'you need to enable write support, '
'see the documentation', href)
# uploading
hrefs = my_dbtool.get_new(conf.account.name)
for href in hrefs:
try:
logging.debug("trying to upload new card %s", href)
card = my_dbtool.get_vcard_from_db(href, conf.account.name)
(href_new, etag_new) = syncer.upload_new_card(card.vcf)
my_dbtool.update_href(href,
href_new,
conf.account.name,
status=backend.OK)
remote_changed = True
except carddav.NoWriteSupport:
logging.info('failed to upload card %s, '
'you need to enable write support, '
'see the documentation', href)
# deleting locally deleted cards on the server
hrefs_etags = my_dbtool.get_marked_delete(conf.account.name)
for href, etag in hrefs_etags:
try:
logging.debug('trying to delete card %s', href)
syncer.delete_vcard(href, etag)
my_dbtool.delete_vcard_from_db(href, conf.account.name)
remote_changed = True
except carddav.NoWriteSupport:
logging.info('failed to delete card {0}, '
'you need to enable write support, '
'see the documentation'.format(href))
# detecting remote-deleted cards
# is there a better way to compare a list of unicode() with a list of str()
# objects?
if remote_changed:
abook = syncer.get_abook() # type (abook): dict
r_href_account_list = my_dbtool.get_all_href_from_db_not_new(
[conf.account.name])
delete = set([href for href, account in r_href_account_list]).difference(abook.keys())
for href in delete:
my_dbtool.delete_vcard_from_db(href, conf.account.name)
|
Savar/pycarddav
|
pycarddav/controllers/sync.py
|
Python
|
mit
| 5,056
|
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Intangible()
result.template = "object/draft_schematic/space/weapon/missile/shared_wpn_imagerec_missile_mk1.iff"
result.attribute_template_id = -1
result.stfName("string_id_table","")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
anhstudios/swganh
|
data/scripts/templates/object/draft_schematic/space/weapon/missile/shared_wpn_imagerec_missile_mk1.py
|
Python
|
mit
| 474
|
from botnet.helpers import load_json, save_json, is_channel_name
def test_load_save_json(tmp_file):
data = {'key': 'value'}
save_json(tmp_file, data)
loaded_data = load_json(tmp_file)
assert loaded_data == data
def test_is_channel_name():
assert is_channel_name('#channel')
assert not is_channel_name('')
assert not is_channel_name('nickname_')
|
boreq/botnet
|
tests/test_helpers.py
|
Python
|
mit
| 377
|
import json
import io
from copy import deepcopy
import pytest
from NetscoutArborSightline import NetscoutClient, \
fetch_incidents_command, list_alerts_command, alert_annotation_list_command, mitigation_list_command, \
mitigation_template_list_command, router_list_command, tms_group_list_command, managed_object_list_command, \
mitigation_create_command, clean_links, validate_json_arg, build_human_readable
import demistomock as demisto
from CommonServerPython import * # noqa # pylint: disable=unused-wildcard-import
# from Packs
def util_load_json(path):
with io.open(path, mode='r', encoding='utf-8') as f:
return json.loads(f.read())
client = NetscoutClient(base_url='dummy_url', verify=False, proxy=False, first_fetch='3 days', max_fetch=10)
http_responses = util_load_json('test_data/http_responses.json')
command_results = util_load_json('test_data/command_results.json')
@pytest.fixture(autouse=True)
def setup(mocker):
mocker.patch.object(demisto, 'debug')
def test_fetch_incidents_command(mocker):
"""
Given:
- NetscoutClient client.
When:
- Fetching incidents.
Then:
- Ensure that the incidents returned are as expected.
"""
alerts_http_response = http_responses['incidents']
alerts_command_results = command_results['fetched_incidents']
mocker.patch.object(client, "list_alerts", return_value=alerts_http_response)
mocker.patch.object(client, "calculate_amount_of_incidents", return_value=40)
mocker.patch.object(demisto, 'incidents')
fetch_incidents_command(client)
demisto.incidents.assert_called_with(alerts_command_results)
@pytest.mark.parametrize(
'function_to_mock, function_to_test, args, http_response_key, expected_command_results_key', [
('list_alerts', list_alerts_command, {}, 'incidents', 'get_incidents'),
('get_alert', list_alerts_command, {'alert_id': 1}, 'incident', 'get_incident'),
('get_annotations', alert_annotation_list_command, {'alert_id': '2009'}, 'annotations', 'list_annotations'),
('list_mitigations', mitigation_list_command, {'limit': '3'}, 'mitigations', 'list_mitigations'),
('create_mitigation', mitigation_create_command,
{"description": "just desc", "ip_version": "IPv4", "name": "test_mit", "ongoing": "true",
"sub_object": "{\"protection_prefixes\": [\"192.0.2.0/24\"]}", "sub_type": "flowspec"}, 'mitigation',
'create_mitigation'),
('mitigation_template_list', mitigation_template_list_command, {}, 'mitigation_templates',
'list_mitigation_templates'),
('router_list', router_list_command, {}, 'routers', 'list_routers'),
('managed_object_list', managed_object_list_command, {}, 'managed_objects', 'list_managed_objects'),
('tms_group_list', tms_group_list_command, {}, 'tms_groups', 'list_tms_group'),
])
def test_commands(mocker, function_to_mock, function_to_test, args, http_response_key,
expected_command_results_key):
"""
Given:
- NetscoutClient client.
When:
- Case A: Calling the list_alerts_command function.
- Case B: Calling the list_alerts_command function with a specific alert.
- Case C: Calling the alert_annotation_list_command function.
- Case D: Calling the mitigation_list_command function with a specific alert.
- Case E: Calling the mitigation_create_command function with mitigation details.
- Case F: Calling the mitigation_template_list_command function.
- Case G: Calling the router_list_command function.
- Case H: Calling the managed_object_list_command function.
- Case I: Calling the tms_group_list_command function.
Then:
- Case A: Assert that the command results has the relevant alerts with the relevant extracted fields.
- Case B: Assert that the command results has only one alert and that it has the relevant extracted fields.
- Case C: Assert that the command results has the relevant annotations with the relevant extracted fields.
- Case D: Assert that the command results contains the alert ID and has the relevant mitigations with the relevant
extracted fields.
- Case E: Assert that the command results has the newly create mitigation with its relevant extracted fields.
- Case F: Assert that the command results has the relevant mitigation template list with the relevant extracted
fields.
- Case G: Assert that the command results has the relevant router list with the relevant extracted fields.
- Case H: Assert that the command results has the relevant list of manged groups with the relevant extracted
fields.
- Case I: Assert that the command results has the relevant list of tms groups with the relevant extracted fields.
"""
mocked_http_response = http_responses[http_response_key]
expected_command_results = command_results[expected_command_results_key]
mocker.patch.object(client, function_to_mock, return_value=mocked_http_response)
command_result: CommandResults = function_to_test(client, args)
assert command_result.outputs == expected_command_results
@pytest.mark.parametrize('http_response_key, expected_number_of_pages', [
('amount_of_incidents_vanilla_case', 25),
('amount_of_incidents_one_result', 1),
('amount_of_incidents_no_results', 0)
])
def test_calculate_amount_of_incidents(mocker, http_response_key, expected_number_of_pages):
"""
Given:
- Case A: A "regular" query that returns response with 25 pages.
- Case B: A query that returns response with only one page.
- Case C: A query that response with no pages and data.
When:
Calculating the amount of relevant incidents by counting the amount of pages
Then:
- Case A: Assert the the amount of incidents calculated is 25
- Case B: Assert the the amount of incidents calculated is 1
- Case C: Assert the the amount of incidents calculated is 0
"""
mocked_http_response = http_responses[http_response_key]
mocker.patch.object(client, 'list_alerts', return_value=mocked_http_response)
number_of_pages = client.calculate_amount_of_incidents('', {})
assert number_of_pages == expected_number_of_pages
def test_calculate_amount_of_incidents_raise_error(mocker):
mocked_http_response = http_responses['amount_of_incidents_broken_last_page']
mocker.patch.object(client, 'list_alerts', return_value=mocked_http_response)
with pytest.raises(DemistoException,
match='Could not calculate page size, last page number was not found:\n'
'https://content.demisto.works:57585/api/sp/v7/alerts/?'):
client.calculate_amount_of_incidents('', {})
@pytest.mark.parametrize('object_to_clean', [
({}),
({'some_key': 'some_value'}),
({'some_key': 'some_value', 'links': {'self': 'some_link'}}),
([{'some_key': 'some_value', 'links': {'self': 'some_link'}}]),
({'some_key': {'links': {'self': 'some_link'}}}),
({'some_key': [{'links': {'self': 'some_link'}}]}),
({'some_key': [{'links': {'self': 'some_link'}}, {'links': {'self': 'some_other_link'}}]}),
([{'some_key': [{'links': {'self': 'some_link'}}, {'links': {'self': 'some_other_link'}}]}]),
])
def test_clean_links(object_to_clean):
"""
Given:
- Case A: An empty dict.
- Case B: A dict with no 'links' key in it.
- Case C: A dict with a 'links' key in it.
- Case D: A list containing a dict with a 'links' key in it.
- Case E: A dict containing another dict with a 'links' key in it.
- Case F: A dict containing a list containing another dict with a 'links' key in it.
    - Case G: A dict containing a list containing additional dicts with a 'links' key in them.
    - Case H: A list containing a dict containing another list containing additional dicts with a 'links' key in them.
When:
Running the clean_links function
Then:
    No 'links' key appears in the transformed dict (checked by serializing the dict to a string)
"""
copy_of_object = deepcopy(object_to_clean)
clean_links(copy_of_object)
str_result = json.dumps(copy_of_object)
assert str_result.find('link') == -1
def test_validate_json_arg():
"""
Given:
- A string representing a json object.
When:
- Validating a string has a dict structure
Then:
    - Ensure no parsing error was raised.
"""
validate_json_arg('{"some_key": "some_value"}', '')
def test_validate_json_arg_raise_error():
"""
Given:
- A string that has no json format.
When:
- Validating a string has a json structure.
Then:
- Ensure a parsing error was raised
"""
with pytest.raises(DemistoException, match='The value given in the argument is not a valid JSON format:\n'
'{"some_key" "some_value"}'):
validate_json_arg('{"some_key" "some_value"}', '')
@pytest.mark.parametrize('object_to_build, expected_result', [
({}, {}),
({'attributes': {'key_1': 'val_1'}, 'key_2': 'val_2'},
{'key_1': 'val_1', 'key_2': 'val_2'}),
({'attributes': {'key_1': 'val_1'}, 'key_2': 'val_2', 'relationships': [{'key_3': 'val_3'}],
'subobject': {'key_4': 'val_4'}}, {'key_1': 'val_1', 'key_2': 'val_2'})
])
def test_build_human_readable(object_to_build, expected_result):
"""
Given:
    - Case A: A dict with two keys: 'attributes' and 'key_2'.
- Case B: A dict with four keys: 'attributes', 'relationships', 'subobject' and 'key_2'.
When:
- Building the human readable from a response dict.
Then:
- Case A:
1. Keys under the 'attributes' key are extracted to the root level.
2. The second key - 'key_2' still appears in the object.
- Case B: Ensure that:
1. Keys under the 'attributes' key are extracted to the root level.
2. The second key - 'key_2' still appears in the object.
3. That the 'relationships' and 'subobject' keys are missing from the object.
"""
result = build_human_readable(object_to_build)
assert result == expected_result
@pytest.mark.parametrize('args_dict, expected_json_str', [
(
{
"limit": "10",
"page": "2",
"alert_id": "123",
"alert_class": "bgp",
"alert_type": "bgp_hijack",
"classification": "Flash Crowd",
"importance": "1",
"ongoing": "true",
"start_time": "2021-01-11T13:15:00",
"stop_time": "2021-01-12T13:15:00",
},
'/data/attributes/limit=10 AND /data/attributes/page=2 AND /data/attributes/alert_id=123 AND '
'/data/attributes/alert_class=bgp AND /data/attributes/alert_type=bgp_hijack AND '
'/data/attributes/classification=Flash Crowd AND /data/attributes/importance=1 AND '
'/data/attributes/ongoing=true AND /data/attributes/start_time=2021-01-11T13:15:00 AND '
'/data/attributes/stop_time=2021-01-12T13:15:00'
),
(
{
"importance": "1",
"importance_operator": "=",
"start_time": "2021-01-11T13:15:00",
"start_time_operator": ">",
"stop_time": "2021-01-12T13:15:00",
"stop_time_operator": "<"
},
'/data/attributes/importance=1 AND /data/attributes/start_time>2021-01-11T13:15:00 AND '
'/data/attributes/stop_time<2021-01-12T13:15:00'
)
])
def test_build_relationships(args_dict, expected_json_str):
"""
Given:
    - Case A: A dict of possible relationship filters.
- Case B: A dict of possible relationship filters in addition to special allowed operators.
When:
- Building a relationship string representation to be sent in the url query.
Then:
    - Case A: Assert that all filters use the `=` operator and are chained using the `AND` operator.
- Case B: Assert that start_time uses the '>' operator, stop_time uses the '<' operator and importance uses the '='
operator.
"""
result = client.build_data_attribute_filter(args_dict)
assert result == expected_json_str
|
demisto/content
|
Packs/NetscoutArborSightline/Integrations/NetscoutArborSightline/NetscoutArborSightline_test.py
|
Python
|
mit
| 12,191
|
"""
Experiment with the Python debugger, pdb, from the command line
"""
def combine(s1,s2): # define subroutine combine, which...
s3 = s1 + s2 + s1 # sandwiches s2 between copies of s1, ...
s3 = '"' + s3 +'"' # encloses it in double quotes,...
return s3 # and returns it.
a = "aaa"
b = "bbb"
c = "ccc"
final = combine(a,b)
print final
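
# A minimal, hedged sketch of stepping through combine() with pdb, as the module
# docstring suggests; `python -m pdb DemoProgs/pdb2.py` is the assumed
# command-line invocation (Python 2, matching the print statement above), and
# the programmatic equivalent would be:
#
#   import pdb
#   pdb.run('combine("aaa", "bbb")')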
|
sharkySharks/PythonForDevs
|
DemoProgs/pdb2.py
|
Python
|
mit
| 384
|
"""
@file comm_zigbee_basic.py
"""
##
# @addtogroup zigbee
# @brief This is component
# @{
# @addtogroup comm_zigbee
# @brief This is comm_zigbee module
# @{
##
import time
import os
import string
from oeqa.runtime.zigbee import zigbee
from oeqa.oetest import oeRuntimeTest
from oeqa.utils.helper import shell_cmd_timeout
from oeqa.utils.decorators import tag
@tag(TestType="FVT")
class ZigBeeBasic(oeRuntimeTest):
"""
@class ZigBeeBasic
"""
def setUp(self):
''' initialize zigbee class
@fn setUp
@param self
@return
'''
self.zigbee = zigbee.ZigBeeFunction(self.target)
def tearDown(self):
        ''' remove the atmel module after the test
        @fn tearDown
@param self
@return
'''
self.zigbee.remove_atmel_mode()
@tag(FeatureID="IOTOS-1220")
def test_insert_atmel_module(self):
'''Insert atmel module to enable 802.15.4
@fn test_insert_atmel_module
@param self
@return
'''
self.zigbee.remove_cc2520_mode()
self.zigbee.insert_atmel_mode()
##
# @}
# @}
##
|
ostroproject/meta-iotqa
|
lib/oeqa/runtime/zigbee/comm_zigbee_basic.py
|
Python
|
mit
| 1,113
|
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Creature()
result.template = "object/mobile/shared_dressed_bth_spynet_pilot_m_03.iff"
result.attribute_template_id = 9
result.stfName("obj_n","unknown_creature")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
anhstudios/swganh
|
data/scripts/templates/object/mobile/shared_dressed_bth_spynet_pilot_m_03.py
|
Python
|
mit
| 452
|
# -*- coding: utf-8 -*-
import demistomock as demisto
from ExtractDomainAndFQDNFromUrlAndEmail import extract_fqdn, main
import pytest
@pytest.mark.parametrize('input,fqdn', [ # noqa: E501 disable-secrets-detection
('www.static.attackiqtes.com', 'www.static.attackiqtes.com'),
('http:www.static.attackiqtes.com', 'www.static.attackiqtes.com'),
('attackiqtes.co.il', 'attackiqtes.co.il'),
('ftp://www.test.com/test2/dev', 'www.test.com'),
('http://www.test.com/test2/dev', 'www.test.com'),
('www.test.fake', ''),
('www[.]demisto[.]com', 'www.demisto.com'),
('www[.]demisto[.]test2.com', 'www.demisto.test2.com'),
('test.zip', ''),
('https%3A%2F%2Fdulunggakada40[.]com', 'dulunggakada40.com'),
('https%3A%2F%2Fpath.test.com', 'path.test.com'),
('https://urldefense.com/v3/__http://survey.lavulcamktg.cl/index.php/783758', 'survey.lavulcamktg.cl'),
('this.is.test.com', 'this.is.test.com'),
('caseapi.phishlabs.com', 'caseapi.phishlabs.com'),
('www.bücher.de', 'www.bücher.de'),
('https://urldefense.proofpoint.com/v2/url?u=http-3A__go.getpostman.com_y4wULsdG0h0DDMY0Dv00100&d=DwMFaQ&c'
'=ywDJJevdGcjv4rm9P3FcNg&r=s5kA2oIAQRXsacJiBKmTORIWyRN39ZKhobje2GyRgNs&m'
'=vN1dVSiZvEoM9oExtQqEptm9Dbvq9tnjACDZzrBLaWI&s=zroN7KQdBCPBOfhOmv5SP1DDzZKZ1y9I3x4STS5PbHA&e=',
'go.getpostman.com'), # noqa: E501
('www[.]demisto[.]com', 'www.demisto.com'),
('hxxp://www[.]demisto[.]com', 'www.demisto.com'),
('www[.]demisto.test[.]com', 'www.demisto.test.com'),
('https://emea01.safelinks.protection.outlook.com/?url=https%3A%2F%2Ftwitter.com%2FPhilipsBeLux&data=02|01'
'||cb2462dc8640484baf7608d638d2a698|1a407a2d76754d178692b3ac285306e4|0|0|636758874714819880&sdata'
'=dnJiphWFhnAKsk5Ps0bj0p%2FvXVo8TpidtGZcW6t8lDQ%3D&reserved=0%3E%5bcid:image003.gif@01CF4D7F.1DF62650%5d'
'%3C', 'twitter.com'), # noqa: E501 disable-secrets-detection
]) # noqa: E124
def test_extract_fqdn_or_domain(input, fqdn):
extracted_fqdn = extract_fqdn(input)
# extracted_domain = extract_fqdn_or_domain(input, is_domain=True)
assert extracted_fqdn == fqdn
# assert extracted_domain == domain
def test_extract_fqdn_or_domain_empty_indicators(mocker):
mocker.patch.object(demisto, 'args', return_value={'input': '1Ab.Vt'})
mocker.patch.object(demisto, 'results')
main()
results = demisto.results.call_args[0]
assert results[0] == [{'Contents': [], 'ContentsFormat': 'json', 'Type': 1}]
|
demisto/content
|
Packs/CommonScripts/Scripts/ExtractDomainAndFQDNFromUrlAndEmail/ExtractDomainAndFQDNFromUrlAndEmail_test.py
|
Python
|
mit
| 2,497
|
"""
Tests for the application infrastructure
"""
from flask import json
from nose.tools import assert_equal
from .helpers import BaseApplicationTest
class TestApplication(BaseApplicationTest):
def test_index(self):
response = self.client.get('/')
assert 200 == response.status_code
assert 'links' in json.loads(response.get_data())
def test_404(self):
response = self.client.get('/not-found')
assert 404 == response.status_code
def test_bearer_token_is_required(self):
self.do_not_provide_access_token()
response = self.client.get('/')
assert 401 == response.status_code
assert 'WWW-Authenticate' in response.headers
def test_invalid_bearer_token_is_required(self):
self.do_not_provide_access_token()
response = self.client.get(
'/',
headers={'Authorization': 'Bearer invalid-token'})
assert 403 == response.status_code
def test_max_age_is_one_day(self):
response = self.client.get('/')
assert_equal(86400, response.cache_control.max_age)
|
mtekel/digitalmarketplace-api
|
tests/app/test_application.py
|
Python
|
mit
| 1,102
|
for i in range(1<<16):
print(bin(i).count("1"),end=",")
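
# A hedged sketch of how a 16-bit popcount table like the one printed above can
# be consumed; the table and function names are assumptions, not from this repo.
TABLE = [bin(i).count("1") for i in range(1 << 16)]

def popcount32(x):
    """Population count of a 32-bit integer via two table lookups."""
    return TABLE[x & 0xFFFF] + TABLE[(x >> 16) & 0xFFFF]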
|
yekm/bench
|
tasks/despace/despacer/scripts/counts.py
|
Python
|
mit
| 59
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Copyright (c) 2012 University of Oxford
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from redis import Redis
from redis.exceptions import ConnectionError
from time import sleep
WORKERPREFIX = "temp"
HOST = "localhost"
PORT = 6379
DB = 0
import logging
logger = logging.getLogger("redisqueue")
logger.setLevel(logging.INFO)
# create console handler and set level to debug
ch = logging.StreamHandler()
# create formatter
formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
# add formatter to ch
ch.setFormatter(formatter)
# add ch to logger
logger.addHandler(ch)
"""Simple wrapper around a redis queue that gives methods in line with the other Queue-style classes"""
class RedisQueue(object):
def __init__(self, queuename, workername, db=DB, host=HOST, port=PORT, workerprefix=WORKERPREFIX, errorqueue=None):
self.host = host
if isinstance(port, str):
try:
self.port = int(port)
except ValueError:
self.port = PORT
else:
self.port = port
self.queuename = queuename
self.workername = workername
self.workeritem = ":".join([workerprefix, workername])
self.errorqueue = errorqueue
if not errorqueue:
self.errorqueue = queuename
self.db = db
self._initclient()
def _initclient(self):
logger.info("Initialising the redis queue %s for %s" % (self.queuename, self.workername))
logger.info("Host:%s port:%s DB:%s" % (self.host, self.port, self.db))
logger.debug("Debug messages detailing worker queue activity")
self._r = Redis(host=self.host, db=self.db, port=self.port)
def check_connection(self):
#sleep(1)
try:
self._r.info()
except ConnectionError:
self._initclient()
def __len__(self):
if self.inprogress():
return self._r.llen(self.queuename) + 1
else:
return self._r.llen(self.queuename)
def __getitem__(self, index):
#self.check_connection()
return self._r.lrange(self.queuename, index, index)
def inprogress(self):
#sleep(1)
#self.check_connection()
ip = self._r.lrange(self.workeritem, 0, 0)
if ip:
return ip.pop()
else:
return None
def task_complete(self):
#sleep(1)
#self.check_connection()
logger.debug("Task completed by worker %s" % self.workername)
return self._r.rpop(self.workeritem)
def task_failed(self):
#sleep(1)
#self.check_connection()
logger.error("Task FAILED by worker %s" % self.workername)
logger.debug(self.inprogress())
return self._r.rpoplpush(self.workeritem, self.errorqueue)
def push(self, item, to_queue=None):
#sleep(1)
#self.check_connection()
if to_queue:
logger.debug("{%s} put onto queue %s by worker %s" % (item, to_queue,self.workername))
return self._r.lpush(to_queue, item)
else:
logger.debug("{%s} put onto queue %s by worker %s" % (item, self.queuename,self.workername))
return self._r.lpush(self.queuename, item)
def pop(self):
#sleep(1)
#self.check_connection()
logger.debug("In pop - Queuename: %s, workeritem:%s"%(self.queuename, self.workeritem))
if self._r.llen(self.workeritem) == 0:
itemid = self._r.rpoplpush(self.queuename, self.workeritem)
if self.queuename != self.errorqueue:
self._r.lrem(self.errorqueue, itemid)
logger.debug("{%s} pulled from queue %s by worker %s" % (self.inprogress(), self.queuename,self.workername))
else:
logger.debug("{%s} pulled from temporary worker queue by worker %s" % (self.inprogress(), self.workername))
return self.inprogress()
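
# A hedged usage sketch of the queue wrapper above; it assumes a Redis server
# reachable on the default localhost:6379, and the queue/worker names are
# placeholders.
def _example_worker_loop():
    queue = RedisQueue(queuename="harvest", workername="worker-1")
    queue.push(u"item-0001")          # enqueue work
    item = queue.pop()                # moves it onto this worker's temp list
    if item is not None:
        logger.info("processing %s", item)
        queue.task_complete()         # clear the temp list once the item is done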
|
benosteen/RDFDatabank
|
message_workers/redisqueue.py
|
Python
|
mit
| 4,630
|
import praw
import re
import requests
import imgurpython as imgur
urls = ['rateme']
CLIENT_ID = ""
CLIENT_SECRET = ""
r = praw.Reddit(user_agent='punchable_faces_hackbu2016')
with open("imgur_secrets.txt", "r") as secrets:
li = secrets.readline().strip()
CLIENT_ID = li[li.find("=")+1:]
li = secrets.readline().strip()
CLIENT_SECRET = li[li.find("=")+1:]
client = imgur.ImgurClient(CLIENT_ID, CLIENT_SECRET)
with open("neutralfaces.txt", "w+") as f:
for subreddit in urls:
submissions = r.get_subreddit(subreddit).get_hot(limit=1000)
valid_images = []
for submission in submissions:
if not submission.is_self:
is_imgur = re.match(r"https?://(m|i\.)?imgur\.com/", submission.url)
if is_imgur is not None:
# album
match = re.match(r"https?://i\.imgur\.com", submission.url)
if match:
valid_images.append(submission.url)
continue
match = re.match(r"https?://(m\.)?imgur\.com/a/(\w+)", submission.url)
if match:
try:
valid_images.extend([x.link for x in client.get_album_images(match.group(2))])
except imgur.helpers.error.ImgurClientError:
print("404 Not found: {}".format(submission.url))
continue
match = re.match(r"https?://(m\.)?imgur\.com/(\w+)$", submission.url)
if match:
try:
valid_images.append(client.get_image(match.group(2)).link)
except imgur.helpers.error.ImgurClientError:
print("404 Not found: {}".format(submission.url))
f.write("\n".join(valid_images))
|
jpalazz2/punchable-faces
|
scripts/collect_neutral_faces.py
|
Python
|
mit
| 1,889
|
class Conf(object):
_render_url = 'http://127.0.0.1:9009/render'
_render = True
# Indicates that we should rely on Django's settings as the
# canonical reference and use the above defaults as fallbacks.
# Proxying to django.conf.settings allows us to swap out Django's
# tests during tests
_PROXY_DJANGO_SETTINGS = False
@property
def RENDER_URL(self):
if not self._PROXY_DJANGO_SETTINGS:
return self._render_url
from django.conf import settings
if hasattr(settings, 'REACT'):
return settings.REACT.get('RENDER_URL', self._render_url)
return self._render_url
@property
def RENDER(self):
if not self._PROXY_DJANGO_SETTINGS:
return self._render
from django.conf import settings
if hasattr(settings, 'REACT'):
return settings.REACT.get('RENDER', self._render)
return self._render
def configure(self, RENDER_URL=None, RENDER=None):
if RENDER_URL is not None:
self._render_url = RENDER_URL
if RENDER is not None:
self._render = RENDER
settings = Conf()
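
# A hedged usage sketch of the module-level settings object above; the values
# are illustrative. configure() overrides the in-module defaults, while
# RENDER_URL and RENDER only consult Django's settings when
# _PROXY_DJANGO_SETTINGS is enabled.
def _example_configure():
    settings.configure(RENDER_URL='http://127.0.0.1:9009/render', RENDER=False)
    return settings.RENDER_URL, settings.RENDER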
|
markfinger/python-react
|
react/conf.py
|
Python
|
mit
| 1,159
|
import os
import json
from glob import iglob
from decimal import Decimal, InvalidOperation
try:
from collections import OrderedDict
except ImportError:
# Fallback for python 2.6
from ordereddict import OrderedDict
from cnab240 import errors
class CampoBase(object):
def __init__(self):
self._valor = None
@property
def valor(self):
return self._valor
@valor.setter
def valor(self, valor):
if self.formato == 'alfa':
if not isinstance(valor, unicode):
raise errors.TipoError(self, valor)
if len(valor) > self.digitos:
raise errors.NumDigitosExcedidoError(self, valor)
elif self.decimais:
if not isinstance(valor, Decimal):
raise errors.TipoError(self, valor)
num_decimais = valor.as_tuple().exponent * -1
if num_decimais != self.decimais:
raise errors.NumDecimaisError(self, valor)
if len(str(valor).replace('.', '')) > self.digitos:
raise errors.NumDigitosExcedidoError(self, valor)
else:
if not isinstance(valor, (int, long)):
raise errors.TipoError(self, valor)
if len(str(valor)) > self.digitos:
raise errors.NumDigitosExcedidoError(self, valor)
self._valor = valor
def __unicode__(self):
if self.valor is None:
if self.default is not None:
if self.decimais:
self.valor = Decimal('{0:0.{1}f}'.format(self.default,
self.decimais))
else:
self.valor = self.default
else:
raise errors.CampoObrigatorioError(self.nome)
if self.formato == 'alfa' or self.decimais:
if self.decimais:
valor = unicode(self.valor).replace('.', '')
chars_faltantes = self.digitos - len(valor)
return (u'0' * chars_faltantes) + valor
else:
chars_faltantes = self.digitos - len(self.valor)
return self.valor + (u' ' * chars_faltantes)
return u'{0:0{1}d}'.format(self.valor, self.digitos)
def __repr__(self):
return unicode(self)
def __set__(self, instance, value):
self.valor = value
def __get__(self, instance, owner):
return self.valor
def criar_classe_campo(spec):
nome = spec.get('nome')
inicio = spec.get('posicao_inicio') - 1
fim = spec.get('posicao_fim')
attrs = {
'nome': nome,
'inicio': inicio,
'fim': fim,
'digitos': fim - inicio,
'formato': spec.get('formato', 'alfa'),
'decimais': spec.get('decimais', 0),
'default': spec.get('default'),
}
return type(nome.encode('utf8'), (CampoBase,), attrs)
class RegistroBase(object):
def __new__(cls, **kwargs):
campos = OrderedDict()
attrs = {'_campos': campos}
for Campo in cls._campos_cls.values():
campo = Campo()
campos.update({campo.nome: campo})
attrs.update({campo.nome: campo})
new_cls = type(cls.__name__, (cls, ), attrs)
return super(RegistroBase, cls).__new__(new_cls, **kwargs)
def __init__(self, **kwargs):
self.fromdict(kwargs)
def necessario(self):
for campo in self._campos.values():
eh_controle = campo.nome.startswith('controle_') or \
campo.nome.startswith('servico_')
            if not eh_controle and campo.valor is not None:
return True
return False
def todict(self):
data_dict = dict()
for campo in self._campos.values():
if campo.valor is not None:
data_dict[campo.nome] = campo.valor
return data_dict
def fromdict(self, data_dict):
ignore_fields = lambda key: any((
key.startswith('vazio'),
key.startswith('servico_'),
key.startswith('controle_'),
))
for key, value in data_dict.items():
if hasattr(self, key) and not ignore_fields(key):
setattr(self, key, value)
def carregar(self, registro_str):
for campo in self._campos.values():
valor = registro_str[campo.inicio:campo.fim].strip()
if campo.decimais:
exponente = campo.decimais * -1
dec = valor[:exponente] + '.' + valor[exponente:]
try:
campo.valor = Decimal(dec)
except InvalidOperation:
raise # raise custom?
elif campo.formato == 'num':
try:
campo.valor = int(valor)
except ValueError:
raise errors.TipoError(campo, valor)
else:
campo.valor = valor
def __unicode__(self):
return ''.join([unicode(campo) for campo in self._campos.values()])
class Registros(object):
def __init__(self, specs_dirpath):
        # TODO: validate the spec: nome (must be unique for each record),
        # posicao_inicio, posicao_fim, formato (alpha), decimais (0),
        # default (zeros if numeric, blanks if alpha)
registro_filepath_list = iglob(os.path.join(specs_dirpath, '*.json'))
for registro_filepath in registro_filepath_list:
registro_file = open(registro_filepath)
spec = json.load(registro_file)
registro_file.close()
setattr(self, spec.get('nome'), self.criar_classe_registro(spec))
def criar_classe_registro(self, spec):
campos = OrderedDict()
attrs = {'_campos_cls': campos}
cls_name = spec.get('nome').encode('utf8')
campo_specs = spec.get('campos', {})
for key in sorted(campo_specs.iterkeys()):
Campo = criar_classe_campo(campo_specs[key])
entrada = {Campo.nome: Campo}
campos.update(entrada)
return type(cls_name, (RegistroBase, ), attrs)
TracyWebTech/cnab240 | cnab240/registro.py | Python | mit | 6,411
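A minimal sketch of the field machinery above, assuming Python 2 (the module relies on unicode/long) and that the package is importable as cnab240.registro; the spec dict is invented but mirrors the JSON field layout the module loads:

from decimal import Decimal

from cnab240.registro import criar_classe_campo

spec = {
    'nome': u'valor_titulo',   # hypothetical field spec
    'posicao_inicio': 1,
    'posicao_fim': 11,
    'formato': 'num',
    'decimais': 2,
}

Campo = criar_classe_campo(spec)
campo = Campo()
campo.valor = Decimal('1234.56')  # validated against formato, decimais and digitos
print(unicode(campo))             # -> u'00000123456' (zero-padded to 11 digits)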
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.pipeline import ClientRawResponse
from .. import models
class Queries(object):
"""Queries operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
"""
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.config = config
def get_boolean_true(
self, bool_query=False, custom_headers=None, raw=False, **operation_config):
"""Get true Boolean value on path.
:param bool_query: true boolean value
:type bool_query: bool
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/queries/bool/true'
# Construct parameters
query_parameters = {}
query_parameters['boolQuery'] = self._serialize.query("bool_query", bool_query, 'bool')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def get_boolean_false(
self, bool_query=False, custom_headers=None, raw=False, **operation_config):
"""Get false Boolean value on path.
:param bool_query: false boolean value
:type bool_query: bool
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/queries/bool/false'
# Construct parameters
query_parameters = {}
query_parameters['boolQuery'] = self._serialize.query("bool_query", bool_query, 'bool')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def get_boolean_null(
self, bool_query=None, custom_headers=None, raw=False, **operation_config):
"""Get null Boolean value on query (query string should be absent).
:param bool_query: null boolean value
:type bool_query: bool
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/queries/bool/null'
# Construct parameters
query_parameters = {}
if bool_query is not None:
query_parameters['boolQuery'] = self._serialize.query("bool_query", bool_query, 'bool')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def get_int_one_million(
self, int_query=1000000, custom_headers=None, raw=False, **operation_config):
"""Get '1000000' integer value.
:param int_query: '1000000' integer value
:type int_query: int
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/queries/int/1000000'
# Construct parameters
query_parameters = {}
query_parameters['intQuery'] = self._serialize.query("int_query", int_query, 'int')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def get_int_negative_one_million(
self, int_query=-1000000, custom_headers=None, raw=False, **operation_config):
"""Get '-1000000' integer value.
:param int_query: '-1000000' integer value
:type int_query: int
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/queries/int/-1000000'
# Construct parameters
query_parameters = {}
query_parameters['intQuery'] = self._serialize.query("int_query", int_query, 'int')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def get_int_null(
self, int_query=None, custom_headers=None, raw=False, **operation_config):
"""Get null integer value (no query parameter).
:param int_query: null integer value
:type int_query: int
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/queries/int/null'
# Construct parameters
query_parameters = {}
if int_query is not None:
query_parameters['intQuery'] = self._serialize.query("int_query", int_query, 'int')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def get_ten_billion(
self, long_query=10000000000, custom_headers=None, raw=False, **operation_config):
"""Get '10000000000' 64 bit integer value.
:param long_query: '10000000000' 64 bit integer value
:type long_query: long
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/queries/long/10000000000'
# Construct parameters
query_parameters = {}
query_parameters['longQuery'] = self._serialize.query("long_query", long_query, 'long')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def get_negative_ten_billion(
self, long_query=-10000000000, custom_headers=None, raw=False, **operation_config):
"""Get '-10000000000' 64 bit integer value.
:param long_query: '-10000000000' 64 bit integer value
:type long_query: long
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/queries/long/-10000000000'
# Construct parameters
query_parameters = {}
query_parameters['longQuery'] = self._serialize.query("long_query", long_query, 'long')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def get_long_null(
self, long_query=None, custom_headers=None, raw=False, **operation_config):
"""Get 'null 64 bit integer value (no query param in uri).
:param long_query: null 64 bit integer value
:type long_query: long
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/queries/long/null'
# Construct parameters
query_parameters = {}
if long_query is not None:
query_parameters['longQuery'] = self._serialize.query("long_query", long_query, 'long')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def float_scientific_positive(
self, float_query=1.034E+20, custom_headers=None, raw=False, **operation_config):
"""Get '1.034E+20' numeric value.
        :param float_query: '1.034E+20' numeric value
:type float_query: float
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/queries/float/1.034E+20'
# Construct parameters
query_parameters = {}
query_parameters['floatQuery'] = self._serialize.query("float_query", float_query, 'float')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def float_scientific_negative(
self, float_query=-1.034E-20, custom_headers=None, raw=False, **operation_config):
"""Get '-1.034E-20' numeric value.
        :param float_query: '-1.034E-20' numeric value
:type float_query: float
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/queries/float/-1.034E-20'
# Construct parameters
query_parameters = {}
query_parameters['floatQuery'] = self._serialize.query("float_query", float_query, 'float')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def float_null(
self, float_query=None, custom_headers=None, raw=False, **operation_config):
"""Get null numeric value (no query parameter).
:param float_query: null numeric value
:type float_query: float
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/queries/float/null'
# Construct parameters
query_parameters = {}
if float_query is not None:
query_parameters['floatQuery'] = self._serialize.query("float_query", float_query, 'float')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def double_decimal_positive(
self, double_query=9999999.999, custom_headers=None, raw=False, **operation_config):
"""Get '9999999.999' numeric value.
        :param double_query: '9999999.999' numeric value
:type double_query: float
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/queries/double/9999999.999'
# Construct parameters
query_parameters = {}
query_parameters['doubleQuery'] = self._serialize.query("double_query", double_query, 'float')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def double_decimal_negative(
self, double_query=-9999999.999, custom_headers=None, raw=False, **operation_config):
"""Get '-9999999.999' numeric value.
        :param double_query: '-9999999.999' numeric value
:type double_query: float
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/queries/double/-9999999.999'
# Construct parameters
query_parameters = {}
query_parameters['doubleQuery'] = self._serialize.query("double_query", double_query, 'float')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def double_null(
self, double_query=None, custom_headers=None, raw=False, **operation_config):
"""Get null numeric value (no query parameter).
:param double_query: null numeric value
:type double_query: float
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/queries/double/null'
# Construct parameters
query_parameters = {}
if double_query is not None:
query_parameters['doubleQuery'] = self._serialize.query("double_query", double_query, 'float')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def string_unicode(
self, string_query="啊齄丂狛狜隣郎隣兀﨩", custom_headers=None, raw=False, **operation_config):
"""Get '啊齄丂狛狜隣郎隣兀﨩' multi-byte string value.
        :param string_query: '啊齄丂狛狜隣郎隣兀﨩' multi-byte string value
:type string_query: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/queries/string/unicode/'
# Construct parameters
query_parameters = {}
query_parameters['stringQuery'] = self._serialize.query("string_query", string_query, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def string_url_encoded(
self, string_query="begin!*'();:@ &=+$,/?#[]end", custom_headers=None, raw=False, **operation_config):
"""Get 'begin!*'();:@ &=+$,/?#[]end.
:param string_query: 'begin!*'();:@ &=+$,/?#[]end' url encoded string
value
:type string_query: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/queries/string/begin%21%2A%27%28%29%3B%3A%40%20%26%3D%2B%24%2C%2F%3F%23%5B%5Dend'
# Construct parameters
query_parameters = {}
query_parameters['stringQuery'] = self._serialize.query("string_query", string_query, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def string_empty(
self, string_query="", custom_headers=None, raw=False, **operation_config):
"""Get ''.
:param string_query: '' string value
:type string_query: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/queries/string/empty'
# Construct parameters
query_parameters = {}
query_parameters['stringQuery'] = self._serialize.query("string_query", string_query, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def string_null(
self, string_query=None, custom_headers=None, raw=False, **operation_config):
"""Get null (no query parameter in url).
:param string_query: null string value
:type string_query: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/queries/string/null'
# Construct parameters
query_parameters = {}
if string_query is not None:
query_parameters['stringQuery'] = self._serialize.query("string_query", string_query, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def enum_valid(
self, enum_query=None, custom_headers=None, raw=False, **operation_config):
"""Get using uri with query parameter 'green color'.
:param enum_query: 'green color' enum value. Possible values include:
'red color', 'green color', 'blue color'
:type enum_query: str or :class:`UriColor
<fixtures.acceptancetestsurl.models.UriColor>`
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/queries/enum/green%20color'
# Construct parameters
query_parameters = {}
if enum_query is not None:
query_parameters['enumQuery'] = self._serialize.query("enum_query", enum_query, 'UriColor')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def enum_null(
self, enum_query=None, custom_headers=None, raw=False, **operation_config):
"""Get null (no query parameter in url).
:param enum_query: null string value. Possible values include: 'red
color', 'green color', 'blue color'
:type enum_query: str or :class:`UriColor
<fixtures.acceptancetestsurl.models.UriColor>`
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/queries/enum/null'
# Construct parameters
query_parameters = {}
if enum_query is not None:
query_parameters['enumQuery'] = self._serialize.query("enum_query", enum_query, 'UriColor')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def byte_multi_byte(
self, byte_query=None, custom_headers=None, raw=False, **operation_config):
"""Get '啊齄丂狛狜隣郎隣兀﨩' multibyte value as utf-8 encoded byte array.
:param byte_query: '啊齄丂狛狜隣郎隣兀﨩' multibyte value as utf-8 encoded byte
array
:type byte_query: bytearray
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/queries/byte/multibyte'
# Construct parameters
query_parameters = {}
if byte_query is not None:
query_parameters['byteQuery'] = self._serialize.query("byte_query", byte_query, 'bytearray')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def byte_empty(
self, byte_query=bytearray("", encoding="utf-8"), custom_headers=None, raw=False, **operation_config):
"""Get '' as byte array.
:param byte_query: '' as byte array
:type byte_query: bytearray
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/queries/byte/empty'
# Construct parameters
query_parameters = {}
query_parameters['byteQuery'] = self._serialize.query("byte_query", byte_query, 'bytearray')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def byte_null(
self, byte_query=None, custom_headers=None, raw=False, **operation_config):
"""Get null as byte array (no query parameters in uri).
:param byte_query: null as byte array (no query parameters in uri)
:type byte_query: bytearray
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/queries/byte/null'
# Construct parameters
query_parameters = {}
if byte_query is not None:
query_parameters['byteQuery'] = self._serialize.query("byte_query", byte_query, 'bytearray')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def date_valid(
self, date_query, custom_headers=None, raw=False, **operation_config):
"""Get '2012-01-01' as date.
:param date_query: '2012-01-01' as date
:type date_query: date
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/queries/date/2012-01-01'
# Construct parameters
query_parameters = {}
query_parameters['dateQuery'] = self._serialize.query("date_query", date_query, 'date')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def date_null(
self, date_query=None, custom_headers=None, raw=False, **operation_config):
"""Get null as date - this should result in no query parameters in uri.
:param date_query: null as date (no query parameters in uri)
:type date_query: date
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/queries/date/null'
# Construct parameters
query_parameters = {}
if date_query is not None:
query_parameters['dateQuery'] = self._serialize.query("date_query", date_query, 'date')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def date_time_valid(
self, date_time_query, custom_headers=None, raw=False, **operation_config):
"""Get '2012-01-01T01:01:01Z' as date-time.
:param date_time_query: '2012-01-01T01:01:01Z' as date-time
:type date_time_query: datetime
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/queries/datetime/2012-01-01T01%3A01%3A01Z'
# Construct parameters
query_parameters = {}
query_parameters['dateTimeQuery'] = self._serialize.query("date_time_query", date_time_query, 'iso-8601')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def date_time_null(
self, date_time_query=None, custom_headers=None, raw=False, **operation_config):
"""Get null as date-time, should result in no query parameters in uri.
:param date_time_query: null as date-time (no query parameters)
:type date_time_query: datetime
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/queries/datetime/null'
# Construct parameters
query_parameters = {}
if date_time_query is not None:
query_parameters['dateTimeQuery'] = self._serialize.query("date_time_query", date_time_query, 'iso-8601')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def array_string_csv_valid(
self, array_query=None, custom_headers=None, raw=False, **operation_config):
"""Get an array of string ['ArrayQuery1', 'begin!*'();:@ &=+$,/?#[]end' ,
null, ''] using the csv-array format.
:param array_query: an array of string ['ArrayQuery1', 'begin!*'();:@
&=+$,/?#[]end' , null, ''] using the csv-array format
:type array_query: list of str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/queries/array/csv/string/valid'
# Construct parameters
query_parameters = {}
if array_query is not None:
query_parameters['arrayQuery'] = self._serialize.query("array_query", array_query, '[str]', div=',')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def array_string_csv_null(
self, array_query=None, custom_headers=None, raw=False, **operation_config):
"""Get a null array of string using the csv-array format.
:param array_query: a null array of string using the csv-array format
:type array_query: list of str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/queries/array/csv/string/null'
# Construct parameters
query_parameters = {}
if array_query is not None:
query_parameters['arrayQuery'] = self._serialize.query("array_query", array_query, '[str]', div=',')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def array_string_csv_empty(
self, array_query=None, custom_headers=None, raw=False, **operation_config):
"""Get an empty array [] of string using the csv-array format.
:param array_query: an empty array [] of string using the csv-array
format
:type array_query: list of str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/queries/array/csv/string/empty'
# Construct parameters
query_parameters = {}
if array_query is not None:
query_parameters['arrayQuery'] = self._serialize.query("array_query", array_query, '[str]', div=',')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def array_string_ssv_valid(
self, array_query=None, custom_headers=None, raw=False, **operation_config):
"""Get an array of string ['ArrayQuery1', 'begin!*'();:@ &=+$,/?#[]end' ,
null, ''] using the ssv-array format.
:param array_query: an array of string ['ArrayQuery1', 'begin!*'();:@
&=+$,/?#[]end' , null, ''] using the ssv-array format
:type array_query: list of str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/queries/array/ssv/string/valid'
# Construct parameters
query_parameters = {}
if array_query is not None:
query_parameters['arrayQuery'] = self._serialize.query("array_query", array_query, '[str]', div=' ')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def array_string_tsv_valid(
self, array_query=None, custom_headers=None, raw=False, **operation_config):
"""Get an array of string ['ArrayQuery1', 'begin!*'();:@ &=+$,/?#[]end' ,
null, ''] using the tsv-array format.
:param array_query: an array of string ['ArrayQuery1', 'begin!*'();:@
&=+$,/?#[]end' , null, ''] using the tsv-array format
:type array_query: list of str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/queries/array/tsv/string/valid'
# Construct parameters
query_parameters = {}
if array_query is not None:
query_parameters['arrayQuery'] = self._serialize.query("array_query", array_query, '[str]', div=' ')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
def array_string_pipes_valid(
self, array_query=None, custom_headers=None, raw=False, **operation_config):
"""Get an array of string ['ArrayQuery1', 'begin!*'();:@ &=+$,/?#[]end' ,
null, ''] using the pipes-array format.
:param array_query: an array of string ['ArrayQuery1', 'begin!*'();:@
&=+$,/?#[]end' , null, ''] using the pipes-array format
:type array_query: list of str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
"""
# Construct URL
url = '/queries/array/pipes/string/valid'
# Construct parameters
query_parameters = {}
if array_query is not None:
query_parameters['arrayQuery'] = self._serialize.query("array_query", array_query, '[str]', div='|')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response
John-Hart/autorest | src/generator/AutoRest.Python.Tests/Expected/AcceptanceTests/Url/autoresturltestservice/operations/queries.py | Python | mit | 55,660
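Each generated method above builds a URL, serializes its query parameter, issues a GET and raises ErrorException on any status other than 200. A rough requests-based equivalent of get_boolean_true(), assuming the AutoRest acceptance-test server is listening locally on port 3000 (that address is an assumption):

import requests

BASE_URL = "http://localhost:3000"  # assumed local test-server address

response = requests.get(
    BASE_URL + "/queries/bool/true",
    params={"boolQuery": "true"},
    headers={"Content-Type": "application/json; charset=utf-8"},
)
if response.status_code != 200:
    raise RuntimeError("unexpected status: %d" % response.status_code)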
# -*- coding: utf-8 -*-
__version__ = '$Id$'
# The wikis of chapters of the Wikimedia Foundation, hosted at xy.wikimedia.org URLs
from pywikibot import family
class Family(family.Family):
def __init__(self):
family.Family.__init__(self)
self.name = 'wikimediachapter'
self.countries = [
'ar', 'bd', 'co', 'dk', 'fi', 'mk', 'mx', 'nl', 'no', 'nyc', 'pl',
'rs', 'ru', 'se', 'ua', 'uk', 've',
]
self.countrylangs = {
'ar': 'es', 'bd': 'bn', 'co': 'es', 'dk': 'da', 'fi': 'fi',
'mk': 'mk', 'mx': 'es', 'nl': 'nl', 'no': 'no', 'nyc': 'en',
'pl': 'pl', 'rs': 'sr', 'ru': 'ru', 'se': 'sv', 'ua': 'uk',
'uk': 'en-gb', 've': 'en',
}
self.langs = dict([(country, '%s.wikimedia.org' % country)
for country in self.countries])
pywikibot/core-migration-example | pywikibot/families/wikimedia_family.py | Python | mit | 882
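For reference, the langs mapping built in __init__ above just maps each country code to its chapter hostname; a two-entry illustration that needs no pywikibot import:

countries = ['ar', 'uk']
langs = dict([(country, '%s.wikimedia.org' % country) for country in countries])
print(langs)  # -> {'ar': 'ar.wikimedia.org', 'uk': 'uk.wikimedia.org'}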
import cv2
import numpy as np
from cam import OpenCV_Cam
def get_mask(img, lower, upper, blur=0):
    # Copy the bounds so the caller's lists are not mutated on every frame.
    lower = list(lower)
    upper = list(upper)
    # A negative hue lower bound is handled by shifting the hue channel up.
    if lower[0] < 0:
        offset = -lower[0]
    else:
        offset = 0
    lower[0] = lower[0] + offset
    upper[0] = upper[0] + offset
    hsv_img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    if blur > 0:
        # cv2.blur requires a positive kernel size, so only blur when asked to.
        hsv_img = cv2.blur(hsv_img, (blur, blur))
    shift = np.full_like(img, 0, np.uint8)
    shift[:, :, 0] = offset
    hsv_img = hsv_img + shift
    lower = np.array(lower, dtype="uint8")
    upper = np.array(upper, dtype="uint8")
    mask = cv2.inRange(hsv_img, lower, upper)
    return mask
if __name__ == '__main__':
cam = OpenCV_Cam()
red_bound = ([-5, 150, 0], [15, 255, 255])
green_bound = ([80, 100, 0], [105, 255, 255])
while True:
image = cam.read()
red_mask = get_mask(image, *red_bound)
green_mask = get_mask(image, *green_bound)
masks = cv2.bitwise_or(red_mask, green_mask)
cv2.imshow('masks', np.hstack([red_mask, green_mask]))
output = cv2.bitwise_and(image, image, mask = masks)
# show the images
cv2.imshow("images", np.hstack([image, output]))
k = cv2.waitKey(10)
if k == 27:
break
ironbox360/PyCV-time | experiments/red-green-heading/get_color_mask.py | Python | mit | 1,237
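A quick sketch of applying get_mask above to a single image file instead of the camera stream; the module name get_color_mask and the file names are assumptions made for illustration:

import cv2

from get_color_mask import get_mask  # assumes the file above is importable under this name

image = cv2.imread("sample.jpg")      # hypothetical input image
red_mask = get_mask(image, [-5, 150, 0], [15, 255, 255], blur=3)
red_only = cv2.bitwise_and(image, image, mask=red_mask)
cv2.imwrite("sample_red_only.png", red_only)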
# *****************************************************************************
# Copyright (c) 2019 IBM Corporation and other Contributors.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Eclipse Public License v1.0
# which accompanies this distribution, and is available at
# http://www.eclipse.org/legal/epl-v10.html
# *****************************************************************************
from wiotp.sdk.api.actions.actions import Actions
ibm-watson-iot/iot-python | src/wiotp/sdk/api/actions/__init__.py | Python | epl-1.0 | 516
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'snow'
fairy-rui/oj | core/judgesite/__init__.py | Python | gpl-2.0 | 67
import os
try:
import autotest.common as common
except ImportError:
import common
# High level way of installing each autotest component
import client.setup
import frontend.setup
import cli.setup
import server.setup
import scheduler.setup
import database_legacy.setup
import tko.setup
import utils.setup
import mirror.setup
import installation_support.setup
from distutils.core import setup
from sphinx.setup_command import BuildDoc
cmdclass = {'build_doc': BuildDoc}
from autotest.client.shared import version
def _combine_dicts(list_dicts):
result_dict = {}
for d in list_dicts:
for k in d:
result_dict[k] = d[k]
return result_dict
def _fix_data_paths(package_data_dict):
'''
Corrects package data paths
When the package name is compound, and the package contents, that
is, file paths, contain the same path name found in the package
name, setuptools thinks there's an extra directory. This checks
that condition and adjusts (strips) the 1st directory name.
'''
result = {}
for package_name, package_content in package_data_dict.items():
package_structure = package_name.split('.')
package_structure_1st_level = package_structure[1]
result[package_name] = []
for p in package_content:
path_structure = p.split(os.path.sep)
path_structure_1st_level = path_structure[0]
if package_structure_1st_level == path_structure_1st_level:
path = os.path.join(*path_structure[1:])
else:
path = p
result[package_name].append(path)
return result
def get_package_dir():
return _combine_dicts([client.setup.get_package_dir(),
frontend.setup.get_package_dir(),
cli.setup.get_package_dir(),
server.setup.get_package_dir(),
scheduler.setup.get_package_dir(),
database_legacy.setup.get_package_dir(),
tko.setup.get_package_dir(),
utils.setup.get_package_dir(),
mirror.setup.get_package_dir()])
def get_packages():
return (client.setup.get_packages() +
frontend.setup.get_packages() +
cli.setup.get_packages() +
server.setup.get_packages() +
scheduler.setup.get_packages() +
database_legacy.setup.get_packages() +
tko.setup.get_packages() +
utils.setup.get_packages() +
mirror.setup.get_packages() +
installation_support.setup.get_packages())
def get_data_files():
return (client.setup.get_data_files() +
tko.setup.get_data_files() +
utils.setup.get_data_files() +
mirror.setup.get_data_files())
def get_package_data():
return _combine_dicts([
_fix_data_paths(client.setup.get_package_data()),
_fix_data_paths(frontend.setup.get_package_data()),
_fix_data_paths(cli.setup.get_package_data()),
_fix_data_paths(server.setup.get_package_data()),
_fix_data_paths(scheduler.setup.get_package_data()),
_fix_data_paths(database_legacy.setup.get_package_data()),
_fix_data_paths(tko.setup.get_package_data()),
_fix_data_paths(utils.setup.get_package_data())
])
def get_scripts():
return (client.setup.get_scripts() +
frontend.setup.get_scripts() +
cli.setup.get_scripts() +
server.setup.get_scripts() +
scheduler.setup.get_scripts() +
database_legacy.setup.get_scripts() +
tko.setup.get_scripts() +
installation_support.setup.get_scripts())
def run():
setup(name='autotest',
description='Autotest test framework',
maintainer='Lucas Meneghel Rodrigues',
maintainer_email='lmr@redhat.com',
version=version.get_version(),
url='http://autotest.github.com',
package_dir=get_package_dir(),
package_data=get_package_data(),
          packages=get_packages(),
scripts=get_scripts(),
data_files=get_data_files(),
cmdclass=cmdclass,
command_options={
'build_doc': {
'source_dir': ('setup.py', 'documentation/source')
}
}
)
if __name__ == '__main__':
run()
yangdongsheng/autotest | setup.py | Python | gpl-2.0 | 4,500
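The _fix_data_paths helper above strips the leading directory from a data path when it repeats the second component of the package name. A stand-alone illustration of that rule (the package and path names are invented):

import os

package_name = 'autotest.client'
data_path = 'client/config/settings.ini'

first_level = package_name.split('.')[1]  # 'client'
parts = data_path.split(os.path.sep)
if parts[0] == first_level:
    fixed = os.path.join(*parts[1:])      # 'config/settings.ini'
else:
    fixed = data_path
print(fixed)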
from func import *
logger = logging.getLogger('neuromodulation')
startbuild = datetime.datetime.now()
# Connect the volume transmitter to the parts
vt_dopa_ex = nest.Create('volume_transmitter')
vt_dopa_in = nest.Create('volume_transmitter')
vt_sero_ex = nest.Create('volume_transmitter')
vt_sero_in = nest.Create('volume_transmitter')
DOPA_synparams_ex['vt'] = vt_dopa_ex[0]
DOPA_synparams_in['vt'] = vt_dopa_in[0]
SERO_synparams_ex['vt'] = vt_sero_ex[0]
SERO_synparams_in['vt'] = vt_sero_in[0]
nest.CopyModel('stdp_dopamine_synapse', dopa_model_ex, DOPA_synparams_ex)
nest.CopyModel('stdp_dopamine_synapse', dopa_model_in, DOPA_synparams_in)
nest.CopyModel('stdp_serotonine_synapse', sero_model_in, SERO_synparams_in)
nest.CopyModel('stdp_serotonine_synapse', sero_model_ex, SERO_synparams_ex)
nest.Connect(lateralcortex[lateralcortex_5HT][k_IDs], vt_sero_in)
nest.Connect(Basalganglia[Basalganglia_5HT][k_IDs], vt_sero_in)
nest.Connect(entorhinalcortex[entorhinalcortex_5HT][k_IDs], vt_sero_in)
nest.Connect(medialcortex[medialcortex_5HT][k_IDs], vt_sero_in)
nest.Connect(locuscoeruleus[locuscoeruleus_5HT][k_IDs], vt_sero_in)
nest.Connect(locuscoeruleus[locuscoeruleus_5HT][k_IDs], vt_sero_ex)
nest.Connect(locuscoeruleus[locuscoeruleus_DA][k_IDs], vt_dopa_ex)
nest.Connect(ventraltegmentalarea[ventraltegmentalarea_5HT][k_IDs], vt_sero_in)
nest.Connect(ventraltegmentalarea[ventraltegmentalarea_DA][k_IDs], vt_dopa_ex)
nest.Connect(Cerebralcortex[Cerebralcortex_5HT][k_IDs], vt_sero_in)
nest.Connect(Thalamus[Thalamus_5HT][k_IDs], vt_sero_in)
nest.Connect(insularcortex[insularcortex_5HT][k_IDs], vt_sero_in)
nest.Connect(septum[septum_5HT][k_IDs], vt_sero_in)
nest.Connect(hypothalamus[hypothalamus_5HT][k_IDs], vt_sero_in)
nest.Connect(hippocampus[hippocampus_5HT][k_IDs], vt_sero_in)
nest.Connect(neocortex[neocortex_5HT][k_IDs], vt_sero_in)
nest.Connect(DR[DR_5HT][k_IDs], vt_sero_in)
nest.Connect(MnR[MnR_5HT][k_IDs], vt_sero_in)
nest.Connect(pons[pons_5HT][k_IDs], vt_sero_in)
nest.Connect(Periaqueductalgray[Periaqueductalgray_5HT][k_IDs], vt_sero_in)
nest.Connect(prefrontalcortex[prefrontalcortex_5HT][k_IDs], vt_sero_in)
nest.Connect(prefrontalcortex[prefrontalcortex_DA][k_IDs], vt_sero_in)
nest.Connect(striatum[striatum_5HT][k_IDs], vt_dopa_ex)
nest.Connect(striatum[striatum_DA][k_IDs], vt_dopa_ex)
nest.Connect(substantianigra[substantianigra_5HT][k_IDs], vt_sero_in)
nest.Connect(substantianigra[substantianigra_DA][k_IDs], vt_dopa_ex)
connect(lateralcortex[lateralcortex_5HT], DR[DR_5HT], syn_type=SERO_in, weight_coef=1.000000000)
connect(Basalganglia[Basalganglia_5HT], Rostralgroup[Rostralgroup_A1], syn_type=SERO_in, weight_coef=1.000000000)
connect(Basalganglia[Basalganglia_5HT], Rostralgroup[Rostralgroup_A2], syn_type=SERO_in, weight_coef=1.000000000)
connect(entorhinalcortex[entorhinalcortex_5HT], DR[DR_5HT], syn_type=SERO_in, weight_coef=1.000000000)
connect(medialcortex[medialcortex_5HT], DR[DR_5HT], syn_type=SERO_in, weight_coef=1.000000000)
connect(locuscoeruleus[locuscoeruleus_5HT], locuscoeruleus[locuscoeruleus_NA], syn_type=SERO_in, weight_coef=1.000000000)
connect(locuscoeruleus[locuscoeruleus_5HT], locuscoeruleus[locuscoeruleus_DA], syn_type=SERO_ex, weight_coef=1.000000000)
connect(locuscoeruleus[locuscoeruleus_DA], DR[DR_5HT], syn_type=DA_ex, weight_coef=1.000000000)
connect(ventraltegmentalarea[ventraltegmentalarea_5HT], DR[DR_5HT], syn_type=SERO_in, weight_coef=1.000000000)
connect(ventraltegmentalarea[ventraltegmentalarea_5HT], ventraltegmentalarea[ventraltegmentalarea_DA], syn_type=SERO_in, weight_coef=1.000000000)
connect(ventraltegmentalarea[ventraltegmentalarea_DA], nucleusaccumbens[nucleusaccumbens_5HT], syn_type=DA_ex, weight_coef=1.000000000)
connect(ventraltegmentalarea[ventraltegmentalarea_DA], nucleusaccumbens[nucleusaccumbens_DA], syn_type=DA_ex, weight_coef=1.000000000)
connect(ventraltegmentalarea[ventraltegmentalarea_DA], striatum[striatum_5HT], syn_type=DA_ex, weight_coef=1.000000000)
connect(ventraltegmentalarea[ventraltegmentalarea_DA], striatum[striatum_DA], syn_type=DA_ex, weight_coef=1.000000000)
connect(ventraltegmentalarea[ventraltegmentalarea_DA], prefrontalcortex[prefrontalcortex_5HT], syn_type=DA_ex, weight_coef=1.000000000)
connect(ventraltegmentalarea[ventraltegmentalarea_DA], prefrontalcortex[prefrontalcortex_DA], syn_type=DA_ex, weight_coef=1.000000000)
connect(Cerebralcortex[Cerebralcortex_5HT], DR[DR_5HT], syn_type=SERO_in, weight_coef=1.000000000)
connect(Cerebralcortex[Cerebralcortex_5HT], striatum[striatum_5HT], syn_type=SERO_in, weight_coef=1.000000000)
connect(Cerebralcortex[Cerebralcortex_5HT], striatum[striatum_DA], syn_type=SERO_in, weight_coef=1.000000000)
connect(Cerebralcortex[Cerebralcortex_5HT], substantianigra[substantianigra_5HT], syn_type=SERO_in, weight_coef=1.000000000)
connect(Cerebralcortex[Cerebralcortex_5HT], substantianigra[substantianigra_DA], syn_type=SERO_in, weight_coef=1.000000000)
connect(Cerebralcortex[Cerebralcortex_5HT], Basalganglia[Basalganglia_5HT], syn_type=SERO_in, weight_coef=1.000000000)
connect(Thalamus[Thalamus_5HT], medialcortex[medialcortex_5HT], syn_type=SERO_in, weight_coef=1.000000000)
connect(Thalamus[Thalamus_5HT], neocortex[neocortex_5HT], syn_type=SERO_in, weight_coef=1.000000000)
connect(Thalamus[Thalamus_5HT], lateralcortex[lateralcortex_5HT], syn_type=SERO_in, weight_coef=1.000000000)
connect(Thalamus[Thalamus_5HT], entorhinalcortex[entorhinalcortex_5HT], syn_type=SERO_in, weight_coef=1.000000000)
connect(Thalamus[Thalamus_5HT], prefrontalcortex[prefrontalcortex_5HT], syn_type=SERO_in, weight_coef=1.000000000)
connect(Thalamus[Thalamus_5HT], prefrontalcortex[prefrontalcortex_DA], syn_type=SERO_in, weight_coef=1.000000000)
connect(Thalamus[Thalamus_5HT], insularcortex[insularcortex_5HT], syn_type=SERO_in, weight_coef=1.000000000)
connect(Thalamus[Thalamus_5HT], Cerebralcortex[Cerebralcortex_5HT], syn_type=SERO_in, weight_coef=1.000000000)
connect(insularcortex[insularcortex_5HT], DR[DR_5HT], syn_type=SERO_in, weight_coef=1.000000000)
connect(septum[septum_5HT], DR[DR_5HT], syn_type=SERO_in, weight_coef=1.000000000)
connect(septum[septum_5HT], MnR[MnR_5HT], syn_type=SERO_in, weight_coef=1.000000000)
connect(septum[septum_5HT], Rostralgroup[Rostralgroup_A1], syn_type=SERO_in, weight_coef=1.000000000)
connect(septum[septum_5HT], Rostralgroup[Rostralgroup_A2], syn_type=SERO_in, weight_coef=1.000000000)
connect(hypothalamus[hypothalamus_5HT], DR[DR_5HT], syn_type=SERO_in, weight_coef=1.000000000)
connect(hippocampus[hippocampus_5HT], DR[DR_5HT], syn_type=SERO_in, weight_coef=1.000000000)
connect(neocortex[neocortex_5HT], DR[DR_5HT], syn_type=SERO_in, weight_coef=1.000000000)
connect(DR[DR_5HT], lateralcortex[lateralcortex_5HT], syn_type=SERO_in, weight_coef=1.000000000)
connect(DR[DR_5HT], entorhinalcortex[entorhinalcortex_5HT], syn_type=SERO_in, weight_coef=1.000000000)
connect(DR[DR_5HT], prefrontalcortex[prefrontalcortex_5HT], syn_type=SERO_in, weight_coef=1.000000000)
connect(DR[DR_5HT], prefrontalcortex[prefrontalcortex_DA], syn_type=SERO_in, weight_coef=1.000000000)
connect(DR[DR_5HT], septum[septum_5HT], syn_type=SERO_in, weight_coef=1.000000000)
connect(DR[DR_5HT], lateraltegmentalarea[lateraltegmentalarea_5HT], syn_type=SERO_in, weight_coef=1.000000000)
connect(DR[DR_5HT], nucleusaccumbens[nucleusaccumbens_5HT], syn_type=SERO_in, weight_coef=1.000000000)
connect(DR[DR_5HT], nucleusaccumbens[nucleusaccumbens_DA], syn_type=SERO_in, weight_coef=1.000000000)
connect(DR[DR_5HT], striatum[striatum_5HT], syn_type=SERO_in, weight_coef=1.000000000)
connect(DR[DR_5HT], striatum[striatum_DA], syn_type=SERO_in, weight_coef=1.000000000)
connect(DR[DR_5HT], bednucleusofthestriaterminalis[bednucleusofthestriaterminalis_5HT], syn_type=SERO_in, weight_coef=1.000000000)
connect(DR[DR_5HT], Thalamus[Thalamus_5HT], syn_type=SERO_in, weight_coef=1.000000000)
connect(DR[DR_5HT], Basalganglia[Basalganglia_5HT], syn_type=SERO_in, weight_coef=1.000000000)
connect(DR[DR_5HT], amygdala[amygdala_5HT], syn_type=SERO_in, weight_coef=1.000000000)
connect(DR[DR_5HT], hippocampus[hippocampus_5HT], syn_type=SERO_in, weight_coef=1.000000000)
connect(MnR[MnR_5HT], ventraltegmentalarea[ventraltegmentalarea_5HT], syn_type=SERO_in, weight_coef=1.000000000)
connect(MnR[MnR_5HT], ventraltegmentalarea[ventraltegmentalarea_DA], syn_type=SERO_in, weight_coef=1.000000000)
connect(MnR[MnR_5HT], hypothalamus[hypothalamus_5HT], syn_type=SERO_in, weight_coef=1.000000000)
connect(MnR[MnR_5HT], Thalamus[Thalamus_5HT], syn_type=SERO_in, weight_coef=1.000000000)
connect(MnR[MnR_5HT], neocortex[neocortex_5HT], syn_type=SERO_in, weight_coef=1.000000000)
connect(MnR[MnR_5HT], medialcortex[medialcortex_5HT], syn_type=SERO_in, weight_coef=1.000000000)
connect(MnR[MnR_5HT], Cerebralcortex[Cerebralcortex_5HT], syn_type=SERO_in, weight_coef=1.000000000)
connect(MnR[MnR_5HT], hippocampus[hippocampus_5HT], syn_type=SERO_in, weight_coef=1.000000000)
connect(MnR[MnR_5HT], insularcortex[insularcortex_5HT], syn_type=SERO_in, weight_coef=1.000000000)
connect(pons[pons_5HT], MnR[MnR_5HT], syn_type=SERO_in, weight_coef=1.000000000)
connect(Periaqueductalgray[Periaqueductalgray_5HT], MnR[MnR_5HT], syn_type=SERO_in, weight_coef=1.000000000)
connect(prefrontalcortex[prefrontalcortex_5HT], DR[DR_5HT], syn_type=SERO_in, weight_coef=1.000000000)
connect(prefrontalcortex[prefrontalcortex_DA], DR[DR_5HT], syn_type=SERO_in, weight_coef=1.000000000)
connect(striatum[striatum_5HT], striatum[striatum_DA], syn_type=DA_ex, weight_coef=1.000000000)
connect(striatum[striatum_DA], substantianigra[substantianigra_5HT], syn_type=DA_ex, weight_coef=1.000000000)
connect(striatum[striatum_DA], substantianigra[substantianigra_DA], syn_type=DA_ex, weight_coef=1.000000000)
connect(substantianigra[substantianigra_5HT], substantianigra[substantianigra_DA], syn_type=SERO_in, weight_coef=1.000000000)
connect(substantianigra[substantianigra_DA], nucleusaccumbens[nucleusaccumbens_5HT], syn_type=DA_ex, weight_coef=1.000000000)
connect(substantianigra[substantianigra_DA], nucleusaccumbens[nucleusaccumbens_DA], syn_type=DA_ex, weight_coef=1.000000000)
connect(substantianigra[substantianigra_DA], striatum[striatum_5HT], syn_type=DA_ex, weight_coef=1.000000000)
connect(substantianigra[substantianigra_DA], striatum[striatum_DA], syn_type=DA_ex, weight_coef=1.000000000)
logger.debug("* * * Creating spike generators...")
connect_generator(Thalamus[Thalamus_5HT], startTime=200., stopTime=600., rate=200.000000000, coef_part=1.0)
logger.debug("* * * Attaching spikes detector")
logger.debug("* * * Attaching multimeters")
connect_detector(lateralcortex[lateralcortex_5HT])
connect_multimeter(lateralcortex[lateralcortex_5HT])
connect_detector(Basalganglia[Basalganglia_5HT])
connect_multimeter(Basalganglia[Basalganglia_5HT])
connect_detector(entorhinalcortex[entorhinalcortex_5HT])
connect_multimeter(entorhinalcortex[entorhinalcortex_5HT])
connect_detector(medialcortex[medialcortex_5HT])
connect_multimeter(medialcortex[medialcortex_5HT])
connect_detector(locuscoeruleus[locuscoeruleus_5HT])
connect_multimeter(locuscoeruleus[locuscoeruleus_5HT])
connect_detector(locuscoeruleus[locuscoeruleus_DA])
connect_multimeter(locuscoeruleus[locuscoeruleus_DA])
connect_detector(locuscoeruleus[locuscoeruleus_NA])
connect_multimeter(locuscoeruleus[locuscoeruleus_NA])
connect_detector(ventraltegmentalarea[ventraltegmentalarea_5HT])
connect_multimeter(ventraltegmentalarea[ventraltegmentalarea_5HT])
connect_detector(ventraltegmentalarea[ventraltegmentalarea_DA])
connect_multimeter(ventraltegmentalarea[ventraltegmentalarea_DA])
connect_detector(nucleusaccumbens[nucleusaccumbens_5HT])
connect_multimeter(nucleusaccumbens[nucleusaccumbens_5HT])
connect_detector(nucleusaccumbens[nucleusaccumbens_DA])
connect_multimeter(nucleusaccumbens[nucleusaccumbens_DA])
connect_detector(Cerebralcortex[Cerebralcortex_5HT])
connect_multimeter(Cerebralcortex[Cerebralcortex_5HT])
connect_detector(Thalamus[Thalamus_5HT])
connect_multimeter(Thalamus[Thalamus_5HT])
connect_detector(insularcortex[insularcortex_5HT])
connect_multimeter(insularcortex[insularcortex_5HT])
connect_detector(Rostralgroup[Rostralgroup_A1])
connect_multimeter(Rostralgroup[Rostralgroup_A1])
connect_detector(Rostralgroup[Rostralgroup_A2])
connect_multimeter(Rostralgroup[Rostralgroup_A2])
connect_detector(septum[septum_5HT])
connect_multimeter(septum[septum_5HT])
connect_detector(hypothalamus[hypothalamus_5HT])
connect_multimeter(hypothalamus[hypothalamus_5HT])
connect_detector(hippocampus[hippocampus_5HT])
connect_multimeter(hippocampus[hippocampus_5HT])
connect_detector(lateraltegmentalarea[lateraltegmentalarea_5HT])
connect_multimeter(lateraltegmentalarea[lateraltegmentalarea_5HT])
connect_detector(neocortex[neocortex_5HT])
connect_multimeter(neocortex[neocortex_5HT])
connect_detector(bednucleusofthestriaterminalis[bednucleusofthestriaterminalis_5HT])
connect_multimeter(bednucleusofthestriaterminalis[bednucleusofthestriaterminalis_5HT])
connect_detector(DR[DR_5HT])
connect_multimeter(DR[DR_5HT])
connect_detector(MnR[MnR_5HT])
connect_multimeter(MnR[MnR_5HT])
connect_detector(reticularformation[reticularformation_5HT])
connect_multimeter(reticularformation[reticularformation_5HT])
connect_detector(pons[pons_5HT])
connect_multimeter(pons[pons_5HT])
connect_detector(Periaqueductalgray[Periaqueductalgray_5HT])
connect_multimeter(Periaqueductalgray[Periaqueductalgray_5HT])
connect_detector(prefrontalcortex[prefrontalcortex_5HT])
connect_multimeter(prefrontalcortex[prefrontalcortex_5HT])
connect_detector(prefrontalcortex[prefrontalcortex_DA])
connect_multimeter(prefrontalcortex[prefrontalcortex_DA])
connect_detector(striatum[striatum_5HT])
connect_multimeter(striatum[striatum_5HT])
connect_detector(striatum[striatum_DA])
connect_multimeter(striatum[striatum_DA])
connect_detector(amygdala[amygdala_5HT])
connect_multimeter(amygdala[amygdala_5HT])
connect_detector(substantianigra[substantianigra_5HT])
connect_multimeter(substantianigra[substantianigra_5HT])
connect_detector(substantianigra[substantianigra_DA])
connect_multimeter(substantianigra[substantianigra_DA])
endbuild = datetime.datetime.now()
simulate()
get_log(startbuild, endbuild)
save(GUI=status_gui)
|
vitaliykomarov/NEUCOGAR
|
nest/serotonin/scripts/generated_scripts/neuromodulaton.py
|
Python
|
gpl-2.0
| 14,235
|
# -*- coding: utf-8 -*-
'''
Exodus Add-on
Copyright (C) 2016 Exodus
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re,unicodedata
def get(title):
if title == None: return
title = re.sub('&#(\d+);', '', title)
title = re.sub('(&#[0-9]+)([^;^0-9]+)', '\\1;\\2', title)
    title = title.replace('&quot;', '\"').replace('&amp;', '&')
title = re.sub('\n|([[].+?[]])|([(].+?[)])|\s(vs|v[.])\s|(:|;|-|"|,|\'|\_|\.|\?)|\s', '', title).lower()
return title
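# Rough illustration of get() (the example title is made up): bracketed or
# parenthesised tags, punctuation and whitespace are stripped and the result
# is lower-cased, so get('The Matrix (1999)') would yield 'thematrix'.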
def get_simple(title):
if title == None: return
title = title.lower()
title = re.sub('(\d{4})', '', title)
title = re.sub('&#(\d+);', '', title)
title = re.sub('(&#[0-9]+)([^;^0-9]+)', '\\1;\\2', title)
    title = title.replace('&quot;', '\"').replace('&amp;', '&')
title = re.sub('\n|\(|\)|\[|\]|\{|\}|\s(vs|v[.])\s|(:|;|-|"|,|\'|\_|\.|\?)|\s', '', title).lower()
return title
def getsearch(title):
if title == None: return
title = title.lower()
title = re.sub('&#(\d+);', '', title)
title = re.sub('(&#[0-9]+)([^;^0-9]+)', '\\1;\\2', title)
    title = title.replace('&quot;', '\"').replace('&amp;', '&')
title = re.sub('\\\|/|-|:|;|\*|\?|"|\'|<|>|\|', '', title).lower()
return title
def query(title):
if title == None: return
title = title.replace('\'', '').rsplit(':', 1)[0]
return title
def normalize(title):
try:
try: return title.decode('ascii').encode("utf-8")
except: pass
return str( ''.join(c for c in unicodedata.normalize('NFKD', unicode( title.decode('utf-8') )) if unicodedata.category(c) != 'Mn') )
except:
return title
|
MoRgUiJu/morguiju.repo
|
plugin.video.exodus/resources/lib/modules/cleantitle.py
|
Python
|
gpl-2.0
| 2,253
|
#mouseHandler.py
#A part of NonVisual Desktop Access (NVDA)
#Copyright (C) 2006-2007 NVDA Contributors <http://www.nvda-project.org/>
#This file is covered by the GNU General Public License.
#See the file COPYING for more details.
import time
import tones
import ctypes
import winUser
import queueHandler
import api
import screenBitmap
import speech
import globalVars
import eventHandler
from logHandler import log
import config
import winInputHook
WM_MOUSEMOVE=0x0200
WM_LBUTTONDOWN=0x0201
WM_LBUTTONUP=0x0202
WM_LBUTTONDBLCLK=0x0203
WM_RBUTTONDOWN=0x0204
WM_RBUTTONUP=0x0205
WM_RBUTTONDBLCLK=0x0206
curMousePos=(0,0)
mouseMoved=False
curMouseShape=""
mouseShapeChanged=0
scrBmpObj=None
#: The time (in seconds) at which the last mouse event occurred.
#: @type: float
lastMouseEventTime=0
def updateMouseShape(name):
global curMouseShape, mouseShapeChanged
if not name or name==curMouseShape:
return
curMouseShape=name
mouseShapeChanged=1
def playAudioCoordinates(x, y, screenWidth, screenHeight, detectBrightness=True,blurFactor=0):
minPitch=config.conf['mouse']['audioCoordinates_minPitch']
maxPitch=config.conf['mouse']['audioCoordinates_maxPitch']
curPitch=minPitch+((maxPitch-minPitch)*((screenHeight-y)/float(screenHeight)))
if detectBrightness:
startX=min(max(x-blurFactor,0),screenWidth)
width=min((x+blurFactor+1)-startX,screenWidth)
startY=min(max(y-blurFactor,0),screenHeight)
height=min((y+blurFactor+1)-startY,screenHeight)
grey=screenBitmap.rgbPixelBrightness(scrBmpObj.captureImage(startX,startY,width,height)[0][0])
brightness=grey/255.0
minBrightness=config.conf['mouse']['audioCoordinates_minVolume']
maxBrightness=config.conf['mouse']['audioCoordinates_maxVolume']
brightness=(brightness*(maxBrightness-minBrightness))+minBrightness
else:
brightness=config.conf['mouse']['audioCoordinates_maxVolume']
leftVolume=int((85*((screenWidth-float(x))/screenWidth))*brightness)
rightVolume=int((85*(float(x)/screenWidth))*brightness)
tones.beep(curPitch,40,left=leftVolume,right=rightVolume)
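# Rough illustration of playAudioCoordinates (the pitch values assume
# hypothetical config settings such as minPitch=220 and maxPitch=880): a mouse
# position at mid screen height maps to a pitch of 550 Hz, and a position at
# the far left edge sends the beep almost entirely to the left channel.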
#Internal mouse event
def internal_mouseEvent(msg,x,y,injected):
global mouseMoved, curMousePos, lastMouseEventTime
lastMouseEventTime=time.time()
if injected:
return True
if not config.conf['mouse']['enableMouseTracking']:
return True
try:
curMousePos=(x,y)
if msg==WM_MOUSEMOVE:
mouseMoved=True
elif msg in (WM_LBUTTONDOWN,WM_RBUTTONDOWN):
queueHandler.queueFunction(queueHandler.eventQueue,speech.cancelSpeech)
except:
log.error("", exc_info=True)
return True
def executeMouseMoveEvent(x,y):
global currentMouseWindow
desktopObject=api.getDesktopObject()
screenLeft,screenTop,screenWidth,screenHeight=desktopObject.location
x=min(max(screenLeft,x),(screenLeft+screenWidth)-1)
y=min(max(screenTop,y),(screenTop+screenHeight)-1)
if config.conf["mouse"]["audioCoordinatesOnMouseMove"]:
playAudioCoordinates(x,y,screenWidth,screenHeight,config.conf['mouse']['audioCoordinates_detectBrightness'],config.conf['mouse']['audioCoordinates_blurFactor'])
oldMouseObject=api.getMouseObject()
mouseObject=desktopObject.objectFromPoint(x,y)
while mouseObject and mouseObject.beTransparentToMouse:
mouseObject=mouseObject.parent
if not mouseObject:
return
if oldMouseObject==mouseObject:
mouseObject=oldMouseObject
else:
api.setMouseObject(mouseObject)
try:
eventHandler.executeEvent("mouseMove",mouseObject,x=x,y=y)
oldMouseObject=mouseObject
except:
log.error("api.notifyMouseMoved", exc_info=True)
#Register internal mouse event
def initialize():
global curMousePos, scrBmpObj
scrBmpObj=screenBitmap.ScreenBitmap(1,1)
(x,y)=winUser.getCursorPos()
desktopObject=api.getDesktopObject()
try:
mouseObject=desktopObject.objectFromPoint(x,y)
except:
log.exception("Error retrieving initial mouse object")
mouseObject=None
if not mouseObject:
mouseObject=api.getDesktopObject()
api.setMouseObject(mouseObject)
curMousePos=(x,y)
winInputHook.initialize()
winInputHook.setCallbacks(mouse=internal_mouseEvent)
def pumpAll():
global mouseMoved, curMousePos, mouseShapeChanged, curMouseShape
if mouseMoved:
mouseMoved=False
(x,y)=curMousePos
executeMouseMoveEvent(x,y)
if config.conf["mouse"]["reportMouseShapeChanges"] and mouseShapeChanged>0:
if mouseShapeChanged==10:
mouseShapeChanged=0
# Translators: Reported when mouse cursor shape changes (example output: edit cursor).
speech.speakMessage(_("%s cursor")%curMouseShape)
else:
mouseShapeChanged+=1
def terminate():
global scrBmpObj
scrBmpObj=None
winInputHook.terminate()
|
ckundo/nvda
|
source/mouseHandler.py
|
Python
|
gpl-2.0
| 4,705
|
from pyx import *
c = canvas.canvas()
c.stroke(path.curve(0, 0, 0, 4, 2, 4, 3, 3),
[style.linewidth.THICK, style.linestyle.dashed, color.rgb.blue,
deco.earrow([deco.stroked([color.rgb.red, style.linejoin.round]),
deco.filled([color.rgb.green])], size=1)])
c.writeEPSfile("arrow")
c.writePDFfile("arrow")
c.writeSVGfile("arrow")
|
mjg/PyX-svn
|
examples/drawing/arrow.py
|
Python
|
gpl-2.0
| 371
|
#-------------------------------------------------------------------------
# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#--------------------------------------------------------------------------
from ctypes import c_void_p, c_long, c_ulong, c_longlong, c_ulonglong, c_short, c_ushort, c_wchar_p, c_byte, c_size_t
from ctypes import byref, Structure, Union, POINTER, WINFUNCTYPE, HRESULT, oledll, WinDLL, cast, create_string_buffer
import ctypes
import urllib2
#------------------------------------------------------------------------------
# Constants that are used in COM operations
VT_EMPTY = 0
VT_NULL = 1
VT_I2 = 2
VT_I4 = 3
VT_BSTR = 8
VT_BOOL = 11
VT_I1 = 16
VT_UI1 = 17
VT_UI2 = 18
VT_UI4 = 19
VT_I8 = 20
VT_UI8 = 21
VT_ARRAY = 8192
HTTPREQUEST_PROXYSETTING_PROXY = 2
HTTPREQUEST_SETCREDENTIALS_FOR_PROXY = 1
HTTPREQUEST_PROXY_SETTING = c_long
HTTPREQUEST_SETCREDENTIALS_FLAGS = c_long
#------------------------------------------------------------------------------
# Com related APIs that are used.
_ole32 = oledll.ole32
_oleaut32 = WinDLL('oleaut32')
_CLSIDFromString = _ole32.CLSIDFromString
_CoInitialize = _ole32.CoInitialize
_CoInitialize.argtypes = [c_void_p]
_CoCreateInstance = _ole32.CoCreateInstance
_SysAllocString = _oleaut32.SysAllocString
_SysAllocString.restype = c_void_p
_SysAllocString.argtypes = [c_wchar_p]
_SysFreeString = _oleaut32.SysFreeString
_SysFreeString.argtypes = [c_void_p]
#SAFEARRAY*
#SafeArrayCreateVector(_In_ VARTYPE vt,_In_ LONG lLbound,_In_ ULONG cElements);
_SafeArrayCreateVector = _oleaut32.SafeArrayCreateVector
_SafeArrayCreateVector.restype = c_void_p
_SafeArrayCreateVector.argtypes = [c_ushort, c_long, c_ulong]
#HRESULT
#SafeArrayAccessData(_In_ SAFEARRAY *psa, _Out_ void **ppvData);
_SafeArrayAccessData = _oleaut32.SafeArrayAccessData
_SafeArrayAccessData.argtypes = [c_void_p, POINTER(c_void_p)]
#HRESULT
#SafeArrayUnaccessData(_In_ SAFEARRAY *psa);
_SafeArrayUnaccessData = _oleaut32.SafeArrayUnaccessData
_SafeArrayUnaccessData.argtypes = [c_void_p]
#HRESULT
#SafeArrayGetUBound(_In_ SAFEARRAY *psa, _In_ UINT nDim, _Out_ LONG *plUbound);
_SafeArrayGetUBound = _oleaut32.SafeArrayGetUBound
_SafeArrayGetUBound.argtypes = [c_void_p, c_ulong, POINTER(c_long)]
#------------------------------------------------------------------------------
class BSTR(c_wchar_p):
''' BSTR class in python. '''
def __init__(self, value):
super(BSTR, self).__init__(_SysAllocString(value))
def __del__(self):
_SysFreeString(self)
class VARIANT(Structure):
'''
VARIANT structure in python. Does not match the definition in
    MSDN exactly; only the fields we use are mapped. Field names are also
    slightly different.
'''
class _tagData(Union):
class _tagRecord(Structure):
_fields_= [('pvoid', c_void_p), ('precord', c_void_p)]
_fields_ = [('llval', c_longlong),
('ullval', c_ulonglong),
('lval', c_long),
('ulval', c_ulong),
('ival', c_short),
('boolval', c_ushort),
('bstrval', BSTR),
('parray', c_void_p),
('record', _tagRecord)]
_fields_ = [('vt', c_ushort),
('wReserved1', c_ushort),
('wReserved2', c_ushort),
('wReserved3', c_ushort),
('vdata', _tagData)]
@staticmethod
def create_empty():
variant = VARIANT()
variant.vt = VT_EMPTY
variant.vdata.llval = 0
return variant
@staticmethod
def create_safearray_from_str(text):
variant = VARIANT()
variant.vt = VT_ARRAY | VT_UI1
length = len(text)
variant.vdata.parray = _SafeArrayCreateVector(VT_UI1, 0, length)
pvdata = c_void_p()
_SafeArrayAccessData(variant.vdata.parray, byref(pvdata))
ctypes.memmove(pvdata, text, length)
_SafeArrayUnaccessData(variant.vdata.parray)
return variant
@staticmethod
def create_bstr_from_str(text):
variant = VARIANT()
variant.vt = VT_BSTR
variant.vdata.bstrval = BSTR(text)
return variant
@staticmethod
def create_bool_false():
variant = VARIANT()
variant.vt = VT_BOOL
variant.vdata.boolval = 0
return variant
def is_safearray_of_bytes(self):
return self.vt == VT_ARRAY | VT_UI1
def str_from_safearray(self):
assert self.vt == VT_ARRAY | VT_UI1
pvdata = c_void_p()
count = c_long()
_SafeArrayGetUBound(self.vdata.parray, 1, byref(count))
count = c_long(count.value + 1)
_SafeArrayAccessData(self.vdata.parray, byref(pvdata))
text = ctypes.string_at(pvdata, count)
_SafeArrayUnaccessData(self.vdata.parray)
return text
def __del__(self):
_VariantClear(self)
#HRESULT VariantClear(_Inout_ VARIANTARG *pvarg);
_VariantClear = _oleaut32.VariantClear
_VariantClear.argtypes = [POINTER(VARIANT)]
class GUID(Structure):
''' GUID structure in python. '''
_fields_ = [("data1", c_ulong),
("data2", c_ushort),
("data3", c_ushort),
("data4", c_byte*8)]
def __init__(self, name=None):
if name is not None:
_CLSIDFromString(unicode(name), byref(self))
class _WinHttpRequest(c_void_p):
'''
Maps the Com API to Python class functions. Not all methods in IWinHttpWebRequest
are mapped - only the methods we use.
'''
_AddRef = WINFUNCTYPE(c_long)(1, 'AddRef')
_Release = WINFUNCTYPE(c_long)(2, 'Release')
_SetProxy = WINFUNCTYPE(HRESULT, HTTPREQUEST_PROXY_SETTING, VARIANT, VARIANT)(7, 'SetProxy')
_SetCredentials = WINFUNCTYPE(HRESULT, BSTR, BSTR, HTTPREQUEST_SETCREDENTIALS_FLAGS)(8, 'SetCredentials')
_Open = WINFUNCTYPE(HRESULT, BSTR, BSTR, VARIANT)(9, 'Open')
_SetRequestHeader = WINFUNCTYPE(HRESULT, BSTR, BSTR)(10, 'SetRequestHeader')
_GetResponseHeader = WINFUNCTYPE(HRESULT, BSTR, POINTER(c_void_p))(11, 'GetResponseHeader')
_GetAllResponseHeaders = WINFUNCTYPE(HRESULT, POINTER(c_void_p))(12, 'GetAllResponseHeaders')
_Send = WINFUNCTYPE(HRESULT, VARIANT)(13, 'Send')
_Status = WINFUNCTYPE(HRESULT, POINTER(c_long))(14, 'Status')
_StatusText = WINFUNCTYPE(HRESULT, POINTER(c_void_p))(15, 'StatusText')
_ResponseText = WINFUNCTYPE(HRESULT, POINTER(c_void_p))(16, 'ResponseText')
_ResponseBody = WINFUNCTYPE(HRESULT, POINTER(VARIANT))(17, 'ResponseBody')
_ResponseStream = WINFUNCTYPE(HRESULT, POINTER(VARIANT))(18, 'ResponseStream')
_WaitForResponse = WINFUNCTYPE(HRESULT, VARIANT, POINTER(c_ushort))(21, 'WaitForResponse')
_Abort = WINFUNCTYPE(HRESULT)(22, 'Abort')
_SetTimeouts = WINFUNCTYPE(HRESULT, c_long, c_long, c_long, c_long)(23, 'SetTimeouts')
_SetClientCertificate = WINFUNCTYPE(HRESULT, BSTR)(24, 'SetClientCertificate')
def open(self, method, url):
'''
Opens the request.
method: the request VERB 'GET', 'POST', etc.
url: the url to connect
'''
_WinHttpRequest._SetTimeouts(self, 0, 65000, 65000, 65000)
flag = VARIANT.create_bool_false()
_method = BSTR(method)
_url = BSTR(url)
_WinHttpRequest._Open(self, _method, _url, flag)
def set_request_header(self, name, value):
''' Sets the request header. '''
_name = BSTR(name)
_value = BSTR(value)
_WinHttpRequest._SetRequestHeader(self, _name, _value)
def get_all_response_headers(self):
''' Gets back all response headers. '''
bstr_headers = c_void_p()
_WinHttpRequest._GetAllResponseHeaders(self, byref(bstr_headers))
bstr_headers = ctypes.cast(bstr_headers, c_wchar_p)
headers = bstr_headers.value
_SysFreeString(bstr_headers)
return headers
def send(self, request = None):
''' Sends the request body. '''
# Sends VT_EMPTY if it is GET, HEAD request.
if request is None:
var_empty = VARIANT.create_empty()
_WinHttpRequest._Send(self, var_empty)
else: # Sends request body as SAFEArray.
_request = VARIANT.create_safearray_from_str(request)
_WinHttpRequest._Send(self, _request)
def status(self):
''' Gets status of response. '''
status = c_long()
_WinHttpRequest._Status(self, byref(status))
return int(status.value)
def status_text(self):
''' Gets status text of response. '''
bstr_status_text = c_void_p()
_WinHttpRequest._StatusText(self, byref(bstr_status_text))
bstr_status_text = ctypes.cast(bstr_status_text, c_wchar_p)
status_text = bstr_status_text.value
_SysFreeString(bstr_status_text)
return status_text
def response_body(self):
'''
        Gets the response body as a SAFEARRAY and converts it to str. XML
        responses may start with a 3-byte UTF-8 BOM before '<?xml', which we strip.
'''
var_respbody = VARIANT()
_WinHttpRequest._ResponseBody(self, byref(var_respbody))
if var_respbody.is_safearray_of_bytes():
respbody = var_respbody.str_from_safearray()
if respbody[3:].startswith('<?xml') and respbody.startswith('\xef\xbb\xbf'):
respbody = respbody[3:]
return respbody
else:
return ''
def set_client_certificate(self, certificate):
'''Sets client certificate for the request. '''
_certificate = BSTR(certificate)
_WinHttpRequest._SetClientCertificate(self, _certificate)
def set_tunnel(self, host, port):
''' Sets up the host and the port for the HTTP CONNECT Tunnelling.'''
url = host
if port:
url = url + u':' + port
var_host = VARIANT.create_bstr_from_str(url)
var_empty = VARIANT.create_empty()
_WinHttpRequest._SetProxy(self, HTTPREQUEST_PROXYSETTING_PROXY, var_host, var_empty)
def set_proxy_credentials(self, user, password):
_WinHttpRequest._SetCredentials(self, BSTR(user), BSTR(password), HTTPREQUEST_SETCREDENTIALS_FOR_PROXY)
def __del__(self):
if self.value is not None:
_WinHttpRequest._Release(self)
class _Response:
''' Response class corresponding to the response returned from httplib HTTPConnection. '''
def __init__(self, _status, _status_text, _length, _headers, _respbody):
self.status = _status
self.reason = _status_text
self.length = _length
self.headers = _headers
self.respbody = _respbody
def getheaders(self):
'''Returns response headers.'''
return self.headers
def read(self, _length):
        '''Returns response body. '''
return self.respbody[:_length]
class _HTTPConnection:
''' Class corresponding to httplib HTTPConnection class. '''
def __init__(self, host, cert_file=None, key_file=None, protocol='http'):
''' initialize the IWinHttpWebRequest Com Object.'''
self.host = unicode(host)
self.cert_file = cert_file
self._httprequest = _WinHttpRequest()
self.protocol = protocol
clsid = GUID('{2087C2F4-2CEF-4953-A8AB-66779B670495}')
iid = GUID('{016FE2EC-B2C8-45F8-B23B-39E53A75396B}')
_CoInitialize(None)
_CoCreateInstance(byref(clsid), 0, 1, byref(iid), byref(self._httprequest))
def set_tunnel(self, host, port=None, headers=None):
''' Sets up the host and the port for the HTTP CONNECT Tunnelling. '''
self._httprequest.set_tunnel(unicode(host), unicode(str(port)))
def set_proxy_credentials(self, user, password):
self._httprequest.set_proxy_credentials(unicode(user), unicode(password))
def putrequest(self, method, uri):
''' Connects to host and sends the request. '''
protocol = unicode(self.protocol + '://')
url = protocol + self.host + unicode(uri)
self._httprequest.open(unicode(method), url)
#sets certificate for the connection if cert_file is set.
if self.cert_file is not None:
self._httprequest.set_client_certificate(unicode(self.cert_file))
def putheader(self, name, value):
''' Sends the headers of request. '''
self._httprequest.set_request_header(str(name).decode('utf-8'),
str(value).decode('utf-8'))
def endheaders(self):
''' No operation. Exists only to provide the same interface of httplib HTTPConnection.'''
pass
def send(self, request_body):
''' Sends request body. '''
if not request_body:
self._httprequest.send()
else:
self._httprequest.send(request_body)
def getresponse(self):
''' Gets the response and generates the _Response object'''
status = self._httprequest.status()
status_text = self._httprequest.status_text()
resp_headers = self._httprequest.get_all_response_headers()
fixed_headers = []
for resp_header in resp_headers.split('\n'):
if (resp_header.startswith('\t') or resp_header.startswith(' ')) and fixed_headers:
# append to previous header
fixed_headers[-1] += resp_header
else:
fixed_headers.append(resp_header)
headers = []
for resp_header in fixed_headers:
if ':' in resp_header:
pos = resp_header.find(':')
headers.append((resp_header[:pos].lower(), resp_header[pos+1:].strip()))
body = self._httprequest.response_body()
length = len(body)
return _Response(status, status_text, length, headers, body)
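# Hedged usage sketch (host, verb and header values below are assumptions, not
# part of this module) showing the httplib-style flow the class emulates:
#   conn = _HTTPConnection('example.blob.core.windows.net', protocol='https')
#   conn.putrequest('GET', '/container/blob')
#   conn.putheader('x-ms-version', '2011-08-18')
#   conn.endheaders()
#   conn.send('')
#   resp = conn.getresponse()
#   status, body = resp.status, resp.read(resp.length)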
|
varunagrawal/azure-services
|
varunagrawal/site-packages/azure/http/winhttp.py
|
Python
|
gpl-2.0
| 14,499
|
# -*- coding: utf-8 -*-
# Sid Meier's Civilization 4
# Copyright Firaxis Games 2005
#
# Pitboss admin framework
# Dan McGarry 3-24-05
#
import sys
import os
import time
import string
from threading import Thread
import wx
import wx.lib.scrolledpanel
from CvPythonExtensions import * # Do not remove this
import CvPythonExtensions as E
import Webserver
PB = E.CyPitboss()
gc = E.CyGlobalContext()
LT = E.CyTranslator()
# Add Altroot python folder as import path
pythonDir = os.path.join(gc.getAltrootDir(), '..', 'Python', 'v9')
if pythonDir not in sys.path:
sys.path.append(pythonDir)
from Settings import Settings
PbSettings = Settings() #.instance()
CHAT_LOG_MAX_LEN = 50
# Pipe error messages into a file to avoid popup windows
errorLogFile = PbSettings.get("errorLogFile", None)
if errorLogFile is not None:
logName = os.path.join(gc.getAltrootDir(), str(errorLogFile))
try:
        os.unlink(logName + ".old")
except Exception, e:
pass
try:
os.rename(logName, logName+".old")
except Exception, e:
pass
sys.stderr = open(logName, 'w')
playerWasOnline = [] # To track login and logout events
for _ in range(gc.getMAX_CIV_PLAYERS()):
playerWasOnline.append(False)
#
# resource IDs
#
ID_ABOUT = 101
ID_SAVE = 102
ID_EXIT = 103
#
# admin frame class
#
class AdminFrame(wx.Frame):
bShellInit = False
# def __init__(self, parent, ID, title): # Orig
def __init__(self, parent, ID, title, adminApp):
"constructor"
super(AdminFrame, self).__init__(parent, ID, title)
self.adminApp = adminApp
# self.bGui = (0 == int(PbSettings.get("noGui", 0))) # Old key name
self.bGui = (int(PbSettings.get("gui", 1)) != 0)
self.bAutostart = (int(PbSettings.get("autostart", 0)) != 0)
self.bShell = (int(PbSettings.get("shell", {}).get("enable", 0)) != 0)
self.civ4Shell = {} # Holds shell object and some extra variables
self.gameTurn = -1 # For pylint
if self.bGui:
self.createGui(parent, ID, title)
if self.bShell:
self.bShellInit = True # Init shell on first update() call
# self.init_shell() # To early?!
# Webserver
self.webserver = Webserver.ThreadedHTTPServer(
(PbSettings['webserver']['host'], PbSettings['webserver']['port']),
Webserver.HTTPRequestHandler)
self.t = Thread(target=self.webserver.serve_forever)
self.t.setDaemon(True)
self.t.start()
# Periodical game data upload
self.webupload = None
if(PbSettings['webfrontend']['sendPeriodicalData'] != 0):
self.webupload = Webserver.PerpetualTimer(PbSettings['webfrontend'],
self.webserver, True)
self.webupload.start()
bRestorePassword = (int(PbSettings.get("restorePassword", 0)) != 0)
if bRestorePassword:
adminPwd = str(PbSettings.get("save", {}).get("adminpw", ""))
if hasattr(E.CyGame(), "setAdminPassword"):
E.CyGame().setAdminPassword(adminPwd, "")
PbSettings.createSave("PostUpdate.CivBeyondSwordSave")
PbSettings.lock.acquire()
PbSettings.pop("restorePassword", None)
PbSettings.lock.release()
PbSettings.save()
else:
PB.consoleOut("restorePassword-Flag is set in pbSettings, "
"but DLL does not contain setAdminPassword "
"method.")
def init_shell(self):
self.civ4Shell = {
"glob": globals(),
"loc": locals(),
"shell": start_shell(PbSettings.get("shell", {}), "pb_admin")
}
if self.civ4Shell.get("shell"):
PB.consoleOut("Init shell interface in PbAdmin")
self.civ4Shell["shell"].set_admin_iface(self.adminApp)
self.civ4Shell["shell"].init()
else:
self.bShell = False
def createGui(self, parent, ID, title):
# Create the menu
wx.Frame.__init__(self, parent, ID, title,
wx.DefaultPosition, wx.Size(675, 480))
menu = wx.Menu()
menu.Append(ID_ABOUT, (LT.getText("TXT_KEY_PITBOSS_ABOUT", ())), (LT.getText("TXT_KEY_PITBOSS_ABOUT_TEXT", ())))
menu.AppendSeparator()
menu.Append(ID_SAVE, (LT.getText("TXT_KEY_PITBOSS_SAVE", ())), (LT.getText("TXT_KEY_PITBOSS_SAVE_TEXT", ())))
menu.Append(ID_EXIT, (LT.getText("TXT_KEY_PITBOSS_EXIT", ())), (LT.getText("TXT_KEY_PITBOSS_EXIT_TEXT", ())))
menuBar = wx.MenuBar()
strFile = LT.getText("TXT_KEY_PITBOSS_FILE", ())
strFile = LT.stripHTML(strFile)
menuBar.Append(menu, strFile)
self.SetMenuBar(menuBar)
# Create our arrays of information and controls
self.nameArray = []
self.pingArray = []
self.scoreArray = []
self.kickArray = []
pageSizer = wx.BoxSizer(wx.VERTICAL)
# Add the game name and date
self.gameTurn = PB.getGameturn()
self.title = wx.StaticText(self, -1, PB.getGamename() + " - " + PB.getGamedate(False))
font = wx.Font(18, wx.SWISS, wx.NORMAL, wx.NORMAL)
self.title.SetFont(font)
self.title.SetSize(self.title.GetBestSize())
pageSizer.Add(self.title, 0, wx.ALL, 5)
# Add the turn timer if we have one
if (PB.getTurnTimer()):
timerSizer = wx.BoxSizer(wx.HORIZONTAL)
# Add a button to allow turn timer modification
timerChangeButton = wx.Button(self, -1, LT.getText("TXT_KEY_MP_OPTION_TURN_TIMER", ()))
self.Bind(wx.EVT_BUTTON, self.OnChangeTimer, timerChangeButton)
timerSizer.Add(timerChangeButton, 0, wx.ALL, 5)
timerPauseButton = wx.Button(self, -1, LT.getText("TXT_KEY_MOD_PAUSE_TIMER", ()))
self.Bind(wx.EVT_BUTTON, self.OnChangePause, timerPauseButton)
timerSizer.Add(timerPauseButton, 0, wx.ALL, 5)
self.timerDisplay = wx.StaticText(self, -1, "")
timerStr = self.getTimerString(PB.getTurnTimeLeft())
self.timerDisplay.SetLabel(timerStr)
font = wx.Font(16, wx.SWISS, wx.NORMAL, wx.NORMAL)
self.timerDisplay.SetFont(font)
self.timerDisplay.SetSize(self.timerDisplay.GetBestSize())
timerSizer.Add(self.timerDisplay, 0, wx.ALL, 5)
pageSizer.Add(timerSizer, 0, wx.ALL, 5)
infoSizer = wx.BoxSizer(wx.HORIZONTAL)
leftSizer = wx.BoxSizer(wx.VERTICAL)
playerPanel = wx.lib.scrolledpanel.ScrolledPanel(self, -1, size=(370, 280), style=wx.DOUBLE_BORDER)
playerSizer = wx.BoxSizer(wx.VERTICAL)
# Create a row for each player in the game
rowNum = 0
for rowNum in range(gc.getMAX_CIV_PLAYERS()):
if (gc.getPlayer(rowNum).isEverAlive()):
# Create the border box
border = wx.StaticBox(playerPanel, -1, (LT.getText("TXT_KEY_PITBOSS_PLAYER", (rowNum+1, ))), (0, (rowNum*30)))
# Create the layout mgr
rowSizer = wx.StaticBoxSizer(border, wx.HORIZONTAL)
# Player name
itemSizer = wx.BoxSizer(wx.VERTICAL)
lbl = wx.StaticText(playerPanel, -1, (LT.getText("TXT_KEY_PITBOSS_WHO", ())))
txtValue = wx.StaticText(playerPanel, rowNum, "", size=wx.Size(100, 13))
itemSizer.Add(lbl)
itemSizer.Add(txtValue)
rowSizer.Add(itemSizer, 0, wx.ALL, 5)
self.nameArray.append(txtValue)
# Ping times
itemSizer = wx.BoxSizer(wx.VERTICAL)
lbl = wx.StaticText(playerPanel, -1, (LT.getText("TXT_KEY_PITBOSS_PING", ())))
txtValue = wx.StaticText(playerPanel, rowNum, "", size=wx.Size(70, 13))
itemSizer.Add(lbl)
itemSizer.Add(txtValue)
rowSizer.Add(itemSizer, 0, wx.ALL, 5)
self.pingArray.append(txtValue)
# Scores
itemSizer = wx.BoxSizer(wx.VERTICAL)
lbl = wx.StaticText(playerPanel, -1, (LT.getText("TXT_KEY_PITBOSS_SCORE", ())))
txtValue = wx.StaticText(playerPanel, rowNum, "", size=wx.Size(30, 13))
itemSizer.Add(lbl)
itemSizer.Add(txtValue)
rowSizer.Add(itemSizer, 0, wx.ALL, 5)
self.scoreArray.append(txtValue)
# Kick buttons
kickButton = wx.Button(playerPanel, rowNum, (LT.getText("TXT_KEY_PITBOSS_KICK", ())))
rowSizer.Add(kickButton, 0, wx.ALL, 5)
kickButton.Disable()
self.Bind(wx.EVT_BUTTON, self.OnKick, kickButton)
self.kickArray.append(kickButton)
playerSizer.Add(rowSizer, 0, wx.ALL, 5)
playerPanel.SetSizer(playerSizer)
playerPanel.SetAutoLayout(1)
playerPanel.SetupScrolling()
leftSizer.Add(playerPanel, 0, wx.ALL, 5)
# Add a button row
buttonSizer = wx.BoxSizer(wx.HORIZONTAL)
# Add the save game button
saveButton = wx.Button(self, -1, (LT.getText("TXT_KEY_PITBOSS_SAVE_GAME", ())))
self.Bind(wx.EVT_BUTTON, self.OnSave, saveButton)
buttonSizer.Add(saveButton, 0, wx.ALL, 5)
# Add the exit game button
exitButton = wx.Button(self, -1, (LT.getText("TXT_KEY_MAIN_MENU_EXIT_GAME", ())))
self.Bind(wx.EVT_BUTTON, self.OnExit, exitButton)
buttonSizer.Add(exitButton, 0, wx.ALL, 5)
leftSizer.Add(buttonSizer, 0, wx.ALL, 5)
# Add the left area to the info area
infoSizer.Add(leftSizer, 0, wx.ALL, 5)
# Now create the message area
messageSizer = wx.BoxSizer(wx.VERTICAL)
# Create the MotD Panel
motdBorder = wx.StaticBox(self, -1, LT.getText("TXT_KEY_PITBOSS_MOTD_TITLE", ()))
motdSizer = wx.StaticBoxSizer(motdBorder, wx.VERTICAL)
# Check box whether to use MotD or not
self.motdCheckBox = wx.CheckBox(self, -1, LT.getText("TXT_KEY_PITBOSS_MOTD_TOGGLE", ()))
self.motdCheckBox.SetValue(len(PbSettings.get('MotD', u'')) > 0)
motdSizer.Add(self.motdCheckBox, 0, wx.TOP, 5)
# Add edit box displaying current MotD
self.motdDisplayBox = wx.TextCtrl(self, -1, "", size=(225, 50), style=wx.TE_MULTILINE | wx.TE_READONLY)
self.motdDisplayBox.SetHelpText(LT.getText("TXT_KEY_PITBOSS_MOTD_HELP", ()))
msg_unicode = PbSettings.get('MotD', u'')
self.motdDisplayBox.SetValue(msg_unicode.encode('cp1252'))
motdSizer.Add(self.motdDisplayBox, 0, wx.ALL, 5)
# Add a button to allow motd modification
motdChangeButton = wx.Button(self, -1, LT.getText("TXT_KEY_PITBOSS_MOTD_CHANGE", ()))
motdChangeButton.SetHelpText(LT.getText("TXT_KEY_PITBOSS_MOTD_CHANGE_HELP", ()))
self.Bind(wx.EVT_BUTTON, self.OnChangeMotD, motdChangeButton)
motdSizer.Add(motdChangeButton, 0, wx.ALL, 5)
# Add the motd area to the message area
messageSizer.Add(motdSizer, 0, wx.ALL, 5)
# Create the dialog panel
dialogBorder = wx.StaticBox(self, -1, LT.getText("TXT_KEY_PITBOSS_CHAT_TITLE", ()))
dialogSizer = wx.StaticBoxSizer(dialogBorder, wx.VERTICAL)
# Chat log
self.chatLog = wx.TextCtrl(self, -1, "", size=(225, 100), style=wx.TE_MULTILINE | wx.TE_READONLY)
self.chatLog.SetHelpText(LT.getText("TXT_KEY_PITBOSS_CHAT_LOG_HELP", ()))
dialogSizer.Add(self.chatLog, 0, wx.ALL, 5)
# Chat edit
self.chatEdit = wx.TextCtrl(self, -1, "", size=(225, -1), style=wx.TE_PROCESS_ENTER)
self.chatEdit.SetHelpText(LT.getText("TXT_KEY_PITBOSS_CHAT_EDIT_HELP", ()))
dialogSizer.Add(self.chatEdit, 0, wx.ALL, 5)
self.Bind(wx.EVT_TEXT_ENTER, self.OnSendChat, self.chatEdit)
# Add the dialog area to the message area
messageSizer.Add(dialogSizer, 0, wx.ALL, 5)
# Add the message area to our info area
infoSizer.Add(messageSizer, 0, wx.ALL, 5)
# Add the info area to the page
pageSizer.Add(infoSizer, 0, wx.ALL, 5)
self.SetSizer(pageSizer)
# Register the event handlers
wx.EVT_MENU(self, ID_ABOUT, self.OnAbout)
wx.EVT_MENU(self, ID_SAVE, self.OnSave)
wx.EVT_MENU(self, ID_EXIT, self.OnExit)
# Other handlers
self.Bind(wx.EVT_CLOSE, self.OnCloseWindow)
def getTimerString(self, turnSlices):
# Get the time string for the turn timer...
# See if we are out of time
# Only update every second
retVal = self.timerDisplay.GetLabel()
if (turnSlices % 4 == 0):
if (turnSlices < 0):
retVal = "0:00:00"
else:
numHours = 0
numMinutes = 0
numSeconds = turnSlices/4
if (numSeconds > 59):
numMinutes = numSeconds/60
numSeconds = numSeconds % 60
if (numMinutes > 59):
numHours = numMinutes/60
numMinutes = numMinutes % 60
retVal = ""
if (numHours > 0):
retVal = str(numHours)
else:
retVal = "0"
retVal += ":"
if (numMinutes > 9):
retVal += str(numMinutes)
elif (numMinutes > 0):
retVal += "0" + str(numMinutes)
else:
retVal += "00"
retVal += ":"
if (numSeconds > 9):
retVal += str(numSeconds)
elif (numSeconds > 0):
retVal += "0" + str(numSeconds)
else:
retVal += "00"
return retVal
def update(self):
if self.bGui:
# We have the widgets created, set the values...
if (self.gameTurn != PB.getGameturn()):
self.title.SetLabel(PB.getGamename() + " - " + PB.getGamedate(False))
self.gameTurn = PB.getGameturn()
if (PB.getTurnTimer()):
timerStr = self.getTimerString(PB.getTurnTimeLeft())
if (timerStr != self.timerDisplay.GetLabel()):
self.timerDisplay.SetLabel(timerStr)
for rowNum in range(gc.getMAX_CIV_PLAYERS()):
if (gc.getPlayer(rowNum).isEverAlive()):
# Get the player data
playerData = PB.getPlayerAdminData(rowNum)
# Set the values
nameDisplay = ""
if (not playerData.bTurnActive):
nameDisplay += "*"
# PB Mod: Fix Non-ASCII decoding error
# .getName()-function returns unicode string
                    # which needs to be properly encoded.
nameDisplay += playerData.getName().encode('cp1252')
#nameDisplay += "Player %i" % (rowNum+1)
if (nameDisplay != self.nameArray[rowNum].GetLabel()):
self.nameArray[rowNum].SetLabel(nameDisplay)
if ((playerData.getPing()) != self.pingArray[rowNum].GetLabel()):
self.pingArray[rowNum].SetLabel((playerData.getPing()))
if ((playerData.getScore()) != self.scoreArray[rowNum].GetLabel()):
self.scoreArray[rowNum].SetLabel((playerData.getScore()))
bEnabled = self.kickArray[rowNum].IsEnabled()
bShouldEnable = (playerData.bHuman and playerData.bClaimed)
if (bEnabled != bShouldEnable):
if (bShouldEnable):
self.kickArray[rowNum].Enable(True)
else:
self.kickArray[rowNum].Disable()
# Create save on login and logout events
for rowNum in range(gc.getMAX_CIV_PLAYERS()):
# gcPlayer = gc.getPlayer(rowNum)
bOnline = (PB.getPlayerAdminData(rowNum).getPing()[1] == "[")
if (bOnline != playerWasOnline[rowNum]):
playerName = PB.getPlayerAdminData(rowNum).getName().encode('cp1252')
PbSettings.createPlayerRecoverySave(rowNum, playerName, bOnline)
playerWasOnline[rowNum] = bOnline
if self.bShell:
if self.bShellInit:
self.bShellInit = False
self.init_shell()
else:
try:
self.civ4Shell["shell"].update(
self.civ4Shell["glob"],
self.civ4Shell["loc"])
except Exception, e:
PB.consoleOut("Civ4Shell error:" + str(e))
if not self.bGui:
try:
                # This try-except clause does not suppress all Python error
                # windows, because there is a second .sleep call in PbMain.py
                # and that class cannot be changed by a modification.
time.sleep(0.1)
except KeyboardInterrupt:
self.OnExit(None)
def OnKick(self, event):
"'kick' event handler"
rowNum = event.GetId()
dlg = wx.MessageDialog(
self,
(LT.getText("TXT_KEY_PITBOSS_KICK_VERIFY", (PB.getName(rowNum), ))),
(LT.getText("TXT_KEY_PITBOSS_KICK_VERIFY_TITLE", ())),
wx.YES_NO | wx.ICON_QUESTION)
if (dlg.ShowModal() == wx.ID_YES):
PB.kick(rowNum)
dlg.Destroy()
def OnAbout(self, event):
"'about' event handler"
dlg = wx.MessageDialog(
self,
(LT.getText("TXT_KEY_PITBOSS_VERSION", (PB.getVersion(), ))),
(LT.getText("TXT_KEY_PITBOSS_ABOUT_BOX_TITLE", ())),
wx.OK | wx.ICON_INFORMATION)
dlg.ShowModal()
dlg.Destroy()
def OnSave(self, event):
"'save' event handler"
dlg = wx.FileDialog(
self, message=(LT.getText("TXT_KEY_PITBOSS_SAVE_AS", ())),
# defaultDir=r".\Saves\multi",
defaultDir=(gc.getAltrootDir() + r".\Saves\multi"),
defaultFile="Pitboss_"+PB.getGamedate(True)+".CivBeyondSwordSave",
wildcard=(LT.getText("TXT_KEY_PITBOSS_SAVE_AS_TEXT", ())) +
" (*.CivBeyondSwordSave)|*.CivBeyondSwordSave",
style=wx.SAVE | wx.OVERWRITE_PROMPT)
if dlg.ShowModal() == wx.ID_OK:
# Get the file name
path = dlg.GetPath()
if (path != ""):
# Got a file to save - try to save it
if (not PB.save(path)):
# Saving game failed! Let the user know
wx.MessageBox(
(LT.getText("TXT_KEY_PITBOSS_ERROR_SAVING", ())),
(LT.getText("TXT_KEY_PITBOSS_SAVE_ERROR", ())),
wx.ICON_ERROR)
else:
wx.MessageBox(
(LT.getText("TXT_KEY_PITBOSS_SAVE_SUCCESS", (path, ))),
(LT.getText("TXT_KEY_PITBOSS_SAVED", ())),
wx.ICON_INFORMATION)
dlg.Destroy()
def OnChangeMotD(self, event):
"'MotD' event handler"
# Changing MotD - pop a modal dialog
dlg = wx.TextEntryDialog(
self, LT.getText("TXT_KEY_PITBOSS_MOTD_POPUP_DESC", ()),
LT.getText("TXT_KEY_PITBOSS_MOTD_POPUP_TITLE", ()))
# Show the modal dialog and get the response
if dlg.ShowModal() == wx.ID_OK:
# Set the MotD
msg = dlg.GetValue() # GetValue is str in cp1252 enc
self.motdDisplayBox.SetValue(msg)
PbSettings['MotD'] = msg.decode('cp1252')
def OnChangePause(self, event):
"Turn pause event handler"
if gc.getGame().isPaused():
# gc.sendPause(-1) # effect not as expected
# gc.sendChat("RemovePause", E.ChatTargetTypes.CHATTARGET_ALL)
PB.sendChat("RemovePause")
else:
gc.sendPause(0)
self.timerDisplay.SetLabel("Game paused.")
""" Note that the index for the barbarian could
            lead to a C++ exception.
A test case for the bug follows...
"""
# gc.sendPause(gc.getMAX_CIV_PLAYERS()-1)
def OnChangeTimer(self, event):
"Turn timer event handler"
# Changing Timer - pop a modal dialog
dlg = wx.TextEntryDialog(
self, LT.getText("TXT_KEY_PITBOSS_TURN_TIMER_NEW", ()),
LT.getText("TXT_KEY_MP_OPTION_TURN_TIMER", ()))
dlg.SetValue("%s" % (gc.getGame().getPitbossTurnTime(), ))
# Show the modal dialog and get the response
if dlg.ShowModal() == wx.ID_OK:
szValue = dlg.GetValue()
if szValue != "":
if not self.IsNumericString(szValue):
dlg2 = wx.MessageDialog(
self, LT.getText("TXT_KEY_PITBOSS_TURNTIMER_ERROR_DESC", ()),
LT.getText("TXT_KEY_PITBOSS_TURNTIMER_ERROR_TITLE", ()), wx.OK | wx.ICON_EXCLAMATION)
if dlg2.ShowModal() == wx.ID_OK:
# Clear out the TurnTimer Edit box
dlg.SetValue("")
else:
PB.turnTimerChanged((int)(dlg.GetValue()))
def OnSendChat(self, event):
"'Chat Send' event handler"
# Verify we have text to send
chat_msg = self.chatEdit.GetValue()
if (len(chat_msg)):
PB.sendChat(chat_msg)
self.chatEdit.SetValue("")
# PB Mod: Store text in log as unicode
            self.adminApp.chat_log = self.adminApp.chat_log[0:CHAT_LOG_MAX_LEN-1]
            self.adminApp.chat_log.append(chat_msg.decode('cp1252'))
def OnExit(self, event):
"'exit' event handler"
PB.quit()
self.webserver.shutdown()
if self.webupload:
self.webupload.cancel()
if self.bShell:
if "shell" in self.civ4Shell:
self.civ4Shell["shell"].close()
# global Civ4Shell
# Civ4Shell["shell"].close()
if event != "FromOnClose":
self.Destroy()
def OnCloseWindow(self, event):
"'close window' event handler"
# PB.quit()
self.OnExit("FromOnClose")
self.Destroy()
def IsNumericString(self, myStr):
for myChar in myStr:
if myChar not in string.digits:
return False
return True
#
# main app class
#
class AdminIFace(wx.App):
adminFrame = None
chat_log = []
# def __init__(self, arg1): # Required if not derived from wx.App
# self.OnInit()
#
def OnInit(self):
"create the admin frame"
self.adminFrame = AdminFrame(None, -1,
LT.getText("TXT_KEY_PITBOSS_SAVE_SUCCESS", (PB.getGamename(), )),
self)
if self.adminFrame.bGui:
self.adminFrame.Show(True)
self.SetTopWindow(self.adminFrame)
self.adminFrame.webserver.setPbApp(self)
return True
def update(self):
"process events - call in main loop"
if self.adminFrame: # Check avoids PyDeadObjectError
if self.adminFrame.bGui:
# Create an event loop and make it active.
# save the old one
evtloop = wx.EventLoop()
old = wx.EventLoop.GetActive()
wx.EventLoop.SetActive(evtloop)
# Update our view
self.adminFrame.update()
# This inner loop will process any GUI events
# until there are no more waiting.
while evtloop.Pending():
evtloop.Dispatch()
# Send idle events to idle handlers.
time.sleep(0.1) # Orig value was 0.01
self.ProcessIdle()
# restore old event handler
wx.EventLoop.SetActive(old)
else:
if self.adminFrame: # Check avoids PyDeadObjectError
self.adminFrame.update()
def refreshRow(self, iRow):
"Stub for refresh row..."
return True
def getMotD(self):
"Message of the day retrieval"
if self.adminFrame.bGui and self.adminFrame.motdCheckBox.GetValue():
msg = self.adminFrame.motdDisplayBox.GetValue() # str in cp1252 enc
PbSettings["MotD"] = msg.decode('cp1252')
return msg
msg = PbSettings.get('MotD', u'') # here, type is unicode
return msg
def setMotD(self, msg):
if not self.adminFrame.bGui:
return
# msg is unicode
self.adminFrame.motdDisplayBox.SetValue(msg.encode('cp1252'))
def addChatMessage(self, message):
message = LT.stripHTML(message)
# PB Mod: Store text in log as unicode
self.chat_log = self.chat_log[0:CHAT_LOG_MAX_LEN-1]
# self.chat_log.append(message.decode('cp1252'))
self.chat_log.append(message) # already decoded
if not self.adminFrame.bGui:
return
self.adminFrame.chatLog.AppendText("\n")
self.adminFrame.chatLog.AppendText(message)
def displayMessageBox(self, title, desc):
outMsg = title + ":\n" + desc
PB.consoleOut(outMsg)
def sendChatMessage(self, message, sound_variant=0):
# Use multiple modNetMessages to propagate text to users
# Encoding:
# • Send utf-8-encoded string, but restrict
# • Propagate unicode string to addChatMessage
# Function returns True or error message
if isinstance(message, unicode):
message = message.encode('utf-8')
# 1. split messages into 4*4 + 3 Bytes
first = True
n_bytes = 4*4 + 3
msg__ = message + " " * ((n_bytes - len(message)) % (n_bytes))
def bytes_to_signed_int(b1, b2, b3, b4):
# Little endian
# Converts numbers into 0-255 (negatives =>255)
b1, b2, b3, b4 = b1 & 0xFF, b2 & 0xFF, b3 & 0xFF, b4 & 0xFF
if b4 > 127:
x = (b1 << 0) | (b2 << 8) | (b3 << 16) | ((b4-128) << 24)
x = -x
else:
x = (b1 << 0) | (b2 << 8) | (b3 << 16) | (b4 << 24)
# Alternative:
# bit_len = 32-1
# u = (b1 << 0) | (b2 << 8) | (b3 << 16) | (b4 << 24)
# x = (u & ((1 << bit_len) - 1)) - (u & (1 << bit_len))
return x
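        # Worked illustration (byte values are hypothetical): with the high
        # byte at or below 127 the result is the plain little-endian value,
        # otherwise the magnitude built from the low 31 bits is negated, e.g.
        #   bytes_to_signed_int(1, 0, 0, 0)   -> 1
        #   bytes_to_signed_int(0, 0, 0, 255) -> -2130706432  (i.e. -(127 << 24))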
while len(msg__) > 0:
# Grab first 18 bytes and shorten message
c = [ord(c) for c in msg__[:n_bytes] ]
msg__ = msg__[n_bytes:]
# 2. Mark with flags as chat text
flag = 0x70 # Mark as chat messages in iData5
if first: # First part, set bx01110100 flag
first = False
flag |= 0x04
if len(msg__) == 0: # Last part, set bx01111000 flag
flag |= 0x08
# Add sound flags (0-3)
flag |= min(3, abs(sound_variant))
# 3. Convert text chunk into signed integers
words = [bytes_to_signed_int(c[0], c[1], c[2], c[3]),
bytes_to_signed_int(c[4], c[5], c[6], c[7]),
bytes_to_signed_int(c[8], c[9], c[10], c[11]),
bytes_to_signed_int(c[12], c[13], c[14], c[15]),
bytes_to_signed_int(c[16], c[17], c[18], flag)]
# Last entry set lower 3 bytes + flags
CyMessageControl().sendModNetMessage(
words[0], words[1], words[2], words[3], words[4])
# 5. Store message in textbox.
        # Add message directly to chat because method is not easily reachable
# by CvEventManager's onModNetMessage call
try:
self.addChatMessage(message.decode('utf-8'))
except Exception, e:
return "Message send, but addChatMessage failed. Error: " + str(e)
if gc.getGame().isPaused():
return "Warning: Message delayed until game is unpaused."
return True
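    # Hypothetical usage sketch (the caller and text are assumptions, not part
    # of this file): e.g. adminApp.sendChatMessage(u'Server restart in 5 minutes',
    # sound_variant=1) splits the text into 19-byte chunks and pushes them as
    # mod net messages to the clients.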
# Short, but not working variant. Added for reference.
def modChatMessage2(self, message, sound_variant=0):
# Test of direct way: (Fails if triggered in this thread?!)
gc.getGame().setActivePlayer(gc.getBARBARIAN_PLAYER(), False)
gc.sendChat(message, -1) # Requires active player > -1
gc.getGame().setActivePlayer(-1, False)
return True
# ================ PB Mod ===================
def start_shell(shell_settings, mode=""):
if shell_settings.get("enable", 0):
import Civ4ShellBackend
shell_ip = str(shell_settings.get("ip", "127.0.0.1"))
shell_port = int(shell_settings.get("port", 3333))
shell = Civ4ShellBackend.Server(shell_ip, shell_port)
shell.set_mode(mode)
return shell
else:
return None
|
YggdrasiI/PBStats
|
PBs/Python/v9/PbAdmin.py
|
Python
|
gpl-2.0
| 29,412
|
#******************************************************************************
# *
# * ** * * * * *
# * * * * * * * * * *
# ***** * * * * ***** ** *** * * ** *** *** *
# * * * * * * * * * * * * * * * * * * * *
# * * * * * * * * * * * * * * * * * * * *
# * * ** * ** * * *** *** *** ** *** * * *
# * * * *
# ** * * *
# *
#******************************************************************************
# *
# This file is part of AQUAgpusph, a free CFD program based on SPH. *
# Copyright (C) 2012 Jose Luis Cercos Pita <jl.cercos@upm.es> *
# *
# AQUAgpusph is free software: you can redistribute it and/or modify *
# it under the terms of the GNU General Public License as published by *
# the Free Software Foundation, either version 3 of the License, or *
# (at your option) any later version. *
# *
# AQUAgpusph is distributed in the hope that it will be useful, *
# but WITHOUT ANY WARRANTY; without even the implied warranty of *
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
# GNU General Public License for more details. *
# *
# You should have received a copy of the GNU General Public License *
# along with AQUAgpusph. If not, see <http://www.gnu.org/licenses/>. *
# *
#******************************************************************************
import os
from os import path
import numpy as np
from scipy.signal import savgol_filter
import matplotlib.pyplot as plt
import matplotlib.animation as animation
def readFile(filepath):
""" Read and extract data from a file
    :param filepath: File to read
"""
abspath = filepath
if not path.isabs(filepath):
abspath = path.join(path.dirname(path.abspath(__file__)), filepath)
# Read the file by lines
f = open(abspath, "r")
lines = f.readlines()
f.close()
data = []
    for l in lines[1:-1]:  # Skip the header line and the last line, which may be incomplete
l = l.strip()
while l.find(' ') != -1:
l = l.replace(' ', ' ')
fields = l.split(' ')
        try:
            # Convert eagerly so malformed fields raise here, not later in zip()
            data.append([float(x) for x in fields])
        except ValueError:
            continue
# Transpose the data
return [list(d) for d in zip(*data)]
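# Illustrative sketch (not part of the original script) of the layout returned
# by readFile(): rows are read from the file and returned column-wise, so
#   data = readFile('sensors.out')
#   t, p = data[0], data[1]
# matches the unpacking done in update() below.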
line = None
def update(frame_index):
plt.tight_layout()
try:
data = readFile('sensors.out')
t = data[0]
p = data[1]
except IndexError:
return
except FileNotFoundError:
return
try:
line.set_data(t, savgol_filter(p, 15, 3))
except ValueError:
        # Not enough samples yet for the Savitzky-Golay window (length 15)
line.set_data(t, p)
fig = plt.figure()
ax = fig.add_subplot(111)
FNAME = path.join('@EXAMPLE_DEST_DIR@', 'lateral_water_1x.txt')
T,P,A,DADT,_,_ = np.loadtxt(FNAME,
delimiter='\t',
skiprows=1,
unpack=True)
exp_t = T
exp_p = 100.0 * P
ax.plot(exp_t,
exp_p,
label=r'$\mathrm{Experiments}$',
color="red",
linewidth=1.0)
t = [0.0]
p = [0.0]
line, = ax.plot(t,
p,
label=r'$\mathrm{SPH}$',
color="black",
linewidth=1.0)
# Set some options
ax.grid()
ax.legend(loc='best')
ax.set_xlim(0, 5)
ax.set_ylim(-1000, 5000)
ax.set_autoscale_on(False)
ax.set_xlabel(r"$t \, [\mathrm{s}]$")
ax.set_ylabel(r"$p \, [\mathrm{Pa}]$")
update(0)
ani = animation.FuncAnimation(fig, update, interval=5000)
plt.show()
|
sanguinariojoe/aquagpusph
|
examples/3D/spheric_testcase10_waveimpact/cMake/plot_p.py
|
Python
|
gpl-3.0
| 4,496
|
#!/usr/bin/env python2.7
"""
Generated Mon Feb 9 19:08:05 2009 by generateDS.py.
"""
from string import lower as str_lower
from xml.dom import minidom
from xml.dom import Node
import sys
import compoundsuper as supermod
from compoundsuper import MixedContainer
class DoxygenTypeSub(supermod.DoxygenType):
def __init__(self, version=None, compounddef=None):
supermod.DoxygenType.__init__(self, version, compounddef)
def find(self, details):
return self.compounddef.find(details)
supermod.DoxygenType.subclass = DoxygenTypeSub
# end class DoxygenTypeSub
class compounddefTypeSub(supermod.compounddefType):
def __init__(self, kind=None, prot=None, id=None, compoundname='', title='', basecompoundref=None, derivedcompoundref=None, includes=None, includedby=None, incdepgraph=None, invincdepgraph=None, innerdir=None, innerfile=None, innerclass=None, innernamespace=None, innerpage=None, innergroup=None, templateparamlist=None, sectiondef=None, briefdescription=None, detaileddescription=None, inheritancegraph=None, collaborationgraph=None, programlisting=None, location=None, listofallmembers=None):
supermod.compounddefType.__init__(self, kind, prot, id, compoundname, title, basecompoundref, derivedcompoundref, includes, includedby, incdepgraph, invincdepgraph, innerdir, innerfile, innerclass, innernamespace, innerpage, innergroup, templateparamlist, sectiondef, briefdescription, detaileddescription, inheritancegraph, collaborationgraph, programlisting, location, listofallmembers)
def find(self, details):
if self.id == details.refid:
return self
for sectiondef in self.sectiondef:
result = sectiondef.find(details)
if result:
return result
supermod.compounddefType.subclass = compounddefTypeSub
# end class compounddefTypeSub
class listofallmembersTypeSub(supermod.listofallmembersType):
def __init__(self, member=None):
supermod.listofallmembersType.__init__(self, member)
supermod.listofallmembersType.subclass = listofallmembersTypeSub
# end class listofallmembersTypeSub
class memberRefTypeSub(supermod.memberRefType):
def __init__(self, virt=None, prot=None, refid=None, ambiguityscope=None, scope='', name=''):
supermod.memberRefType.__init__(self, virt, prot, refid, ambiguityscope, scope, name)
supermod.memberRefType.subclass = memberRefTypeSub
# end class memberRefTypeSub
class compoundRefTypeSub(supermod.compoundRefType):
def __init__(self, virt=None, prot=None, refid=None, valueOf_='', mixedclass_=None, content_=None):
supermod.compoundRefType.__init__(self, mixedclass_, content_)
supermod.compoundRefType.subclass = compoundRefTypeSub
# end class compoundRefTypeSub
class reimplementTypeSub(supermod.reimplementType):
def __init__(self, refid=None, valueOf_='', mixedclass_=None, content_=None):
supermod.reimplementType.__init__(self, mixedclass_, content_)
supermod.reimplementType.subclass = reimplementTypeSub
# end class reimplementTypeSub
class incTypeSub(supermod.incType):
def __init__(self, local=None, refid=None, valueOf_='', mixedclass_=None, content_=None):
supermod.incType.__init__(self, mixedclass_, content_)
supermod.incType.subclass = incTypeSub
# end class incTypeSub
class refTypeSub(supermod.refType):
def __init__(self, prot=None, refid=None, valueOf_='', mixedclass_=None, content_=None):
supermod.refType.__init__(self, mixedclass_, content_)
supermod.refType.subclass = refTypeSub
# end class refTypeSub
class refTextTypeSub(supermod.refTextType):
def __init__(self, refid=None, kindref=None, external=None, valueOf_='', mixedclass_=None, content_=None):
supermod.refTextType.__init__(self, mixedclass_, content_)
supermod.refTextType.subclass = refTextTypeSub
# end class refTextTypeSub
class sectiondefTypeSub(supermod.sectiondefType):
def __init__(self, kind=None, header='', description=None, memberdef=None):
supermod.sectiondefType.__init__(self, kind, header, description, memberdef)
def find(self, details):
for memberdef in self.memberdef:
if memberdef.id == details.refid:
return memberdef
return None
supermod.sectiondefType.subclass = sectiondefTypeSub
# end class sectiondefTypeSub
class memberdefTypeSub(supermod.memberdefType):
def __init__(self, initonly=None, kind=None, volatile=None, const=None, raise_=None, virt=None, readable=None, prot=None, explicit=None, new=None, final=None, writable=None, add=None, static=None, remove=None, sealed=None, mutable=None, gettable=None, inline=None, settable=None, id=None, templateparamlist=None, type_=None, definition='', argsstring='', name='', read='', write='', bitfield='', reimplements=None, reimplementedby=None, param=None, enumvalue=None, initializer=None, exceptions=None, briefdescription=None, detaileddescription=None, inbodydescription=None, location=None, references=None, referencedby=None):
supermod.memberdefType.__init__(self, initonly, kind, volatile, const, raise_, virt, readable, prot, explicit, new, final, writable, add, static, remove, sealed, mutable, gettable, inline, settable, id, templateparamlist, type_, definition, argsstring, name, read, write, bitfield, reimplements, reimplementedby, param, enumvalue, initializer, exceptions, briefdescription, detaileddescription, inbodydescription, location, references, referencedby)
supermod.memberdefType.subclass = memberdefTypeSub
# end class memberdefTypeSub
class descriptionTypeSub(supermod.descriptionType):
def __init__(self, title='', para=None, sect1=None, internal=None, mixedclass_=None, content_=None):
supermod.descriptionType.__init__(self, mixedclass_, content_)
supermod.descriptionType.subclass = descriptionTypeSub
# end class descriptionTypeSub
class enumvalueTypeSub(supermod.enumvalueType):
def __init__(self, prot=None, id=None, name='', initializer=None, briefdescription=None, detaileddescription=None, mixedclass_=None, content_=None):
supermod.enumvalueType.__init__(self, mixedclass_, content_)
supermod.enumvalueType.subclass = enumvalueTypeSub
# end class enumvalueTypeSub
class templateparamlistTypeSub(supermod.templateparamlistType):
def __init__(self, param=None):
supermod.templateparamlistType.__init__(self, param)
supermod.templateparamlistType.subclass = templateparamlistTypeSub
# end class templateparamlistTypeSub
class paramTypeSub(supermod.paramType):
def __init__(self, type_=None, declname='', defname='', array='', defval=None, briefdescription=None):
supermod.paramType.__init__(self, type_, declname, defname, array, defval, briefdescription)
supermod.paramType.subclass = paramTypeSub
# end class paramTypeSub
class linkedTextTypeSub(supermod.linkedTextType):
def __init__(self, ref=None, mixedclass_=None, content_=None):
supermod.linkedTextType.__init__(self, mixedclass_, content_)
supermod.linkedTextType.subclass = linkedTextTypeSub
# end class linkedTextTypeSub
class graphTypeSub(supermod.graphType):
def __init__(self, node=None):
supermod.graphType.__init__(self, node)
supermod.graphType.subclass = graphTypeSub
# end class graphTypeSub
class nodeTypeSub(supermod.nodeType):
def __init__(self, id=None, label='', link=None, childnode=None):
supermod.nodeType.__init__(self, id, label, link, childnode)
supermod.nodeType.subclass = nodeTypeSub
# end class nodeTypeSub
class childnodeTypeSub(supermod.childnodeType):
def __init__(self, relation=None, refid=None, edgelabel=None):
supermod.childnodeType.__init__(self, relation, refid, edgelabel)
supermod.childnodeType.subclass = childnodeTypeSub
# end class childnodeTypeSub
class linkTypeSub(supermod.linkType):
def __init__(self, refid=None, external=None, valueOf_=''):
supermod.linkType.__init__(self, refid, external)
supermod.linkType.subclass = linkTypeSub
# end class linkTypeSub
class listingTypeSub(supermod.listingType):
def __init__(self, codeline=None):
supermod.listingType.__init__(self, codeline)
supermod.listingType.subclass = listingTypeSub
# end class listingTypeSub
class codelineTypeSub(supermod.codelineType):
def __init__(self, external=None, lineno=None, refkind=None, refid=None, highlight=None):
supermod.codelineType.__init__(self, external, lineno, refkind, refid, highlight)
supermod.codelineType.subclass = codelineTypeSub
# end class codelineTypeSub
class highlightTypeSub(supermod.highlightType):
def __init__(self, class_=None, sp=None, ref=None, mixedclass_=None, content_=None):
supermod.highlightType.__init__(self, mixedclass_, content_)
supermod.highlightType.subclass = highlightTypeSub
# end class highlightTypeSub
class referenceTypeSub(supermod.referenceType):
def __init__(self, endline=None, startline=None, refid=None, compoundref=None, valueOf_='', mixedclass_=None, content_=None):
supermod.referenceType.__init__(self, mixedclass_, content_)
supermod.referenceType.subclass = referenceTypeSub
# end class referenceTypeSub
class locationTypeSub(supermod.locationType):
def __init__(self, bodystart=None, line=None, bodyend=None, bodyfile=None, file=None, valueOf_=''):
supermod.locationType.__init__(self, bodystart, line, bodyend, bodyfile, file)
supermod.locationType.subclass = locationTypeSub
# end class locationTypeSub
class docSect1TypeSub(supermod.docSect1Type):
def __init__(self, id=None, title='', para=None, sect2=None, internal=None, mixedclass_=None, content_=None):
supermod.docSect1Type.__init__(self, mixedclass_, content_)
supermod.docSect1Type.subclass = docSect1TypeSub
# end class docSect1TypeSub
class docSect2TypeSub(supermod.docSect2Type):
def __init__(self, id=None, title='', para=None, sect3=None, internal=None, mixedclass_=None, content_=None):
supermod.docSect2Type.__init__(self, mixedclass_, content_)
supermod.docSect2Type.subclass = docSect2TypeSub
# end class docSect2TypeSub
class docSect3TypeSub(supermod.docSect3Type):
def __init__(self, id=None, title='', para=None, sect4=None, internal=None, mixedclass_=None, content_=None):
supermod.docSect3Type.__init__(self, mixedclass_, content_)
supermod.docSect3Type.subclass = docSect3TypeSub
# end class docSect3TypeSub
class docSect4TypeSub(supermod.docSect4Type):
def __init__(self, id=None, title='', para=None, internal=None, mixedclass_=None, content_=None):
supermod.docSect4Type.__init__(self, mixedclass_, content_)
supermod.docSect4Type.subclass = docSect4TypeSub
# end class docSect4TypeSub
class docInternalTypeSub(supermod.docInternalType):
def __init__(self, para=None, sect1=None, mixedclass_=None, content_=None):
supermod.docInternalType.__init__(self, mixedclass_, content_)
supermod.docInternalType.subclass = docInternalTypeSub
# end class docInternalTypeSub
class docInternalS1TypeSub(supermod.docInternalS1Type):
def __init__(self, para=None, sect2=None, mixedclass_=None, content_=None):
supermod.docInternalS1Type.__init__(self, mixedclass_, content_)
supermod.docInternalS1Type.subclass = docInternalS1TypeSub
# end class docInternalS1TypeSub
class docInternalS2TypeSub(supermod.docInternalS2Type):
def __init__(self, para=None, sect3=None, mixedclass_=None, content_=None):
supermod.docInternalS2Type.__init__(self, mixedclass_, content_)
supermod.docInternalS2Type.subclass = docInternalS2TypeSub
# end class docInternalS2TypeSub
class docInternalS3TypeSub(supermod.docInternalS3Type):
def __init__(self, para=None, sect3=None, mixedclass_=None, content_=None):
supermod.docInternalS3Type.__init__(self, mixedclass_, content_)
supermod.docInternalS3Type.subclass = docInternalS3TypeSub
# end class docInternalS3TypeSub
class docInternalS4TypeSub(supermod.docInternalS4Type):
def __init__(self, para=None, mixedclass_=None, content_=None):
supermod.docInternalS4Type.__init__(self, mixedclass_, content_)
supermod.docInternalS4Type.subclass = docInternalS4TypeSub
# end class docInternalS4TypeSub
class docURLLinkSub(supermod.docURLLink):
def __init__(self, url=None, valueOf_='', mixedclass_=None, content_=None):
supermod.docURLLink.__init__(self, mixedclass_, content_)
supermod.docURLLink.subclass = docURLLinkSub
# end class docURLLinkSub
class docAnchorTypeSub(supermod.docAnchorType):
def __init__(self, id=None, valueOf_='', mixedclass_=None, content_=None):
supermod.docAnchorType.__init__(self, mixedclass_, content_)
supermod.docAnchorType.subclass = docAnchorTypeSub
# end class docAnchorTypeSub
class docFormulaTypeSub(supermod.docFormulaType):
def __init__(self, id=None, valueOf_='', mixedclass_=None, content_=None):
supermod.docFormulaType.__init__(self, mixedclass_, content_)
supermod.docFormulaType.subclass = docFormulaTypeSub
# end class docFormulaTypeSub
class docIndexEntryTypeSub(supermod.docIndexEntryType):
def __init__(self, primaryie='', secondaryie=''):
supermod.docIndexEntryType.__init__(self, primaryie, secondaryie)
supermod.docIndexEntryType.subclass = docIndexEntryTypeSub
# end class docIndexEntryTypeSub
class docListTypeSub(supermod.docListType):
def __init__(self, listitem=None):
supermod.docListType.__init__(self, listitem)
supermod.docListType.subclass = docListTypeSub
# end class docListTypeSub
class docListItemTypeSub(supermod.docListItemType):
def __init__(self, para=None):
supermod.docListItemType.__init__(self, para)
supermod.docListItemType.subclass = docListItemTypeSub
# end class docListItemTypeSub
class docSimpleSectTypeSub(supermod.docSimpleSectType):
def __init__(self, kind=None, title=None, para=None):
supermod.docSimpleSectType.__init__(self, kind, title, para)
supermod.docSimpleSectType.subclass = docSimpleSectTypeSub
# end class docSimpleSectTypeSub
class docVarListEntryTypeSub(supermod.docVarListEntryType):
def __init__(self, term=None):
supermod.docVarListEntryType.__init__(self, term)
supermod.docVarListEntryType.subclass = docVarListEntryTypeSub
# end class docVarListEntryTypeSub
class docRefTextTypeSub(supermod.docRefTextType):
def __init__(self, refid=None, kindref=None, external=None, valueOf_='', mixedclass_=None, content_=None):
supermod.docRefTextType.__init__(self, mixedclass_, content_)
supermod.docRefTextType.subclass = docRefTextTypeSub
# end class docRefTextTypeSub
class docTableTypeSub(supermod.docTableType):
def __init__(self, rows=None, cols=None, row=None, caption=None):
supermod.docTableType.__init__(self, rows, cols, row, caption)
supermod.docTableType.subclass = docTableTypeSub
# end class docTableTypeSub
class docRowTypeSub(supermod.docRowType):
def __init__(self, entry=None):
supermod.docRowType.__init__(self, entry)
supermod.docRowType.subclass = docRowTypeSub
# end class docRowTypeSub
class docEntryTypeSub(supermod.docEntryType):
def __init__(self, thead=None, para=None):
supermod.docEntryType.__init__(self, thead, para)
supermod.docEntryType.subclass = docEntryTypeSub
# end class docEntryTypeSub
class docHeadingTypeSub(supermod.docHeadingType):
def __init__(self, level=None, valueOf_='', mixedclass_=None, content_=None):
supermod.docHeadingType.__init__(self, mixedclass_, content_)
supermod.docHeadingType.subclass = docHeadingTypeSub
# end class docHeadingTypeSub
class docImageTypeSub(supermod.docImageType):
def __init__(self, width=None, type_=None, name=None, height=None, valueOf_='', mixedclass_=None, content_=None):
supermod.docImageType.__init__(self, mixedclass_, content_)
supermod.docImageType.subclass = docImageTypeSub
# end class docImageTypeSub
class docDotFileTypeSub(supermod.docDotFileType):
def __init__(self, name=None, valueOf_='', mixedclass_=None, content_=None):
supermod.docDotFileType.__init__(self, mixedclass_, content_)
supermod.docDotFileType.subclass = docDotFileTypeSub
# end class docDotFileTypeSub
class docTocItemTypeSub(supermod.docTocItemType):
def __init__(self, id=None, valueOf_='', mixedclass_=None, content_=None):
supermod.docTocItemType.__init__(self, mixedclass_, content_)
supermod.docTocItemType.subclass = docTocItemTypeSub
# end class docTocItemTypeSub
class docTocListTypeSub(supermod.docTocListType):
def __init__(self, tocitem=None):
supermod.docTocListType.__init__(self, tocitem)
supermod.docTocListType.subclass = docTocListTypeSub
# end class docTocListTypeSub
class docLanguageTypeSub(supermod.docLanguageType):
def __init__(self, langid=None, para=None):
supermod.docLanguageType.__init__(self, langid, para)
supermod.docLanguageType.subclass = docLanguageTypeSub
# end class docLanguageTypeSub
class docParamListTypeSub(supermod.docParamListType):
def __init__(self, kind=None, parameteritem=None):
supermod.docParamListType.__init__(self, kind, parameteritem)
supermod.docParamListType.subclass = docParamListTypeSub
# end class docParamListTypeSub
class docParamListItemSub(supermod.docParamListItem):
def __init__(self, parameternamelist=None, parameterdescription=None):
supermod.docParamListItem.__init__(self, parameternamelist, parameterdescription)
supermod.docParamListItem.subclass = docParamListItemSub
# end class docParamListItemSub
class docParamNameListSub(supermod.docParamNameList):
def __init__(self, parametername=None):
supermod.docParamNameList.__init__(self, parametername)
supermod.docParamNameList.subclass = docParamNameListSub
# end class docParamNameListSub
class docParamNameSub(supermod.docParamName):
def __init__(self, direction=None, ref=None, mixedclass_=None, content_=None):
supermod.docParamName.__init__(self, mixedclass_, content_)
supermod.docParamName.subclass = docParamNameSub
# end class docParamNameSub
class docXRefSectTypeSub(supermod.docXRefSectType):
def __init__(self, id=None, xreftitle=None, xrefdescription=None):
supermod.docXRefSectType.__init__(self, id, xreftitle, xrefdescription)
supermod.docXRefSectType.subclass = docXRefSectTypeSub
# end class docXRefSectTypeSub
class docCopyTypeSub(supermod.docCopyType):
def __init__(self, link=None, para=None, sect1=None, internal=None):
supermod.docCopyType.__init__(self, link, para, sect1, internal)
supermod.docCopyType.subclass = docCopyTypeSub
# end class docCopyTypeSub
class docCharTypeSub(supermod.docCharType):
def __init__(self, char=None, valueOf_=''):
supermod.docCharType.__init__(self, char)
supermod.docCharType.subclass = docCharTypeSub
# end class docCharTypeSub
class docParaTypeSub(supermod.docParaType):
def __init__(self, char=None, valueOf_=''):
supermod.docParaType.__init__(self, char)
self.parameterlist = []
self.simplesects = []
self.content = []
def buildChildren(self, child_, nodeName_):
supermod.docParaType.buildChildren(self, child_, nodeName_)
if child_.nodeType == Node.TEXT_NODE:
obj_ = self.mixedclass_(MixedContainer.CategoryText,
MixedContainer.TypeNone, '', child_.nodeValue)
self.content.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == "ref":
obj_ = supermod.docRefTextType.factory()
obj_.build(child_)
self.content.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'parameterlist':
obj_ = supermod.docParamListType.factory()
obj_.build(child_)
self.parameterlist.append(obj_)
elif child_.nodeType == Node.ELEMENT_NODE and \
nodeName_ == 'simplesect':
obj_ = supermod.docSimpleSectType.factory()
obj_.build(child_)
self.simplesects.append(obj_)
supermod.docParaType.subclass = docParaTypeSub
# end class docParaTypeSub
def parse(inFilename):
doc = minidom.parse(inFilename)
rootNode = doc.documentElement
rootObj = supermod.DoxygenType.factory()
rootObj.build(rootNode)
return rootObj
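# Illustrative usage sketch (not part of the generated module; the file name
# below is hypothetical):
#
#   doc = parse("xml/classfoo.xml")
#   member = doc.find(details)  # 'details' must expose a 'refid' attribute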
|
daniestevez/gr-ao40
|
docs/doxygen/doxyxml/generated/compound.py
|
Python
|
gpl-3.0
| 20,299
|
"""
:mod:`DataManager`
.. module:: DataManager
:synopsis: DataManager links the functionalities of StorageElement and FileCatalog.
This module consists of DataManager and related classes.
"""
# # RCSID
__RCSID__ = "$Id$"
# # imports
from datetime import datetime, timedelta
import fnmatch
import os
import time
from types import StringTypes, ListType, DictType, StringType, TupleType
# # from DIRAC
import DIRAC
from DIRAC import S_OK, S_ERROR, gLogger, gConfig
from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations
from DIRAC.ConfigurationSystem.Client.Helpers.Resources import getRegistrationProtocols, getThirdPartyProtocols
from DIRAC.AccountingSystem.Client.DataStoreClient import gDataStoreClient
from DIRAC.AccountingSystem.Client.Types.DataOperation import DataOperation
from DIRAC.Core.Utilities.Adler import fileAdler, compareAdler
from DIRAC.Core.Utilities.File import makeGuid, getSize
from DIRAC.Core.Utilities.List import randomize
from DIRAC.Core.Utilities.SiteSEMapping import getSEsForSite, isSameSiteSE, getSEsForCountry
from DIRAC.Resources.Catalog.FileCatalog import FileCatalog
from DIRAC.Resources.Storage.StorageElement import StorageElement
from DIRAC.Resources.Storage.StorageFactory import StorageFactory
from DIRAC.ResourceStatusSystem.Client.ResourceStatus import ResourceStatus
from DIRAC.Core.Security.ProxyInfo import getProxyInfo
from DIRAC.Core.Utilities.ReturnValues import returnSingleResult
class DataManager( object ):
"""
.. class:: DataManager
A DataManager is taking all the actions that impact or require the FileCatalog and the StorageElement together
"""
def __init__( self, catalogs = [], masterCatalogOnly = False, vo = False ):
""" c'tor
:param self: self reference
    :param catalogs: the list of catalogs in which to perform the operations. This
                     list will be ignored if masterCatalogOnly is set to True
:param masterCatalogOnly: if set to True, the operations will be performed only on the master catalog.
The catalogs parameter will be ignored.
:param vo: the VO for which the DataManager is created, get VO from the current proxy if not specified
"""
self.log = gLogger.getSubLogger( self.__class__.__name__, True )
self.vo = vo
catalogsToUse = FileCatalog( vo = self.vo ).getMasterCatalogNames()['Value'] if masterCatalogOnly else catalogs
self.fc = FileCatalog( catalogs = catalogsToUse, vo = self.vo )
self.accountingClient = None
self.registrationProtocol = getRegistrationProtocols()
self.thirdPartyProtocols = getThirdPartyProtocols()
self.resourceStatus = ResourceStatus()
self.ignoreMissingInFC = Operations( self.vo ).getValue( 'DataManagement/IgnoreMissingInFC', False )
self.useCatalogPFN = Operations( self.vo ).getValue( 'DataManagement/UseCatalogPFN', True )
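  # Illustrative construction sketch (not part of the original module; the LFN
  # and destination directory below are hypothetical):
  #
  #   dm = DataManager( masterCatalogOnly = True )
  #   res = dm.getFile( '/vo/user/some/file.dat', destinationDir = '/tmp' )
  #   if res['OK']:
  #     print res['Value']['Successful']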
def setAccountingClient( self, client ):
""" Set Accounting Client instance
"""
self.accountingClient = client
  def __verifyWritePermission( self, path ):
    """ Check if we have write permission to the given file (if it exists) or to its directory
"""
if type( path ) in StringTypes:
paths = [ path ]
else:
paths = path
res = self.fc.getPathPermissions( paths )
if not res['OK']:
return res
result = {'Successful':[], 'Failed':[]}
for path in paths:
if res['Value']['Successful'].get( path, {} ).get( 'Write', False ):
result['Successful'].append( path )
else:
result['Failed'].append( path )
return S_OK( result )
##########################################################################
#
# These are the bulk removal methods
#
def cleanLogicalDirectory( self, lfnDir ):
""" Clean the logical directory from the catalog and storage
"""
if type( lfnDir ) in StringTypes:
lfnDir = [ lfnDir ]
retDict = { "Successful" : {}, "Failed" : {} }
for folder in lfnDir:
res = self.__cleanDirectory( folder )
if not res['OK']:
self.log.debug( "Failed to clean directory.", "%s %s" % ( folder, res['Message'] ) )
retDict["Failed"][folder] = res['Message']
else:
self.log.debug( "Successfully removed directory.", folder )
retDict["Successful"][folder] = res['Value']
return S_OK( retDict )
def __cleanDirectory( self, folder ):
""" delete all files from directory :folder: in FileCatalog and StorageElement
:param self: self reference
:param str folder: directory name
"""
res = self.__verifyWritePermission( folder )
if not res['OK']:
return res
if folder not in res['Value']['Successful']:
errStr = "__cleanDirectory: Write access not permitted for this credential."
self.log.debug( errStr, folder )
return S_ERROR( errStr )
res = self.__getCatalogDirectoryContents( [ folder ] )
if not res['OK']:
return res
res = self.removeFile( res['Value'].keys() )
if not res['OK']:
return res
for lfn, reason in res['Value']['Failed'].items():
gLogger.error( "Failed to remove file found in the catalog", "%s %s" % ( lfn, reason ) )
res = returnSingleResult( self.removeFile( [ '%s/dirac_directory' % folder ] ) )
if not res['OK']:
      if "No such file" not in res['Message']:
gLogger.warn( 'Failed to delete dirac_directory placeholder file' )
storageElements = gConfig.getValue( 'Resources/StorageElementGroups/SE_Cleaning_List', [] )
failed = False
for storageElement in sorted( storageElements ):
res = self.__removeStorageDirectory( folder, storageElement )
if not res['OK']:
failed = True
if failed:
return S_ERROR( "Failed to clean storage directory at all SEs" )
res = returnSingleResult( self.fc.removeDirectory( folder, recursive = True ) )
if not res['OK']:
return res
return S_OK()
def __removeStorageDirectory( self, directory, storageElement ):
""" delete SE directory
:param self: self reference
:param str directory: folder to be removed
:param str storageElement: DIRAC SE name
"""
se = StorageElement( storageElement, vo = self.vo )
res = returnSingleResult( se.exists( directory ) )
if not res['OK']:
      self.log.debug( "Failed to obtain existence of directory", res['Message'] )
return res
exists = res['Value']
if not exists:
self.log.debug( "The directory %s does not exist at %s " % ( directory, storageElement ) )
return S_OK()
res = returnSingleResult( se.removeDirectory( directory, recursive = True ) )
if not res['OK']:
self.log.debug( "Failed to remove storage directory", res['Message'] )
return res
self.log.debug( "Successfully removed %d files from %s at %s" % ( res['Value']['FilesRemoved'],
directory,
storageElement ) )
return S_OK()
def __getCatalogDirectoryContents( self, directories ):
""" ls recursively all files in directories
:param self: self reference
:param list directories: folder names
"""
self.log.debug( 'Obtaining the catalog contents for %d directories:' % len( directories ) )
activeDirs = directories
allFiles = {}
while len( activeDirs ) > 0:
currentDir = activeDirs[0]
res = returnSingleResult( self.fc.listDirectory( currentDir ) )
activeDirs.remove( currentDir )
if not res['OK']:
self.log.debug( "Problem getting the %s directory content" % currentDir, res['Message'] )
else:
dirContents = res['Value']
activeDirs.extend( dirContents['SubDirs'] )
allFiles.update( dirContents['Files'] )
self.log.debug( "Found %d files" % len( allFiles ) )
return S_OK( allFiles )
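  # Note: the loop above is a breadth-first walk over the catalog tree;
  # sub-directories returned by listDirectory() are appended to activeDirs and
  # consumed one by one until the queue is empty.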
def getReplicasFromDirectory( self, directory ):
""" get all replicas from a given directory
:param self: self reference
:param mixed directory: list of directories or one directory
"""
if type( directory ) in StringTypes:
directories = [directory]
else:
directories = directory
res = self.__getCatalogDirectoryContents( directories )
if not res['OK']:
return res
allReplicas = {}
for lfn, metadata in res['Value'].items():
allReplicas[lfn] = metadata['Replicas']
return S_OK( allReplicas )
def getFilesFromDirectory( self, directory, days = 0, wildcard = '*' ):
""" get all files from :directory: older than :days: days matching to :wildcard:
:param self: self reference
:param mixed directory: list of directories or directory name
:param int days: ctime days
:param str wildcard: pattern to match
"""
if type( directory ) in StringTypes:
directories = [directory]
else:
directories = directory
self.log.debug( "Obtaining the files older than %d days in %d directories:" % ( days, len( directories ) ) )
for folder in directories:
self.log.debug( folder )
activeDirs = directories
allFiles = []
while len( activeDirs ) > 0:
currentDir = activeDirs[0]
# We only need the metadata (verbose) if a limit date is given
res = returnSingleResult( self.fc.listDirectory( currentDir, verbose = ( days != 0 ) ) )
activeDirs.remove( currentDir )
if not res['OK']:
self.log.debug( "Error retrieving directory contents", "%s %s" % ( currentDir, res['Message'] ) )
else:
dirContents = res['Value']
subdirs = dirContents['SubDirs']
files = dirContents['Files']
self.log.debug( "%s: %d files, %d sub-directories" % ( currentDir, len( files ), len( subdirs ) ) )
for subdir in subdirs:
if ( not days ) or self.__isOlderThan( subdirs[subdir]['CreationDate'], days ):
if subdir[0] != '/':
subdir = currentDir + '/' + subdir
activeDirs.append( subdir )
for fileName in files:
fileInfo = files[fileName]
fileInfo = fileInfo.get( 'Metadata', fileInfo )
if ( not days ) or not fileInfo.get( 'CreationDate' ) or self.__isOlderThan( fileInfo['CreationDate'], days ):
if wildcard == '*' or fnmatch.fnmatch( fileName, wildcard ):
fileName = fileInfo.get( 'LFN', fileName )
allFiles.append( fileName )
return S_OK( allFiles )
def __isOlderThan( self, stringTime, days ):
timeDelta = timedelta( days = days )
maxCTime = datetime.utcnow() - timeDelta
# st = time.strptime( stringTime, "%a %b %d %H:%M:%S %Y" )
# cTimeStruct = datetime( st[0], st[1], st[2], st[3], st[4], st[5], st[6], None )
cTimeStruct = stringTime
if cTimeStruct < maxCTime:
return True
return False
##########################################################################
#
# These are the data transfer methods
#
def getFile( self, lfn, destinationDir = '' ):
""" Get a local copy of a LFN from Storage Elements.
'lfn' is the logical file name for the desired file
"""
if type( lfn ) == ListType:
lfns = lfn
elif type( lfn ) == StringType:
lfns = [lfn]
else:
errStr = "getFile: Supplied lfn must be string or list of strings."
self.log.debug( errStr )
return S_ERROR( errStr )
self.log.debug( "getFile: Attempting to get %s files." % len( lfns ) )
res = self.getActiveReplicas( lfns )
if not res['OK']:
return res
failed = res['Value']['Failed']
lfnReplicas = res['Value']['Successful']
res = self.fc.getFileMetadata( lfnReplicas.keys() )
if not res['OK']:
return res
failed.update( res['Value']['Failed'] )
fileMetadata = res['Value']['Successful']
successful = {}
for lfn in fileMetadata:
res = self.__getFile( lfn, lfnReplicas[lfn], fileMetadata[lfn], destinationDir )
if not res['OK']:
failed[lfn] = res['Message']
else:
successful[lfn] = res['Value']
gDataStoreClient.commit()
return S_OK( { 'Successful': successful, 'Failed' : failed } )
def __getFile( self, lfn, replicas, metadata, destinationDir ):
if not replicas:
errStr = "No accessible replicas found"
self.log.debug( errStr )
return S_ERROR( errStr )
# Determine the best replicas
res = self._getSEProximity( replicas.keys() )
if not res['OK']:
return res
errTuple = ( "No SE", "found" )
for storageElementName in res['Value']:
se = StorageElement( storageElementName, vo = self.vo )
oDataOperation = self.__initialiseAccountingObject( 'getFile', storageElementName, 1 )
oDataOperation.setStartTime()
startTime = time.time()
res = returnSingleResult( se.getFile( lfn, localPath = os.path.realpath( destinationDir ) ) )
getTime = time.time() - startTime
oDataOperation.setValueByKey( 'TransferTime', getTime )
if not res['OK']:
errTuple = ( "Error getting file from storage:", "%s from %s, %s" % ( lfn, storageElementName, res['Message'] ) )
oDataOperation.setValueByKey( 'TransferOK', 0 )
oDataOperation.setValueByKey( 'FinalStatus', 'Failed' )
oDataOperation.setEndTime()
else:
oDataOperation.setValueByKey( 'TransferSize', res['Value'] )
localFile = os.path.realpath( os.path.join( destinationDir, os.path.basename( lfn ) ) )
localAdler = fileAdler( localFile )
if ( metadata['Size'] != res['Value'] ):
oDataOperation.setValueByKey( 'FinalStatus', 'FinishedDirty' )
errTuple = ( "Mismatch of sizes:", "downloaded = %d, catalog = %d" % ( res['Value'], metadata['Size'] ) )
elif ( metadata['Checksum'] ) and ( not compareAdler( metadata['Checksum'], localAdler ) ):
oDataOperation.setValueByKey( 'FinalStatus', 'FinishedDirty' )
errTuple = ( "Mismatch of checksums:", "downloaded = %s, catalog = %s" % ( localAdler, metadata['Checksum'] ) )
else:
oDataOperation.setEndTime()
gDataStoreClient.addRegister( oDataOperation )
return S_OK( localFile )
    # If we are here, there was an error; log it at debug level
self.log.debug( errTuple[0], errTuple[1] )
gDataStoreClient.addRegister( oDataOperation )
self.log.verbose( "getFile: Failed to get local copy from any replicas:", "\n%s %s" % errTuple )
return S_ERROR( "DataManager.getFile: Failed to get local copy from any replicas\n%s %s" % errTuple )
def _getSEProximity( self, ses ):
""" get SE proximity """
siteName = DIRAC.siteName()
localSEs = [se for se in getSEsForSite( siteName )['Value'] if se in ses]
countrySEs = []
countryCode = str( siteName ).split( '.' )[-1]
res = getSEsForCountry( countryCode )
if res['OK']:
countrySEs = [se for se in res['Value'] if se in ses and se not in localSEs]
sortedSEs = randomize( localSEs ) + randomize( countrySEs )
sortedSEs += randomize( [se for se in ses if se not in sortedSEs] )
return S_OK( sortedSEs )
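  # Illustrative ordering sketch (the SE and site names are hypothetical):
  # for a site 'LCG.CERN.ch', _getSEProximity( ['CERN-USER', 'RAL-USER'] ) returns
  # local-site SEs first, then same-country SEs, then the remaining ones, each
  # group randomized, so __getFile tries the "closest" replicas first.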
def putAndRegister( self, lfn, fileName, diracSE, guid = None, path = None, checksum = None ):
""" Put a local file to a Storage Element and register in the File Catalogues
'lfn' is the file LFN
'file' is the full path to the local file
'diracSE' is the Storage Element to which to put the file
'guid' is the guid with which the file is to be registered (if not provided will be generated)
'path' is the path on the storage where the file will be put (if not provided the LFN will be used)
"""
# ancestors = ancestors if ancestors else list(
folder = os.path.dirname( lfn )
res = self.__verifyWritePermission( folder )
if not res['OK']:
return res
if folder not in res['Value']['Successful']:
errStr = "putAndRegister: Write access not permitted for this credential."
self.log.debug( errStr, lfn )
return S_ERROR( errStr )
# Check that the local file exists
if not os.path.exists( fileName ):
errStr = "putAndRegister: Supplied file does not exist."
self.log.debug( errStr, fileName )
return S_ERROR( errStr )
# If the path is not provided then use the LFN path
if not path:
path = os.path.dirname( lfn )
# Obtain the size of the local file
size = getSize( fileName )
if size == 0:
errStr = "putAndRegister: Supplied file is zero size."
self.log.debug( errStr, fileName )
return S_ERROR( errStr )
# If the GUID is not given, generate it here
if not guid:
guid = makeGuid( fileName )
if not checksum:
self.log.debug( "putAndRegister: Checksum information not provided. Calculating adler32." )
checksum = fileAdler( fileName )
self.log.debug( "putAndRegister: Checksum calculated to be %s." % checksum )
res = self.fc.exists( {lfn:guid} )
if not res['OK']:
      errStr = "putAndRegister: Completely failed to determine existence of destination LFN."
self.log.debug( errStr, lfn )
return res
if lfn not in res['Value']['Successful']:
errStr = "putAndRegister: Failed to determine existence of destination LFN."
self.log.debug( errStr, lfn )
return S_ERROR( errStr )
if res['Value']['Successful'][lfn]:
if res['Value']['Successful'][lfn] == lfn:
errStr = "putAndRegister: The supplied LFN already exists in the File Catalog."
self.log.debug( errStr, lfn )
else:
errStr = "putAndRegister: This file GUID already exists for another file. " \
"Please remove it and try again."
self.log.debug( errStr, res['Value']['Successful'][lfn] )
return S_ERROR( "%s %s" % ( errStr, res['Value']['Successful'][lfn] ) )
##########################################################
# Instantiate the destination storage element here.
storageElement = StorageElement( diracSE, vo = self.vo )
res = storageElement.isValid()
if not res['OK']:
errStr = "putAndRegister: The storage element is not currently valid."
self.log.debug( errStr, "%s %s" % ( diracSE, res['Message'] ) )
return S_ERROR( errStr )
fileDict = {lfn:fileName}
successful = {}
failed = {}
##########################################################
# Perform the put here.
oDataOperation = self.__initialiseAccountingObject( 'putAndRegister', diracSE, 1 )
oDataOperation.setStartTime()
oDataOperation.setValueByKey( 'TransferSize', size )
startTime = time.time()
res = returnSingleResult( storageElement.putFile( fileDict ) )
putTime = time.time() - startTime
oDataOperation.setValueByKey( 'TransferTime', putTime )
if not res['OK']:
errStr = "putAndRegister: Failed to put file to Storage Element."
oDataOperation.setValueByKey( 'TransferOK', 0 )
oDataOperation.setValueByKey( 'FinalStatus', 'Failed' )
oDataOperation.setEndTime()
gDataStoreClient.addRegister( oDataOperation )
startTime = time.time()
gDataStoreClient.commit()
self.log.debug( 'putAndRegister: Sending accounting took %.1f seconds' % ( time.time() - startTime ) )
self.log.debug( errStr, "%s: %s" % ( fileName, res['Message'] ) )
return S_ERROR( "%s %s" % ( errStr, res['Message'] ) )
successful[lfn] = {'put': putTime}
###########################################################
# Perform the registration here
destinationSE = storageElement.getStorageElementName()['Value']
res = returnSingleResult( storageElement.getURL( lfn ) )
if not res['OK']:
errStr = "putAndRegister: Failed to generate destination PFN."
self.log.debug( errStr, res['Message'] )
return S_ERROR( errStr )
destUrl = res['Value']
oDataOperation.setValueByKey( 'RegistrationTotal', 1 )
fileTuple = ( lfn, destUrl, size, destinationSE, guid, checksum )
registerDict = {'LFN':lfn, 'PFN':destUrl, 'Size':size, 'TargetSE':destinationSE, 'GUID':guid, 'Addler':checksum}
startTime = time.time()
res = self.registerFile( fileTuple )
registerTime = time.time() - startTime
oDataOperation.setValueByKey( 'RegistrationTime', registerTime )
if not res['OK']:
errStr = "putAndRegister: Completely failed to register file."
self.log.debug( errStr, res['Message'] )
failed[lfn] = { 'register' : registerDict }
oDataOperation.setValueByKey( 'FinalStatus', 'Failed' )
elif lfn in res['Value']['Failed']:
errStr = "putAndRegister: Failed to register file."
self.log.debug( errStr, "%s %s" % ( lfn, res['Value']['Failed'][lfn] ) )
oDataOperation.setValueByKey( 'FinalStatus', 'Failed' )
failed[lfn] = { 'register' : registerDict }
else:
successful[lfn]['register'] = registerTime
oDataOperation.setValueByKey( 'RegistrationOK', 1 )
oDataOperation.setEndTime()
gDataStoreClient.addRegister( oDataOperation )
startTime = time.time()
gDataStoreClient.commit()
self.log.debug( 'putAndRegister: Sending accounting took %.1f seconds' % ( time.time() - startTime ) )
return S_OK( {'Successful': successful, 'Failed': failed } )
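  # Illustrative call sketch (not part of the original module; LFN, local path
  # and SE name are hypothetical):
  #
  #   res = dm.putAndRegister( '/vo/user/some/file.dat', '/tmp/file.dat', 'CERN-USER' )
  #   # on success: res['Value']['Successful'][lfn] == {'put': <seconds>, 'register': <seconds>}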
def replicateAndRegister( self, lfn, destSE, sourceSE = '', destPath = '', localCache = '' , catalog = '' ):
""" Replicate a LFN to a destination SE and register the replica.
'lfn' is the LFN to be replicated
'destSE' is the Storage Element the file should be replicated to
'sourceSE' is the source for the file replication (where not specified all replicas will be attempted)
'destPath' is the path on the destination storage element, if to be different from LHCb convention
'localCache' is the local file system location to be used as a temporary cache
"""
successful = {}
failed = {}
self.log.debug( "replicateAndRegister: Attempting to replicate %s to %s." % ( lfn, destSE ) )
startReplication = time.time()
res = self.__replicate( lfn, destSE, sourceSE, destPath, localCache )
replicationTime = time.time() - startReplication
if not res['OK']:
errStr = "DataManager.replicateAndRegister: Completely failed to replicate file."
self.log.debug( errStr, res['Message'] )
return S_ERROR( errStr )
if not res['Value']:
# The file was already present at the destination SE
self.log.debug( "replicateAndRegister: %s already present at %s." % ( lfn, destSE ) )
successful[lfn] = { 'replicate' : 0, 'register' : 0 }
resDict = { 'Successful' : successful, 'Failed' : failed }
return S_OK( resDict )
successful[lfn] = { 'replicate' : replicationTime }
destPfn = res['Value']['DestPfn']
destSE = res['Value']['DestSE']
self.log.debug( "replicateAndRegister: Attempting to register %s at %s." % ( destPfn, destSE ) )
replicaTuple = ( lfn, destPfn, destSE )
startRegistration = time.time()
res = self.registerReplica( replicaTuple, catalog = catalog )
registrationTime = time.time() - startRegistration
if not res['OK']:
# Need to return to the client that the file was replicated but not registered
errStr = "replicateAndRegister: Completely failed to register replica."
self.log.debug( errStr, res['Message'] )
failed[lfn] = { 'Registration' : { 'LFN' : lfn, 'TargetSE' : destSE, 'PFN' : destPfn } }
else:
if lfn in res['Value']['Successful']:
self.log.debug( "replicateAndRegister: Successfully registered replica." )
successful[lfn]['register'] = registrationTime
else:
errStr = "replicateAndRegister: Failed to register replica."
self.log.debug( errStr, res['Value']['Failed'][lfn] )
failed[lfn] = { 'Registration' : { 'LFN' : lfn, 'TargetSE' : destSE, 'PFN' : destPfn } }
return S_OK( {'Successful': successful, 'Failed': failed} )
def replicate( self, lfn, destSE, sourceSE = '', destPath = '', localCache = '' ):
""" Replicate a LFN to a destination SE and register the replica.
'lfn' is the LFN to be replicated
'destSE' is the Storage Element the file should be replicated to
'sourceSE' is the source for the file replication (where not specified all replicas will be attempted)
'destPath' is the path on the destination storage element, if to be different from LHCb convention
'localCache' is the local file system location to be used as a temporary cache
"""
self.log.debug( "replicate: Attempting to replicate %s to %s." % ( lfn, destSE ) )
res = self.__replicate( lfn, destSE, sourceSE, destPath, localCache )
if not res['OK']:
errStr = "replicate: Replication failed."
self.log.debug( errStr, "%s %s" % ( lfn, destSE ) )
return res
if not res['Value']:
# The file was already present at the destination SE
self.log.debug( "replicate: %s already present at %s." % ( lfn, destSE ) )
return res
return S_OK( lfn )
def __replicate( self, lfn, destSEName, sourceSEName = None, destPath = None, localCache = None ):
""" Replicate a LFN to a destination SE.
'lfn' is the LFN to be replicated
'destSE' is the Storage Element the file should be replicated to
'sourceSE' is the source for the file replication (where not specified all replicas will be attempted)
'destPath' is the path on the destination storage element, if to be different from LHCb convention
        'localCache' is a local directory used as an intermediate cache (get + put) when a third-party transfer cannot be done
"""
log = self.log.getSubLogger( '__replicate', True )
###########################################################
# Check that we have write permissions to this directory.
res = self.__verifyWritePermission( lfn )
if not res['OK']:
return res
if lfn not in res['Value']['Successful']:
errStr = "__replicate: Write access not permitted for this credential."
log.debug( errStr, lfn )
return S_ERROR( errStr )
# Check that the destination storage element is sane and resolve its name
log.debug( "Verifying destination StorageElement validity (%s)." % ( destSEName ) )
destStorageElement = StorageElement( destSEName, vo = self.vo )
res = destStorageElement.isValid()
if not res['OK']:
errStr = "The storage element is not currently valid."
log.debug( errStr, "%s %s" % ( destSEName, res['Message'] ) )
return S_ERROR( errStr )
# Get the real name of the SE
destSEName = destStorageElement.getStorageElementName()['Value']
###########################################################
# Check whether the destination storage element is banned
log.verbose( "Determining whether %s ( destination ) is Write-banned." % destSEName )
if not self.__SEActive( destSEName ).get( 'Value', {} ).get( 'Write' ):
infoStr = "Supplied destination Storage Element is not currently allowed for Write."
log.debug( infoStr, destSEName )
return S_ERROR( infoStr )
# Get the LFN replicas from the file catalog
log.debug( "Attempting to obtain replicas for %s." % ( lfn ) )
res = returnSingleResult( self.getReplicas( lfn ) )
if not res[ 'OK' ]:
        errStr = "Failed to get replicas for LFN."
log.debug( errStr, "%s %s" % ( lfn, res['Message'] ) )
return S_ERROR( "%s %s" % ( errStr, res['Message'] ) )
log.debug( "Successfully obtained replicas for LFN." )
lfnReplicas = res['Value']
###########################################################
# If the file catalog size is zero fail the transfer
log.debug( "Attempting to obtain size for %s." % lfn )
res = returnSingleResult( self.fc.getFileSize( lfn ) )
if not res['OK']:
errStr = "Failed to get size for LFN."
log.debug( errStr, "%s %s" % ( lfn, res['Message'] ) )
return S_ERROR( "%s %s" % ( errStr, res['Message'] ) )
catalogSize = res['Value']
if catalogSize == 0:
errStr = "Registered file size is 0."
log.debug( errStr, lfn )
return S_ERROR( errStr )
log.debug( "File size determined to be %s." % catalogSize )
###########################################################
# If the LFN already exists at the destination we have nothing to do
if destSEName in lfnReplicas:
log.debug( "__replicate: LFN is already registered at %s." % destSEName )
return S_OK()
###########################################################
# If the source is specified, check that it is in the replicas
if sourceSEName:
log.debug( "Determining whether source Storage Element specified is sane." )
if sourceSEName not in lfnReplicas:
errStr = "LFN does not exist at supplied source SE."
log.error( errStr, "%s %s" % ( lfn, sourceSEName ) )
return S_ERROR( errStr )
# If sourceSE is specified, then we consider this one only, otherwise
# we consider them all
possibleSourceSEs = [sourceSEName] if sourceSEName else lfnReplicas.keys()
    # We sort possibleSourceSEs so that the SEs on the same site as the destination come first
# reverse = True because True > False
possibleSourceSEs = sorted( possibleSourceSEs,
key = lambda x : isSameSiteSE( x, destSEName ).get( 'Value', False ),
reverse = True )
# In case we manage to find SEs that would work as a source, but we can't negotiate a protocol
    # we will do a get and put using one of these sane SEs
possibleSEsForIntermediateTransfer = []
# Take into account the destination path
if destPath:
destPath = '%s/%s' % ( destPath, os.path.basename( lfn ) )
else:
destPath = lfn
for candidateSEName in possibleSourceSEs:
log.debug( "Consider %s as a source" % candidateSEName )
# Check that the candidate is active
if not self.__SEActive( candidateSEName ).get( 'Value', {} ).get( 'Read' ):
log.debug( "%s is currently not allowed as a source." % candidateSEName )
continue
else:
log.debug( "%s is available for use." % candidateSEName )
candidateSE = StorageElement( candidateSEName, vo = self.vo )
# Check that the SE is valid
res = candidateSE.isValid()
if not res['OK']:
log.debug( "The storage element is not currently valid.", "%s %s" % ( candidateSEName, res['Message'] ) )
continue
else:
log.debug( "The storage is currently valid", candidateSEName )
# Check that the file size corresponds to the one in the FC
res = returnSingleResult( candidateSE.getFileSize( lfn ) )
if not res['OK']:
log.debug( "could not get fileSize on %s" % candidateSEName, res['Message'] )
continue
seFileSize = res['Value']
if seFileSize != catalogSize:
log.debug( "Catalog size and physical file size mismatch.", "%s %s" % ( catalogSize, seFileSize ) )
continue
else:
log.debug( "Catalog size and physical size match" )
res = destStorageElement.negociateProtocolWithOtherSE( candidateSE, protocols = self.thirdPartyProtocols )
if not res['OK']:
log.debug( "Error negotiating replication protocol", res['Message'] )
continue
replicationProtocol = res['Value']
if not replicationProtocol:
possibleSEsForIntermediateTransfer.append( candidateSE )
log.debug( "No protocol suitable for replication found" )
continue
log.debug( 'Found common protocols', replicationProtocol )
# THIS WOULD NOT WORK IF PROTO == file !!
# Compare the urls to make sure we are not overwriting
res = returnSingleResult( candidateSE.getURL( lfn, protocol = replicationProtocol ) )
if not res['OK']:
log.debug( "Cannot get sourceURL", res['Message'] )
continue
sourceURL = res['Value']
res = returnSingleResult( destStorageElement.getURL( destPath, protocol = replicationProtocol ) )
if not res['OK']:
log.debug( "Cannot get destURL", res['Message'] )
continue
destURL = res['Value']
if sourceURL == destURL:
log.debug( "Same source and destination, give up" )
continue
# Attempt the transfer
res = returnSingleResult( destStorageElement.replicateFile( {destPath:sourceURL}, sourceSize = catalogSize ) )
if not res['OK']:
log.debug( "Replication failed", "%s from %s to %s." % ( lfn, candidateSEName, destSEName ) )
continue
log.debug( "Replication successful.", res['Value'] )
res = returnSingleResult( destStorageElement.getURL(destPath, protocol = self.registrationProtocol))
if not res['OK']:
log.debug( 'Error getting the registration URL', res['Message'] )
# it's maybe pointless to try the other candidateSEs...
continue
registrationURL = res['Value']
return S_OK( {'DestSE':destSEName, 'DestPfn':registrationURL} )
# If we are here, that means that we could not make a third party transfer.
# Check if we have some sane SEs from which we could do a get/put
localDir = os.path.realpath( localCache if localCache else '.' )
localFile = os.path.join( localDir, os.path.basename( lfn ) )
log.debug( "Will try intermediate transfer from %s sources" % len( possibleSEsForIntermediateTransfer ) )
for candidateSE in possibleSEsForIntermediateTransfer:
res = returnSingleResult( candidateSE.getFile( lfn, localPath = localDir ) )
if not res['OK']:
log.debug( 'Error getting the file from %s' % candidateSE.name, res['Message'] )
continue
res = returnSingleResult( destStorageElement.putFile( {destPath:localFile} ) )
if not res['OK']:
log.debug( 'Error putting file coming from %s' % candidateSE.name, res['Message'] )
# if the put is the problem, it's maybe pointless to try the other candidateSEs...
continue
# get URL with default protocol to return it
res = returnSingleResult( destStorageElement.getURL( destPath, protocol = self.registrationProtocol ) )
if not res['OK']:
log.debug( 'Error getting the registration URL', res['Message'] )
# it's maybe pointless to try the other candidateSEs...
continue
registrationURL = res['Value']
return S_OK( {'DestSE':destSEName, 'DestPfn':registrationURL} )
# If here, we are really doomed
errStr = "Failed to replicate with all sources."
log.debug( errStr, lfn )
return S_ERROR( errStr )
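  # Summary of the strategy in __replicate above:
  #   1. try a third-party copy from each eligible source SE, same-site SEs first;
  #   2. if no common transfer protocol can be negotiated, fall back to a get
  #      into 'localCache' followed by a put to the destination SE;
  #   3. on success, return the URL used to register the new replica.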
###################################################################
#
# These are the file catalog write methods
#
def registerFile( self, fileTuple, catalog = '' ):
""" Register a file or a list of files
:param self: self reference
:param tuple fileTuple: (lfn, physicalFile, fileSize, storageElementName, fileGuid, checksum )
:param str catalog: catalog name
"""
if type( fileTuple ) == ListType:
fileTuples = fileTuple
elif type( fileTuple ) == TupleType:
fileTuples = [fileTuple]
else:
      errStr = "registerFile: Supplied file info must be tuple or list of tuples."
self.log.debug( errStr )
return S_ERROR( errStr )
self.log.debug( "registerFile: Attempting to register %s files." % len( fileTuples ) )
res = self.__registerFile( fileTuples, catalog )
if not res['OK']:
errStr = "registerFile: Completely failed to register files."
self.log.debug( errStr, res['Message'] )
return res
# Remove Failed LFNs if they are in success
success = res['Value']['Successful']
failed = res['Value']['Failed']
return res
  def __registerFile( self, fileTuples, catalog ):
    """ register a file (or list of files) in the catalog """
fileDict = {}
for lfn, physicalFile, fileSize, storageElementName, fileGuid, checksum in fileTuples:
fileDict[lfn] = {'PFN':physicalFile, 'Size':fileSize, 'SE':storageElementName, 'GUID':fileGuid, 'Checksum':checksum}
if catalog:
fileCatalog = FileCatalog( catalog, vo = self.vo )
if not fileCatalog.isOK():
return S_ERROR( "Can't get FileCatalog %s" % catalog )
else:
fileCatalog = self.fc
res = fileCatalog.addFile( fileDict )
if not res['OK']:
errStr = "__registerFile: Completely failed to register files."
self.log.debug( errStr, res['Message'] )
return res
def registerReplica( self, replicaTuple, catalog = '' ):
""" Register a replica (or list of) supplied in the replicaTuples.
'replicaTuple' is a tuple or list of tuples of the form (lfn,pfn,se)
"""
if type( replicaTuple ) == ListType:
replicaTuples = replicaTuple
elif type( replicaTuple ) == TupleType:
replicaTuples = [ replicaTuple ]
else:
      errStr = "registerReplica: Supplied file info must be tuple or list of tuples."
self.log.debug( errStr )
return S_ERROR( errStr )
self.log.debug( "registerReplica: Attempting to register %s replicas." % len( replicaTuples ) )
res = self.__registerReplica( replicaTuples, catalog )
if not res['OK']:
errStr = "registerReplica: Completely failed to register replicas."
self.log.debug( errStr, res['Message'] )
return res
# Remove Failed LFNs if they are in success
success = res['Value']['Successful']
failed = res['Value']['Failed']
return res
def __registerReplica( self, replicaTuples, catalog ):
""" register replica to catalogue """
seDict = {}
for lfn, url, storageElementName in replicaTuples:
seDict.setdefault( storageElementName, [] ).append( ( lfn, url ) )
failed = {}
replicaTuples = []
for storageElementName, replicaTuple in seDict.items():
destStorageElement = StorageElement( storageElementName, vo = self.vo )
res = destStorageElement.isValid()
if not res['OK']:
errStr = "__registerReplica: The storage element is not currently valid."
self.log.debug( errStr, "%s %s" % ( storageElementName, res['Message'] ) )
for lfn, url in replicaTuple:
failed[lfn] = errStr
else:
storageElementName = destStorageElement.getStorageElementName()['Value']
for lfn, url in replicaTuple:
res = returnSingleResult( destStorageElement.getURL( lfn, protocol = self.registrationProtocol ) )
if not res['OK']:
failed[lfn] = res['Message']
else:
replicaTuple = ( lfn, res['Value'], storageElementName, False )
replicaTuples.append( replicaTuple )
self.log.debug( "__registerReplica: Successfully resolved %s replicas for registration." % len( replicaTuples ) )
# HACK!
replicaDict = {}
for lfn, url, se, _master in replicaTuples:
replicaDict[lfn] = {'SE':se, 'PFN':url}
if catalog:
fileCatalog = FileCatalog( catalog, vo = self.vo )
res = fileCatalog.addReplica( replicaDict )
else:
res = self.fc.addReplica( replicaDict )
if not res['OK']:
errStr = "__registerReplica: Completely failed to register replicas."
self.log.debug( errStr, res['Message'] )
return S_ERROR( errStr )
failed.update( res['Value']['Failed'] )
successful = res['Value']['Successful']
resDict = {'Successful':successful, 'Failed':failed}
return S_OK( resDict )
###################################################################
#
# These are the removal methods for physical and catalogue removal
#
def removeFile( self, lfn, force = None ):
""" Remove the file (all replicas) from Storage Elements and file catalogue
'lfn' is the file to be removed
"""
    if force is None:
force = self.ignoreMissingInFC
if type( lfn ) == ListType:
lfns = lfn
elif type( lfn ) == StringType:
lfns = [lfn]
else:
errStr = "removeFile: Supplied lfns must be string or list of strings."
self.log.debug( errStr )
return S_ERROR( errStr )
# First check if the file exists in the FC
res = self.fc.exists( lfns )
if not res['OK']:
return res
success = res['Value']['Successful']
lfns = [lfn for lfn in success if success[lfn] ]
if force:
# Files that don't exist are removed successfully
successful = dict.fromkeys( [lfn for lfn in success if not success[lfn] ], True )
failed = {}
else:
successful = {}
failed = dict.fromkeys( [lfn for lfn in success if not success[lfn] ], 'No such file or directory' )
# Check that we have write permissions to this directory and to the file.
if lfns:
dir4lfns = {}
for lfn in lfns:
dir4lfns.setdefault( os.path.dirname( lfn ), [] ).append( lfn )
res = self.__verifyWritePermission( dir4lfns.keys() )
if not res['OK']:
return res
if res['Value']['Failed']:
errStr = "removeFile: Write access not permitted for this credential."
self.log.debug( errStr, 'for %d files' % len( res['Value']['Failed'] ) )
failed.update( dict.fromkeys( [lfn for dirName in res['Value']['Failed'] for lfn in dir4lfns[dirName]], errStr ) )
lfns = list( set( [lfn for dirName in res['Value']['Successful'] for lfn in dir4lfns[dirName] ] ) )
if lfns:
self.log.debug( "removeFile: Attempting to remove %s files from Storage and Catalogue. Get replicas first" % len( lfns ) )
res = self.fc.getReplicas( lfns, True )
if not res['OK']:
errStr = "DataManager.removeFile: Completely failed to get replicas for lfns."
self.log.debug( errStr, res['Message'] )
return res
lfnDict = res['Value']['Successful']
for lfn, reason in res['Value'].get( 'Failed', {} ).items():
# Ignore files missing in FC if force is set
if reason == 'No such file or directory' and force:
successful[lfn] = True
elif reason == 'File has zero replicas':
lfnDict[lfn] = {}
else:
failed[lfn] = reason
res = self.__removeFile( lfnDict )
if not res['OK']:
errStr = "removeFile: Completely failed to remove files."
self.log.debug( errStr, res['Message'] )
return res
failed.update( res['Value']['Failed'] )
successful.update( res['Value']['Successful'] )
resDict = {'Successful':successful, 'Failed':failed}
gDataStoreClient.commit()
return S_OK( resDict )
def __removeFile( self, lfnDict ):
""" remove file """
storageElementDict = {}
# # sorted and reversed
for lfn, repDict in sorted( lfnDict.items(), reverse = True ):
for se, pfn in repDict.items():
storageElementDict.setdefault( se, [] ).append( lfn )
failed = {}
successful = {}
for storageElementName in sorted( storageElementDict ):
lfns = storageElementDict[storageElementName]
res = self.__removeReplica( storageElementName, lfns, replicaDict = lfnDict )
if not res['OK']:
errStr = res['Message']
for lfn in lfns:
failed[lfn] = failed.setdefault( lfn, '' ) + " %s" % errStr
else:
for lfn, errStr in res['Value']['Failed'].items():
failed[lfn] = failed.setdefault( lfn, '' ) + " %s" % errStr
completelyRemovedFiles = []
for lfn in [lfn for lfn in lfnDict if lfn not in failed]:
completelyRemovedFiles.append( lfn )
if completelyRemovedFiles:
res = self.fc.removeFile( completelyRemovedFiles )
if not res['OK']:
for lfn in completelyRemovedFiles:
failed[lfn] = "Failed to remove file from the catalog: %s" % res['Message']
else:
failed.update( res['Value']['Failed'] )
successful = res['Value']['Successful']
return S_OK( { 'Successful' : successful, 'Failed' : failed } )
def removeReplica( self, storageElementName, lfn ):
""" Remove replica at the supplied Storage Element from Storage Element then file catalogue
'storageElementName' is the storage where the file is to be removed
'lfn' is the file to be removed
"""
if type( lfn ) == ListType:
lfns = lfn
elif type( lfn ) == StringType:
lfns = [lfn]
else:
errStr = "removeReplica: Supplied lfns must be string or list of strings."
self.log.debug( errStr )
return S_ERROR( errStr )
successful = {}
failed = {}
# Check that we have write permissions to this file.
res = self.__verifyWritePermission( lfns )
if not res['OK']:
return res
if res['Value']['Failed']:
errStr = "removeReplica: Write access not permitted for this credential."
self.log.debug( errStr, 'for %d files' % len( res['Value']['Failed'] ) )
failed.update( dict.fromkeys( res['Value']['Failed'], errStr ) )
lfns = [lfn for lfn in lfns if lfn not in res['Value']['Failed']]
self.log.debug( "removeReplica: Will remove catalogue entry for %s lfns at %s." % ( len( lfns ),
storageElementName ) )
res = self.fc.getReplicas( lfns, True )
if not res['OK']:
errStr = "removeReplica: Completely failed to get replicas for lfns."
self.log.debug( errStr, res['Message'] )
return res
failed.update( res['Value']['Failed'] )
replicaDict = res['Value']['Successful']
lfnsToRemove = []
for lfn, repDict in res['Value']['Successful'].items():
if storageElementName not in repDict:
# The file doesn't exist at the storage element so don't have to remove it
successful[lfn] = True
elif len( repDict ) == 1:
# The file has only a single replica so don't remove
self.log.debug( "The replica you are trying to remove is the only one.", "%s @ %s" % ( lfn,
storageElementName ) )
failed[lfn] = "Failed to remove sole replica"
else:
lfnsToRemove.append( lfn )
if not lfnsToRemove:
return S_OK( { 'Successful' : successful, 'Failed' : failed } )
res = self.__removeReplica( storageElementName, lfnsToRemove, replicaDict = replicaDict )
if not res['OK']:
return res
failed.update( res['Value']['Failed'] )
successful.update( res['Value']['Successful'] )
gDataStoreClient.commit()
return S_OK( { 'Successful' : successful, 'Failed' : failed } )
def __removeReplica( self, storageElementName, lfns, replicaDict = None ):
""" remove replica
Remove the replica from the storageElement, and then from the catalog
:param storageElementName : The name of the storage Element
        :param lfns : list of LFNs we want to remove
        :param replicaDict : cache of fc.getReplicas(lfns) : { lfn : { se : catalog url } }
"""
failed = {}
successful = {}
replicaDict = replicaDict if replicaDict else {}
lfnsToRemove = []
for lfn in lfns:
res = self.__verifyWritePermission( lfn )
if not res['OK']:
return res
if lfn not in res['Value']['Successful']:
errStr = "__removeReplica: Write access not permitted for this credential."
self.log.debug( errStr, lfn )
failed[lfn] = errStr
else:
lfnsToRemove.append( lfn )
res = self.__removePhysicalReplica( storageElementName, lfnsToRemove, replicaDict = replicaDict )
if not res['OK']:
errStr = "__removeReplica: Failed to remove physical replicas."
self.log.debug( errStr, res['Message'] )
return S_ERROR( res['Message'] )
failed.update( dict( [( lfn, error ) for lfn, error in res['Value']['Failed'].items()] ) )
# Here we use the FC PFN...
replicaTuples = [( lfn, replicaDict[lfn][storageElementName], storageElementName ) for lfn in res['Value']['Successful']]
if not replicaTuples:
return S_OK( { 'Successful' : successful, 'Failed' : failed } )
res = self.__removeCatalogReplica( replicaTuples )
if not res['OK']:
errStr = "__removeReplica: Completely failed to remove physical files."
self.log.debug( errStr, res['Message'] )
failed.update( dict.fromkeys( [lfn for lfn, _pfn, _se in replicaTuples if lfn not in failed], res['Message'] ) )
successful = {}
else:
failed.update( res['Value']['Failed'] )
successful = res['Value']['Successful']
return S_OK( { 'Successful' : successful, 'Failed' : failed } )
def removeReplicaFromCatalog( self, storageElementName, lfn ):
""" remove :lfn: replica from :storageElementName: SE
:param self: self reference
:param str storageElementName: SE name
:param mixed lfn: a single LFN or list of LFNs
"""
# Remove replica from the file catalog 'lfn' are the file
# to be removed 'storageElementName' is the storage where the file is to be removed
if type( lfn ) == ListType:
lfns = lfn
elif type( lfn ) == StringType:
lfns = [lfn]
else:
errStr = "removeReplicaFromCatalog: Supplied lfns must be string or list of strings."
self.log.debug( errStr )
return S_ERROR( errStr )
self.log.debug( "removeReplicaFromCatalog: Will remove catalogue entry for %s lfns at %s." % \
( len( lfns ), storageElementName ) )
res = self.fc.getReplicas( lfns, allStatus = True )
if not res['OK']:
errStr = "removeReplicaFromCatalog: Completely failed to get replicas for lfns."
self.log.debug( errStr, res['Message'] )
return res
failed = {}
successful = {}
for lfn, reason in res['Value']['Failed'].items():
if reason in ( 'No such file or directory', 'File has zero replicas' ):
successful[lfn] = True
else:
failed[lfn] = reason
replicaTuples = []
for lfn, repDict in res['Value']['Successful'].items():
if storageElementName not in repDict:
# The file doesn't exist at the storage element so don't have to remove it
successful[lfn] = True
else:
replicaTuples.append( ( lfn, repDict[storageElementName], storageElementName ) )
self.log.debug( "removeReplicaFromCatalog: Resolved %s pfns for catalog removal at %s." % ( len( replicaTuples ),
storageElementName ) )
res = self.__removeCatalogReplica( replicaTuples )
failed.update( res['Value']['Failed'] )
successful.update( res['Value']['Successful'] )
resDict = {'Successful':successful, 'Failed':failed}
return S_OK( resDict )
def removeCatalogPhysicalFileNames( self, replicaTuple ):
""" Remove replicas from the file catalog specified by replica tuple
'replicaTuple' is a tuple containing the replica to be removed and is of the form ( lfn, pfn, se )
"""
if type( replicaTuple ) == ListType:
replicaTuples = replicaTuple
elif type( replicaTuple ) == TupleType:
replicaTuples = [replicaTuple]
else:
errStr = "removeCatalogPhysicalFileNames: Supplied info must be tuple or list of tuples."
self.log.debug( errStr )
return S_ERROR( errStr )
return self.__removeCatalogReplica( replicaTuples )
def __removeCatalogReplica( self, replicaTuples ):
""" remove replica form catalogue
:param replicaTuples : list of (lfn, catalogPFN, se)
"""
oDataOperation = self.__initialiseAccountingObject( 'removeCatalogReplica', '', len( replicaTuples ) )
oDataOperation.setStartTime()
start = time.time()
# HACK!
replicaDict = {}
for lfn, pfn, se in replicaTuples:
replicaDict[lfn] = {'SE':se, 'PFN':pfn}
res = self.fc.removeReplica( replicaDict )
oDataOperation.setEndTime()
oDataOperation.setValueByKey( 'RegistrationTime', time.time() - start )
if not res['OK']:
oDataOperation.setValueByKey( 'RegistrationOK', 0 )
oDataOperation.setValueByKey( 'FinalStatus', 'Failed' )
gDataStoreClient.addRegister( oDataOperation )
errStr = "__removeCatalogReplica: Completely failed to remove replica: " + res['Message']
self.log.debug( errStr )
return S_ERROR( errStr )
success = res['Value']['Successful']
failed = res['Value']['Failed']
for lfn, error in failed.items():
# Ignore error if file doesn't exist
# This assumes all catalogs return an error as { catalog : error }
for catalog, err in error.items():
if 'no such file' in err.lower():
success.setdefault( lfn, {} ).update( { catalog : True} )
error.pop( catalog )
if not failed[lfn]:
failed.pop( lfn )
else:
self.log.error( "__removeCatalogReplica: Failed to remove replica.", "%s %s" % ( lfn, error ) )
# Only for logging information
if success:
self.log.debug( "__removeCatalogReplica: Removed %d replicas" % len( success ) )
for lfn in success:
self.log.debug( "__removeCatalogReplica: Successfully removed replica.", lfn )
oDataOperation.setValueByKey( 'RegistrationOK', len( success ) )
gDataStoreClient.addRegister( oDataOperation )
return res
def removePhysicalReplicaLegacy( self, storageElementName, lfn ):
""" Remove replica from Storage Element.
'lfn' are the files to be removed
'storageElementName' is the storage where the file is to be removed
"""
if type( lfn ) == ListType:
lfns = lfn
elif type( lfn ) == StringType:
lfns = [lfn]
else:
errStr = "removePhysicalReplica: Supplied lfns must be string or list of strings."
self.log.debug( errStr )
return S_ERROR( errStr )
successful = {}
failed = {}
# Check that we have write permissions to this directory.
res = self.__verifyWritePermission( lfns )
if not res['OK']:
return res
if res['Value']['Failed']:
errStr = "removePhysicalReplica: Write access not permitted for this credential."
self.log.debug( errStr, 'for %d files' % len( res['Value']['Failed'] ) )
failed.update( dict.fromkeys( res['Value']['Failed'], errStr ) )
lfns = [lfn for lfn in lfns if lfn not in res['Value']['Failed']]
self.log.debug( "removePhysicalReplica: Attempting to remove %s lfns at %s." % ( len( lfns ),
storageElementName ) )
self.log.debug( "removePhysicalReplica: Attempting to resolve replicas." )
res = self.getReplicas( lfns )
if not res['OK']:
errStr = "removePhysicalReplica: Completely failed to get replicas for lfns."
self.log.debug( errStr, res['Message'] )
return res
failed.update( res['Value']['Failed'] )
replicaDict = res['Value']['Successful']
successful = {}
lfnsToRemove = []
for lfn, repDict in replicaDict.items():
if storageElementName not in repDict:
# The file doesn't exist at the storage element so don't have to remove it
successful[lfn] = True
else:
lfnsToRemove.append( lfn )
self.log.debug( "removePhysicalReplica: Resolved %s pfns for removal at %s." % ( len( lfnsToRemove ),
storageElementName ) )
res = self.__removePhysicalReplica( storageElementName, lfnsToRemove, replicaDict = replicaDict )
for lfn, error in res['Value']['Failed'].items():
failed[lfn] = error
for lfn in res['Value']['Successful']:
successful[lfn] = True
resDict = { 'Successful' : successful, 'Failed' : failed }
return S_OK( resDict )
def __removePhysicalReplica( self, storageElementName, lfnsToRemove, replicaDict = None ):
""" remove replica from storage element
:param storageElementName : name of the storage Element
        :param lfnsToRemove : list of LFNs to remove
:param replicaDict : cache of fc.getReplicas, to be passed to the SE
"""
self.log.debug( "__removePhysicalReplica: Attempting to remove %s pfns at %s." % ( len( lfnsToRemove ),
storageElementName ) )
storageElement = StorageElement( storageElementName, vo = self.vo )
res = storageElement.isValid()
if not res['OK']:
errStr = "__removePhysicalReplica: The storage element is not currently valid."
self.log.debug( errStr, "%s %s" % ( storageElementName, res['Message'] ) )
return S_ERROR( errStr )
oDataOperation = self.__initialiseAccountingObject( 'removePhysicalReplica',
storageElementName,
len( lfnsToRemove ) )
oDataOperation.setStartTime()
start = time.time()
ret = storageElement.getFileSize( lfnsToRemove, replicaDict = replicaDict )
deletedSizes = ret.get( 'Value', {} ).get( 'Successful', {} )
res = storageElement.removeFile( lfnsToRemove, replicaDict = replicaDict )
oDataOperation.setEndTime()
oDataOperation.setValueByKey( 'TransferTime', time.time() - start )
if not res['OK']:
oDataOperation.setValueByKey( 'TransferOK', 0 )
oDataOperation.setValueByKey( 'FinalStatus', 'Failed' )
gDataStoreClient.addRegister( oDataOperation )
errStr = "__removePhysicalReplica: Failed to remove replicas."
self.log.debug( errStr, res['Message'] )
return S_ERROR( errStr )
else:
for lfn, value in res['Value']['Failed'].items():
if 'No such file or directory' in value:
res['Value']['Successful'][lfn] = lfn
res['Value']['Failed'].pop( lfn )
for lfn in res['Value']['Successful']:
res['Value']['Successful'][lfn] = True
deletedSize = sum( [size for lfn, size in deletedSizes.items() if lfn in res['Value']['Successful']] )
oDataOperation.setValueByKey( 'TransferSize', deletedSize )
oDataOperation.setValueByKey( 'TransferOK', len( res['Value']['Successful'] ) )
gDataStoreClient.addRegister( oDataOperation )
infoStr = "__removePhysicalReplica: Successfully issued accounting removal request."
self.log.debug( infoStr )
return res
#########################################################################
#
# File transfer methods
#
def put( self, lfn, fileName, diracSE, path = None ):
""" Put a local file to a Storage Element
:param self: self reference
:param str lfn: LFN
:param str fileName: the full path to the local file
:param str diracSE: the Storage Element to which to put the file
:param str path: the path on the storage where the file will be put (if not provided the LFN will be used)
"""
# Check that the local file exists
if not os.path.exists( fileName ):
errStr = "put: Supplied file does not exist."
self.log.debug( errStr, fileName )
return S_ERROR( errStr )
# If the path is not provided then use the LFN path
if not path:
path = os.path.dirname( lfn )
# Obtain the size of the local file
size = getSize( fileName )
if size == 0:
errStr = "put: Supplied file is zero size."
self.log.debug( errStr, fileName )
return S_ERROR( errStr )
##########################################################
# Instantiate the destination storage element here.
storageElement = StorageElement( diracSE, vo = self.vo )
res = storageElement.isValid()
if not res['OK']:
errStr = "put: The storage element is not currently valid."
self.log.debug( errStr, "%s %s" % ( diracSE, res['Message'] ) )
return S_ERROR( errStr )
fileDict = {lfn:fileName}
successful = {}
failed = {}
##########################################################
# Perform the put here.
startTime = time.time()
res = returnSingleResult( storageElement.putFile( fileDict ) )
putTime = time.time() - startTime
if not res['OK']:
errStr = "put: Failed to put file to Storage Element."
failed[lfn] = res['Message']
self.log.debug( errStr, "%s: %s" % ( fileName, res['Message'] ) )
else:
self.log.debug( "put: Put file to storage in %s seconds." % putTime )
successful[lfn] = res['Value']
resDict = {'Successful': successful, 'Failed':failed}
return S_OK( resDict )
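  # Illustrative usage sketch (editorial addition, not part of the original
  # class): a client script might upload a local file with put() like this.
  # The LFN, local path and SE name below are hypothetical.
  #
  #   dm = DataManager()
  #   res = dm.put( '/vo/user/e/example/test.txt', '/tmp/test.txt', 'EXAMPLE-disk' )
  #   if not res['OK']:
  #     print res['Message']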
# def removeReplica(self,lfn,storageElementName,singleFile=False):
# def putReplica(self,lfn,storageElementName,singleFile=False):
# def replicateReplica(self,lfn,size,storageElementName,singleFile=False):
def getActiveReplicas( self, lfns ):
""" Get all the replicas for the SEs which are in Active status for reading.
"""
res = self.getReplicas( lfns, allStatus = False )
if not res['OK']:
return res
replicas = res['Value']
return self.checkActiveReplicas( replicas )
def checkActiveReplicas( self, replicaDict ):
""" Check a replica dictionary for active replicas
"""
if type( replicaDict ) != DictType:
return S_ERROR( 'Wrong argument type %s, expected a dictionary' % type( replicaDict ) )
for key in [ 'Successful', 'Failed' ]:
      if key not in replicaDict:
return S_ERROR( 'Missing key "%s" in replica dictionary' % key )
if type( replicaDict[key] ) != DictType:
return S_ERROR( 'Wrong argument type %s, expected a dictionary' % type( replicaDict[key] ) )
seReadStatus = {}
for lfn, replicas in replicaDict['Successful'].items():
if type( replicas ) != DictType:
del replicaDict['Successful'][ lfn ]
replicaDict['Failed'][lfn] = 'Wrong replica info'
continue
for se in replicas.keys():
# Fix the caching
readStatus = seReadStatus[se] if se in seReadStatus else seReadStatus.setdefault( se, self.__SEActive( se ).get( 'Value', {} ).get( 'Read', False ) )
if not readStatus:
replicas.pop( se )
return S_OK( replicaDict )
def __SEActive( self, se ):
""" check is SE is active """
result = StorageFactory().getStorageName( se )
if not result['OK']:
return S_ERROR( 'SE not known' )
resolvedName = result['Value']
for _i in range( 5 ):
res = self.resourceStatus.getStorageElementStatus( resolvedName, default = None )
if res['OK']:
break
if not res[ 'OK' ]:
return S_ERROR( 'SE not known' )
seStatus = { 'Read' : True, 'Write' : True }
if res['Value'][resolvedName].get( 'ReadAccess', 'Active' ) not in ( 'Active', 'Degraded' ):
seStatus[ 'Read' ] = False
if res['Value'][resolvedName].get( 'WriteAccess', 'Active' ) not in ( 'Active', 'Degraded' ):
seStatus[ 'Write' ] = False
return S_OK( seStatus )
def __initialiseAccountingObject( self, operation, se, files ):
""" create accouting record """
accountingDict = {}
accountingDict['OperationType'] = operation
result = getProxyInfo()
if not result['OK']:
userName = 'system'
else:
userName = result['Value'].get( 'username', 'unknown' )
accountingDict['User'] = userName
accountingDict['Protocol'] = 'DataManager'
accountingDict['RegistrationTime'] = 0.0
accountingDict['RegistrationOK'] = 0
accountingDict['RegistrationTotal'] = 0
accountingDict['Destination'] = se
accountingDict['TransferTotal'] = files
accountingDict['TransferOK'] = files
accountingDict['TransferSize'] = files
accountingDict['TransferTime'] = 0.0
accountingDict['FinalStatus'] = 'Successful'
accountingDict['Source'] = DIRAC.siteName()
oDataOperation = DataOperation()
oDataOperation.setValuesFromDict( accountingDict )
return oDataOperation
##########################################
#
  # Defunct methods kept only for backward compatibility
#
def getReplicas( self, lfns, allStatus = True ):
""" get replicas from catalogue """
res = self.fc.getReplicas( lfns, allStatus = allStatus )
if not self.useCatalogPFN:
if res['OK']:
se_lfn = {}
catalogReplicas = res['Value']['Successful']
# We group the query to getURL by storage element to gain in speed
for lfn in catalogReplicas:
for se in catalogReplicas[lfn]:
se_lfn.setdefault( se, [] ).append( lfn )
for se in se_lfn:
seObj = StorageElement( se, vo = self.vo )
succPfn = seObj.getURL( se_lfn[se], protocol = self.registrationProtocol ).get( 'Value', {} ).get( 'Successful', {} )
for lfn in succPfn:
# catalogReplicas still points res["value"]["Successful"] so res will be updated
catalogReplicas[lfn][se] = succPfn[lfn]
return res
  ##################################################################################################
  # Methods from the catalogToStorage. It would all work with a direct call to the SE, but this
  # checks first whether the replica is known to the catalog
def __executeIfReplicaExists( self, storageElementName, lfn, method, **kwargs ):
""" a simple wrapper that allows replica querying then perform the StorageElement operation
:param self: self reference
:param str storageElementName: DIRAC SE name
:param mixed lfn: a LFN str, list of LFNs or dict with LFNs as keys
"""
# # default value
kwargs = kwargs if kwargs else {}
# # get replicas for lfn
res = FileCatalog( vo = self.vo ).getReplicas( lfn )
if not res["OK"]:
errStr = "__executeIfReplicaExists: Completely failed to get replicas for LFNs."
self.log.debug( errStr, res["Message"] )
return res
    # # returned dict, get failed replicas
retDict = { "Failed": res["Value"]["Failed"],
"Successful" : {} }
# # print errors
for lfn, reason in retDict["Failed"].items():
self.log.error( "_callReplicaSEFcn: Failed to get replicas for file.", "%s %s" % ( lfn, reason ) )
# # good replicas
lfnReplicas = res["Value"]["Successful"]
# # store PFN to LFN mapping
lfnList = []
se = None # Placeholder for the StorageElement object
for lfn, replicas in lfnReplicas.items():
if storageElementName in replicas:
lfnList.append( lfn )
else:
errStr = "__executeIfReplicaExists: File hasn't got replica at supplied Storage Element."
self.log.error( errStr, "%s %s" % ( lfn, storageElementName ) )
retDict["Failed"][lfn] = errStr
if 'replicaDict' not in kwargs:
kwargs['replicaDict'] = lfnReplicas
    # # finally, call the StorageElement function
    se = se if se else StorageElement( storageElementName, vo = self.vo )
fcn = getattr( se, method )
res = fcn( lfnList, **kwargs )
# # check result
if not res["OK"]:
errStr = "__executeIfReplicaExists: Failed to execute %s StorageElement method." % method
self.log.error( errStr, res["Message"] )
return res
# # filter out failed and successful
for lfn, lfnRes in res["Value"]["Successful"].items():
retDict["Successful"][lfn] = lfnRes
for lfn, errorMessage in res["Value"]["Failed"].items():
retDict["Failed"][lfn] = errorMessage
return S_OK( retDict )
def getReplicaIsFile( self, lfn, storageElementName ):
""" determine whether the supplied lfns are files at the supplied StorageElement
:param self: self reference
    :param mixed lfn: LFN string, list of LFNs or dict with LFNs as keys
:param str storageElementName: DIRAC SE name
:param bool singleFile: execute for the first LFN only
"""
return self.__executeIfReplicaExists( storageElementName, lfn, "isFile" )
def getReplicaSize( self, lfn, storageElementName ):
""" get the size of files for the lfns at the supplied StorageElement
:param self: self reference
    :param mixed lfn: LFN string, list of LFNs or dict with LFNs as keys
:param str storageElementName: DIRAC SE name
:param bool singleFile: execute for the first LFN only
"""
return self.__executeIfReplicaExists( storageElementName, lfn, "getFileSize" )
def getReplicaAccessUrl( self, lfn, storageElementName, protocol = False ):
""" get the access url for lfns at the supplied StorageElement
:param self: self reference
    :param mixed lfn: LFN string, list of LFNs or dict with LFNs as keys
:param str storageElementName: DIRAC SE name
:param bool singleFile: execute for the first LFN only
"""
return self.__executeIfReplicaExists( storageElementName, lfn, "getURL", protocol = protocol )
def getReplicaMetadata( self, lfn, storageElementName ):
""" get the file metadata for lfns at the supplied StorageElement
:param self: self reference
    :param mixed lfn: LFN string, list of LFNs or dict with LFNs as keys
:param str storageElementName: DIRAC SE name
:param bool singleFile: execute for the first LFN only
"""
return self.__executeIfReplicaExists( storageElementName, lfn, "getFileMetadata" )
def prestageReplica( self, lfn, storageElementName, lifetime = 86400 ):
""" issue a prestage requests for the lfns at the supplied StorageElement
:param self: self reference
    :param mixed lfn: LFN string, list of LFNs or dict with LFNs as keys
:param str storageElementName: DIRAC SE name
:param int lifetime: 24h in seconds
:param bool singleFile: execute for the first LFN only
"""
return self.__executeIfReplicaExists( storageElementName, lfn,
"prestageFile", lifetime = lifetime )
def pinReplica( self, lfn, storageElementName, lifetime = 86400 ):
""" pin the lfns at the supplied StorageElement
:param self: self reference
    :param mixed lfn: LFN string, list of LFNs or dict with LFNs as keys
:param str storageElementName: DIRAC SE name
:param int lifetime: 24h in seconds
:param bool singleFile: execute for the first LFN only
"""
return self.__executeIfReplicaExists( storageElementName, lfn,
"pinFile", lifetime = lifetime )
def releaseReplica( self, lfn, storageElementName ):
""" release pins for the lfns at the supplied StorageElement
:param self: self reference
    :param mixed lfn: LFN string, list of LFNs or dict with LFNs as keys
:param str storageElementName: DIRAC SE name
:param bool singleFile: execute for the first LFN only
"""
return self.__executeIfReplicaExists( storageElementName, lfn, "releaseFile" )
def getReplica( self, lfn, storageElementName, localPath = False ):
""" copy replicas from DIRAC SE to local directory
:param self: self reference
    :param mixed lfn: LFN string, list of LFNs or dict with LFNs as keys
:param str storageElementName: DIRAC SE name
:param mixed localPath: path in the local file system, if False, os.getcwd() will be used
:param bool singleFile: execute for the first LFN only
"""
return self.__executeIfReplicaExists( storageElementName, lfn,
"getFile", localPath = localPath )
|
miloszz/DIRAC
|
DataManagementSystem/Client/DataManager.py
|
Python
|
gpl-3.0
| 71,261
|
# -*- test-case-name: twisted.test.test_defer -*-
#
# Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
"""Support for results that aren't immediately available.
API Stability: stable
Maintainer: U{Glyph Lefkowitz<mailto:glyph@twistedmatrix.com>}
"""
from __future__ import nested_scopes, generators
import traceback
import warnings
# Twisted imports
from twisted.python import log, failure
from twisted.python.util import unsignedID, mergeFunctionMetadata
class AlreadyCalledError(Exception):
pass
class TimeoutError(Exception):
pass
def logError(err):
log.err(err)
return err
def succeed(result):
"""
Return a Deferred that has already had '.callback(result)' called.
This is useful when you're writing synchronous code to an
asynchronous interface: i.e., some code is calling you expecting a
Deferred result, but you don't actually need to do anything
asynchronous. Just return defer.succeed(theResult).
See L{fail} for a version of this function that uses a failing
Deferred rather than a successful one.
@param result: The result to give to the Deferred's 'callback'
method.
@rtype: L{Deferred}
"""
d = Deferred()
d.callback(result)
return d
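# Illustrative sketch (editorial addition, not part of the original module):
# succeed() lets purely synchronous code satisfy a Deferred-returning
# interface.  'cache' and 'key' below are hypothetical.
def _exampleCachedLookup(cache, key):
    """Return an already-fired Deferred carrying a cached value."""
    return succeed(cache.get(key))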
class _nothing: pass
def fail(result=_nothing):
"""
Return a Deferred that has already had '.errback(result)' called.
See L{succeed}'s docstring for rationale.
@param result: The same argument that L{Deferred.errback<twisted.internet.defer.Deferred.errback>} takes.
@rtype: L{Deferred}
"""
if result is _nothing:
result = failure.Failure()
d = Deferred()
d.errback(result)
return d
def execute(callable, *args, **kw):
"""Create a deferred from a callable and arguments.
Call the given function with the given arguments. Return a deferred which
has been fired with its callback as the result of that invocation or its
errback with a Failure for the exception thrown.
"""
try:
result = callable(*args, **kw)
except:
return fail()
else:
return succeed(result)
def maybeDeferred(f, *args, **kw):
"""Invoke a function that may or may not return a deferred.
Call the given function with the given arguments. If the returned
object is a C{Deferred}, return it. If the returned object is a C{Failure},
wrap it with C{fail} and return it. Otherwise, wrap it in C{succeed} and
return it. If an exception is raised, convert it to a C{Failure}, wrap it
in C{fail}, and then return it.
@type f: Any callable
@param f: The callable to invoke
@param args: The arguments to pass to C{f}
@param kw: The keyword arguments to pass to C{f}
@rtype: C{Deferred}
@return: The result of the function call, wrapped in a C{Deferred} if
necessary.
"""
deferred = None
try:
result = f(*args, **kw)
except:
return fail(failure.Failure())
else:
if isinstance(result, Deferred):
return result
elif isinstance(result, failure.Failure):
return fail(result)
else:
return succeed(result)
return deferred
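# Illustrative sketch (editorial addition): maybeDeferred lets a caller treat
# synchronous and Deferred-returning callables uniformly.  'loadConfig' is a
# hypothetical helper that may or may not return a Deferred.
def _exampleMaybeDeferred(loadConfig):
    d = maybeDeferred(loadConfig, "/etc/example.conf")
    d.addCallback(lambda cfg: log.msg("loaded %r" % (cfg,)))
    d.addErrback(log.err)
    return d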
def timeout(deferred):
deferred.errback(failure.Failure(TimeoutError("Callback timed out")))
def passthru(arg):
return arg
def setDebugging(on):
"""Enable or disable Deferred debugging.
When debugging is on, the call stacks from creation and invocation are
recorded, and added to any AlreadyCalledErrors we raise.
"""
Deferred.debug=bool(on)
def getDebugging():
"""Determine whether Deferred debugging is enabled.
"""
return Deferred.debug
class Deferred:
"""This is a callback which will be put off until later.
Why do we want this? Well, in cases where a function in a threaded
program would block until it gets a result, for Twisted it should
not block. Instead, it should return a Deferred.
This can be implemented for protocols that run over the network by
writing an asynchronous protocol for twisted.internet. For methods
that come from outside packages that are not under our control, we use
threads (see for example L{twisted.enterprise.adbapi}).
For more information about Deferreds, see doc/howto/defer.html or
U{http://twistedmatrix.com/projects/core/documentation/howto/defer.html}
"""
called = 0
paused = 0
timeoutCall = None
_debugInfo = None
# Keep this class attribute for now, for compatibility with code that
# sets it directly.
debug = False
def __init__(self):
self.callbacks = []
if self.debug:
self._debugInfo = DebugInfo()
self._debugInfo.creator = traceback.format_stack()[:-1]
def addCallbacks(self, callback, errback=None,
callbackArgs=None, callbackKeywords=None,
errbackArgs=None, errbackKeywords=None):
"""Add a pair of callbacks (success and error) to this Deferred.
These will be executed when the 'master' callback is run.
"""
assert callable(callback)
assert errback == None or callable(errback)
cbs = ((callback, callbackArgs, callbackKeywords),
(errback or (passthru), errbackArgs, errbackKeywords))
self.callbacks.append(cbs)
if self.called:
self._runCallbacks()
return self
def addCallback(self, callback, *args, **kw):
"""Convenience method for adding just a callback.
See L{addCallbacks}.
"""
return self.addCallbacks(callback, callbackArgs=args,
callbackKeywords=kw)
def addErrback(self, errback, *args, **kw):
"""Convenience method for adding just an errback.
See L{addCallbacks}.
"""
return self.addCallbacks(passthru, errback,
errbackArgs=args,
errbackKeywords=kw)
def addBoth(self, callback, *args, **kw):
"""Convenience method for adding a single callable as both a callback
and an errback.
See L{addCallbacks}.
"""
return self.addCallbacks(callback, callback,
callbackArgs=args, errbackArgs=args,
callbackKeywords=kw, errbackKeywords=kw)
def chainDeferred(self, d):
"""Chain another Deferred to this Deferred.
This method adds callbacks to this Deferred to call d's callback or
errback, as appropriate."""
return self.addCallbacks(d.callback, d.errback)
def callback(self, result):
"""Run all success callbacks that have been added to this Deferred.
Each callback will have its result passed as the first
argument to the next; this way, the callbacks act as a
'processing chain'. Also, if the success-callback returns a Failure
or raises an Exception, processing will continue on the *error*-
callback chain.
"""
assert not isinstance(result, Deferred)
self._startRunCallbacks(result)
def errback(self, fail=None):
"""Run all error callbacks that have been added to this Deferred.
Each callback will have its result passed as the first
argument to the next; this way, the callbacks act as a
'processing chain'. Also, if the error-callback returns a non-Failure
or doesn't raise an Exception, processing will continue on the
*success*-callback chain.
If the argument that's passed to me is not a failure.Failure instance,
it will be embedded in one. If no argument is passed, a failure.Failure
instance will be created based on the current traceback stack.
Passing a string as `fail' is deprecated, and will be punished with
a warning message.
"""
if not isinstance(fail, failure.Failure):
fail = failure.Failure(fail)
self._startRunCallbacks(fail)
def pause(self):
"""Stop processing on a Deferred until L{unpause}() is called.
"""
self.paused = self.paused + 1
def unpause(self):
"""Process all callbacks made since L{pause}() was called.
"""
self.paused = self.paused - 1
if self.paused:
return
if self.called:
self._runCallbacks()
def _continue(self, result):
self.result = result
self.unpause()
def _startRunCallbacks(self, result):
if self.called:
if self.debug:
if self._debugInfo is None:
self._debugInfo = DebugInfo()
extra = "\n" + self._debugInfo._getDebugTracebacks()
raise AlreadyCalledError(extra)
raise AlreadyCalledError
if self.debug:
if self._debugInfo is None:
self._debugInfo = DebugInfo()
self._debugInfo.invoker = traceback.format_stack()[:-2]
self.called = True
self.result = result
if self.timeoutCall:
try:
self.timeoutCall.cancel()
except:
pass
del self.timeoutCall
self._runCallbacks()
def _runCallbacks(self):
if not self.paused:
cb = self.callbacks
self.callbacks = []
while cb:
item = cb.pop(0)
callback, args, kw = item[
isinstance(self.result, failure.Failure)]
args = args or ()
kw = kw or {}
try:
self.result = callback(self.result, *args, **kw)
if isinstance(self.result, Deferred):
self.callbacks = cb
# note: this will cause _runCallbacks to be called
# "recursively" sometimes... this shouldn't cause any
# problems, since all the state has been set back to
# the way it's supposed to be, but it is useful to know
# in case something goes wrong. deferreds really ought
# not to return themselves from their callbacks.
self.pause()
self.result.addBoth(self._continue)
break
except:
self.result = failure.Failure()
if isinstance(self.result, failure.Failure):
self.result.cleanFailure()
if self._debugInfo is None:
self._debugInfo = DebugInfo()
self._debugInfo.failResult = self.result
else:
if self._debugInfo is not None:
self._debugInfo.failResult = None
def setTimeout(self, seconds, timeoutFunc=timeout, *args, **kw):
"""Set a timeout function to be triggered if I am not called.
@param seconds: How long to wait (from now) before firing the
timeoutFunc.
@param timeoutFunc: will receive the Deferred and *args, **kw as its
arguments. The default timeoutFunc will call the errback with a
L{TimeoutError}.
"""
warnings.warn(
"Deferred.setTimeout is deprecated. Look for timeout "
"support specific to the API you are using instead.",
DeprecationWarning, stacklevel=2)
if self.called:
return
assert not self.timeoutCall, "Don't call setTimeout twice on the same Deferred."
from twisted.internet import reactor
self.timeoutCall = reactor.callLater(
seconds,
lambda: self.called or timeoutFunc(self, *args, **kw))
return self.timeoutCall
def __str__(self):
cname = self.__class__.__name__
if hasattr(self, 'result'):
return "<%s at %s current result: %r>" % (cname, hex(unsignedID(self)),
self.result)
return "<%s at %s>" % (cname, hex(unsignedID(self)))
__repr__ = __str__
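# Illustrative sketch (editorial addition): a typical processing chain.  Each
# callback receives the previous callback's return value; a raised exception
# or returned Failure switches execution to the errback side of the chain.
def _exampleCallbackChain():
    d = Deferred()
    d.addCallback(lambda n: n * 2)
    d.addCallback(lambda n: "result: %d" % (n,))
    d.addErrback(log.err)
    # The producer of the result eventually fires the chain, e.g. d.callback(21),
    # after which the callbacks run with 21, then 42, then "result: 42".
    return d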
class DebugInfo:
"""Deferred debug helper"""
failResult = None
def _getDebugTracebacks(self):
info = ''
if hasattr(self, "creator"):
info += " C: Deferred was created:\n C:"
info += "".join(self.creator).rstrip().replace("\n","\n C:")
info += "\n"
if hasattr(self, "invoker"):
info += " I: First Invoker was:\n I:"
info += "".join(self.invoker).rstrip().replace("\n","\n I:")
info += "\n"
return info
def __del__(self):
"""Print tracebacks and die.
If the *last* (and I do mean *last*) callback leaves me in an error
state, print a traceback (if said errback is a Failure).
"""
if self.failResult is not None:
log.msg("Unhandled error in Deferred:", isError=True)
debugInfo = self._getDebugTracebacks()
if debugInfo != '':
log.msg("(debug: " + debugInfo + ")", isError=True)
log.err(self.failResult)
class FirstError(Exception):
"""First error to occur in a DeferredList if fireOnOneErrback is set.
@ivar subFailure: the L{Failure} that occurred.
@ivar index: the index of the Deferred in the DeferredList where it
happened.
"""
def __init__(self, failure, index):
self.subFailure = failure
self.index = index
def __repr__(self):
return 'FirstError(%r, %d)' % (self.subFailure, self.index)
def __str__(self):
return repr(self)
def __getitem__(self, index):
warnings.warn("FirstError.__getitem__ is deprecated. "
"Use attributes instead.",
category=DeprecationWarning, stacklevel=2)
return [self.subFailure, self.index][index]
def __getslice__(self, start, stop):
warnings.warn("FirstError.__getslice__ is deprecated. "
"Use attributes instead.",
category=DeprecationWarning, stacklevel=2)
return [self.subFailure, self.index][start:stop]
def __eq__(self, other):
if isinstance(other, tuple):
return tuple(self) == other
elif isinstance(other, FirstError):
return (self.subFailure == other.subFailure and
self.index == other.index)
return False
class DeferredList(Deferred):
"""I combine a group of deferreds into one callback.
I track a list of L{Deferred}s for their callbacks, and make a single
callback when they have all completed, a list of (success, result)
tuples, 'success' being a boolean.
Note that you can still use a L{Deferred} after putting it in a
DeferredList. For example, you can suppress 'Unhandled error in Deferred'
messages by adding errbacks to the Deferreds *after* putting them in the
DeferredList, as a DeferredList won't swallow the errors. (Although a more
convenient way to do this is simply to set the consumeErrors flag)
"""
fireOnOneCallback = 0
fireOnOneErrback = 0
def __init__(self, deferredList, fireOnOneCallback=0, fireOnOneErrback=0,
consumeErrors=0):
"""Initialize a DeferredList.
@type deferredList: C{list} of L{Deferred}s
@param deferredList: The list of deferreds to track.
@param fireOnOneCallback: (keyword param) a flag indicating that
only one callback needs to be fired for me to call
my callback
@param fireOnOneErrback: (keyword param) a flag indicating that
only one errback needs to be fired for me to call
my errback
@param consumeErrors: (keyword param) a flag indicating that any errors
raised in the original deferreds should be
consumed by this DeferredList. This is useful to
prevent spurious warnings being logged.
"""
self.resultList = [None] * len(deferredList)
Deferred.__init__(self)
if len(deferredList) == 0 and not fireOnOneCallback:
self.callback(self.resultList)
# These flags need to be set *before* attaching callbacks to the
# deferreds, because the callbacks use these flags, and will run
# synchronously if any of the deferreds are already fired.
self.fireOnOneCallback = fireOnOneCallback
self.fireOnOneErrback = fireOnOneErrback
self.consumeErrors = consumeErrors
self.finishedCount = 0
index = 0
for deferred in deferredList:
deferred.addCallbacks(self._cbDeferred, self._cbDeferred,
callbackArgs=(index,SUCCESS),
errbackArgs=(index,FAILURE))
index = index + 1
def _cbDeferred(self, result, index, succeeded):
"""(internal) Callback for when one of my deferreds fires.
"""
self.resultList[index] = (succeeded, result)
self.finishedCount += 1
if not self.called:
if succeeded == SUCCESS and self.fireOnOneCallback:
self.callback((result, index))
elif succeeded == FAILURE and self.fireOnOneErrback:
self.errback(failure.Failure(FirstError(result, index)))
elif self.finishedCount == len(self.resultList):
self.callback(self.resultList)
if succeeded == FAILURE and self.consumeErrors:
result = None
return result
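# Illustrative sketch (editorial addition): collecting several Deferreds.  The
# DeferredList fires with a list of (success, result) tuples once every input
# Deferred has fired; consumeErrors avoids 'Unhandled error' log noise.
def _exampleDeferredList(d1, d2):
    def report(results):
        for success, value in results:
            log.msg("success=%s value=%r" % (success, value))
        return results
    return DeferredList([d1, d2], consumeErrors=1).addCallback(report)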
def _parseDListResult(l, fireOnOneErrback=0):
if __debug__:
for success, value in l:
assert success
return [x[1] for x in l]
def gatherResults(deferredList):
"""Returns list with result of given Deferreds.
This builds on C{DeferredList} but is useful since you don't
need to parse the result for success/failure.
@type deferredList: C{list} of L{Deferred}s
"""
d = DeferredList(deferredList, fireOnOneErrback=1)
d.addCallback(_parseDListResult)
return d
# Constants for use with DeferredList
SUCCESS = True
FAILURE = False
class waitForDeferred:
"""
API Stability: semi-stable
Maintainer: U{Christopher Armstrong<mailto:radix@twistedmatrix.com>}
waitForDeferred and deferredGenerator help you write Deferred-using code
that looks like it's blocking (but isn't really), with the help of
generators.
There are two important functions involved: waitForDeferred, and
deferredGenerator. They are used together, like this::
def thingummy():
thing = waitForDeferred(makeSomeRequestResultingInDeferred())
yield thing
thing = thing.getResult()
print thing #the result! hoorj!
thingummy = deferredGenerator(thingummy)
waitForDeferred returns something that you should immediately yield; when
your generator is resumed, calling thing.getResult() will either give you
the result of the Deferred if it was a success, or raise an exception if it
was a failure. Calling C{getResult} is B{absolutely mandatory}. If you do
not call it, I{your program will not work}.
deferredGenerator takes one of these waitForDeferred-using generator
functions and converts it into a function that returns a Deferred. The
result of the Deferred will be the last value that your generator yielded
unless the last value is a waitForDeferred instance, in which case the
result will be C{None}. If the function raises an unhandled exception, the
Deferred will errback instead. Remember that 'return result' won't work;
use 'yield result; return' in place of that.
Note that not yielding anything from your generator will make the Deferred
result in None. Yielding a Deferred from your generator is also an error
condition; always yield waitForDeferred(d) instead.
The Deferred returned from your deferred generator may also errback if your
generator raised an exception. For example::
def thingummy():
thing = waitForDeferred(makeSomeRequestResultingInDeferred())
yield thing
thing = thing.getResult()
if thing == 'I love Twisted':
# will become the result of the Deferred
yield 'TWISTED IS GREAT!'
return
else:
# will trigger an errback
raise Exception('DESTROY ALL LIFE')
thingummy = deferredGenerator(thingummy)
Put succinctly, these functions connect deferred-using code with this 'fake
blocking' style in both directions: waitForDeferred converts from a
Deferred to the 'blocking' style, and deferredGenerator converts from the
'blocking' style to a Deferred.
"""
def __init__(self, d):
if not isinstance(d, Deferred):
raise TypeError("You must give waitForDeferred a Deferred. You gave it %r." % (d,))
self.d = d
def getResult(self):
if isinstance(self.result, failure.Failure):
self.result.raiseException()
return self.result
def _deferGenerator(g, deferred=None):
"""
See L{waitForDeferred}.
"""
result = None
while 1:
if deferred is None:
deferred = Deferred()
try:
result = g.next()
except StopIteration:
deferred.callback(result)
return deferred
except:
deferred.errback()
return deferred
# Deferred.callback(Deferred) raises an error; we catch this case
# early here and give a nicer error message to the user in case
# they yield a Deferred. Perhaps eventually these semantics may
# change.
if isinstance(result, Deferred):
return fail(TypeError("Yield waitForDeferred(d), not d!"))
if isinstance(result, waitForDeferred):
waiting = [True, None]
# Pass vars in so they don't get changed going around the loop
def gotResult(r, waiting=waiting, result=result):
result.result = r
if waiting[0]:
waiting[0] = False
waiting[1] = r
else:
_deferGenerator(g, deferred)
result.d.addBoth(gotResult)
if waiting[0]:
# Haven't called back yet, set flag so that we get reinvoked
# and return from the loop
waiting[0] = False
return deferred
result = None # waiting[1]
def deferredGenerator(f):
"""
See L{waitForDeferred}.
"""
def unwindGenerator(*args, **kwargs):
return _deferGenerator(f(*args, **kwargs))
return mergeFunctionMetadata(f, unwindGenerator)
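# Illustrative sketch (editorial addition): the waitForDeferred /
# deferredGenerator pattern from the docstring above, written out.
# 'fetchPage' is a hypothetical function returning a Deferred of a string.
def _exampleDeferredGenerator(fetchPage):
    def worker():
        page = waitForDeferred(fetchPage("http://example.com/"))
        yield page
        page = page.getResult()
        yield len(page)  # the last yielded value becomes the Deferred's result
    return deferredGenerator(worker)()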
class _ConcurrencyPrimitive(object):
def __init__(self):
self.waiting = []
def _releaseAndReturn(self, r):
self.release()
return r
def run(*args, **kwargs):
"""Acquire, run, release.
This function takes a callable as its first argument and any
number of other positional and keyword arguments. When the
lock or semaphore is acquired, the callable will be invoked
with those arguments.
The callable may return a Deferred; if it does, the lock or
semaphore won't be released until that Deferred fires.
@return: Deferred of function result.
"""
if len(args) < 2:
if not args:
raise TypeError("run() takes at least 2 arguments, none given.")
raise TypeError("%s.run() takes at least 2 arguments, 1 given" % (
args[0].__class__.__name__,))
self, f = args[:2]
args = args[2:]
def execute(ignoredResult):
d = maybeDeferred(f, *args, **kwargs)
d.addBoth(self._releaseAndReturn)
return d
d = self.acquire()
d.addCallback(execute)
return d
class DeferredLock(_ConcurrencyPrimitive):
"""A lock for event driven systems.
API stability: Unstable
@ivar locked: True when this Lock has been acquired, false at all
other times. Do not change this value, but it is useful to
examine for the equivalent of a \"non-blocking\" acquisition.
"""
locked = 0
def acquire(self):
"""Attempt to acquire the lock.
@return: a Deferred which fires on lock acquisition.
"""
d = Deferred()
if self.locked:
self.waiting.append(d)
else:
self.locked = 1
d.callback(self)
return d
def release(self):
"""Release the lock.
        Should be called by whoever did the acquire() when the shared
resource is free.
"""
assert self.locked, "Tried to release an unlocked lock"
self.locked = 0
if self.waiting:
# someone is waiting to acquire lock
self.locked = 1
d = self.waiting.pop(0)
d.callback(self)
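# Illustrative sketch (editorial addition): serialising access to a shared
# resource.  run() acquires the lock, invokes the callable, and releases the
# lock once the callable's (possibly Deferred) result is available.
# 'writeRecord' is a hypothetical function.
def _exampleLockedWrite(lock, writeRecord, record):
    return lock.run(writeRecord, record)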
class DeferredSemaphore(_ConcurrencyPrimitive):
"""A semaphore for event driven systems.
API stability: Unstable
"""
def __init__(self, tokens):
_ConcurrencyPrimitive.__init__(self)
self.tokens = tokens
self.limit = tokens
def acquire(self):
"""Attempt to acquire the token.
@return: a Deferred which fires on token acquisition.
"""
assert self.tokens >= 0, "Internal inconsistency?? tokens should never be negative"
d = Deferred()
if not self.tokens:
self.waiting.append(d)
else:
self.tokens = self.tokens - 1
d.callback(self)
return d
def release(self):
"""Release the token.
Should be called by whoever did the acquire() when the shared
resource is free.
"""
assert self.tokens < self.limit, "Someone released me too many times: too many tokens!"
self.tokens = self.tokens + 1
if self.waiting:
# someone is waiting to acquire token
self.tokens = self.tokens - 1
d = self.waiting.pop(0)
d.callback(self)
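# Illustrative sketch (editorial addition): bounding concurrency so that at
# most two downloads are in flight at a time.  'download' is a hypothetical
# Deferred-returning function.
def _exampleBoundedDownloads(urls, download):
    sem = DeferredSemaphore(2)
    return DeferredList([sem.run(download, url) for url in urls])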
class QueueOverflow(Exception):
pass
class QueueUnderflow(Exception):
pass
class DeferredQueue(object):
"""An event driven queue.
API stability: Unstable
Objects may be added as usual to this queue. When an attempt is
made to retrieve an object when the queue is empty, a Deferred is
returned which will fire when an object becomes available.
@ivar size: The maximum number of objects to allow into the queue
at a time. When an attempt to add a new object would exceed this
limit, QueueOverflow is raised synchronously. None for no limit.
@ivar backlog: The maximum number of Deferred gets to allow at
one time. When an attempt is made to get an object which would
exceed this limit, QueueUnderflow is raised synchronously. None
for no limit.
"""
def __init__(self, size=None, backlog=None):
self.waiting = []
self.pending = []
self.size = size
self.backlog = backlog
def put(self, obj):
"""Add an object to this queue.
@raise QueueOverflow: Too many objects are in this queue.
"""
if self.waiting:
self.waiting.pop(0).callback(obj)
elif self.size is None or len(self.pending) < self.size:
self.pending.append(obj)
else:
raise QueueOverflow()
def get(self):
"""Attempt to retrieve and remove an object from the queue.
@return: a Deferred which fires with the next object available in the queue.
@raise QueueUnderflow: Too many (more than C{backlog})
Deferreds are already waiting for an object from this queue.
"""
if self.pending:
return succeed(self.pending.pop(0))
elif self.backlog is None or len(self.waiting) < self.backlog:
d = Deferred()
self.waiting.append(d)
return d
else:
raise QueueUnderflow()
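# Illustrative sketch (editorial addition): a simple consumer loop.  get()
# returns a Deferred that fires as soon as an object is put(), so the consumer
# never blocks; each consumed object schedules the next get().
def _exampleQueueConsumer(queue):
    def consume(obj):
        log.msg("consumed %r" % (obj,))
        queue.get().addCallback(consume)
    return queue.get().addCallback(consume)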
__all__ = ["Deferred", "DeferredList", "succeed", "fail", "FAILURE", "SUCCESS",
"AlreadyCalledError", "TimeoutError", "gatherResults",
"maybeDeferred", "waitForDeferred", "deferredGenerator",
"DeferredLock", "DeferredSemaphore", "DeferredQueue",
]
|
kenorb-contrib/BitTorrent
|
twisted/internet/defer.py
|
Python
|
gpl-3.0
| 28,690
|
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
import pipes
import subprocess
import sys
import time
from ansible import constants as C
from ansible.errors import AnsibleError, AnsibleParserError
from ansible.playbook.conditional import Conditional
from ansible.playbook.task import Task
from ansible.plugins import lookup_loader, connection_loader, action_loader
from ansible.template import Templar
from ansible.utils.listify import listify_lookup_plugin_terms
from ansible.utils.unicode import to_unicode
from ansible.utils.debug import debug
__all__ = ['TaskExecutor']
class TaskExecutor:
'''
This is the main worker class for the executor pipeline, which
handles loading an action plugin to actually dispatch the task to
a given host. This class roughly corresponds to the old Runner()
class.
'''
# Modules that we optimize by squashing loop items into a single call to
# the module
SQUASH_ACTIONS = frozenset(('apt', 'yum', 'pkgng', 'zypper', 'dnf'))
def __init__(self, host, task, job_vars, play_context, new_stdin, loader, shared_loader_obj):
self._host = host
self._task = task
self._job_vars = job_vars
self._play_context = play_context
self._new_stdin = new_stdin
self._loader = loader
self._shared_loader_obj = shared_loader_obj
def run(self):
'''
The main executor entrypoint, where we determine if the specified
        task requires looping and either runs the task with the loop or executes it directly.
'''
debug("in run()")
try:
# lookup plugins need to know if this task is executing from
# a role, so that it can properly find files/templates/etc.
roledir = None
if self._task._role:
roledir = self._task._role._role_path
self._job_vars['roledir'] = roledir
items = self._get_loop_items()
if items is not None:
if len(items) > 0:
item_results = self._run_loop(items)
# loop through the item results, and remember the changed/failed
# result flags based on any item there.
changed = False
failed = False
for item in item_results:
if 'changed' in item and item['changed']:
changed = True
if 'failed' in item and item['failed']:
failed = True
# create the overall result item, and set the changed/failed
# flags there to reflect the overall result of the loop
res = dict(results=item_results)
if changed:
res['changed'] = True
if failed:
res['failed'] = True
res['msg'] = 'One or more items failed'
else:
res['msg'] = 'All items completed'
else:
res = dict(changed=False, skipped=True, skipped_reason='No items in the list', results=[])
else:
debug("calling self._execute()")
res = self._execute()
debug("_execute() done")
# make sure changed is set in the result, if it's not present
if 'changed' not in res:
res['changed'] = False
debug("dumping result to json")
result = json.dumps(res)
debug("done dumping result, returning")
return result
except AnsibleError, e:
return dict(failed=True, msg=to_unicode(e, nonstring='simplerepr'))
def _get_loop_items(self):
'''
Loads a lookup plugin to handle the with_* portion of a task (if specified),
and returns the items result.
'''
items = None
if self._task.loop and self._task.loop in lookup_loader:
loop_terms = listify_lookup_plugin_terms(terms=self._task.loop_args, variables=self._job_vars, loader=self._loader)
items = lookup_loader.get(self._task.loop, loader=self._loader).run(terms=loop_terms, variables=self._job_vars)
return items
def _run_loop(self, items):
'''
Runs the task with the loop items specified and collates the result
into an array named 'results' which is inserted into the final result
along with the item for which the loop ran.
'''
results = []
# make copies of the job vars and task so we can add the item to
# the variables and re-validate the task with the item variable
task_vars = self._job_vars.copy()
items = self._squash_items(items, task_vars)
for item in items:
task_vars['item'] = item
try:
tmp_task = self._task.copy()
except AnsibleParserError, e:
results.append(dict(failed=True, msg=str(e)))
continue
# now we swap the internal task with the copy, execute,
# and swap them back so we can do the next iteration cleanly
(self._task, tmp_task) = (tmp_task, self._task)
res = self._execute(variables=task_vars)
(self._task, tmp_task) = (tmp_task, self._task)
# now update the result with the item info, and append the result
# to the list of results
res['item'] = item
results.append(res)
return results
def _squash_items(self, items, variables):
'''
Squash items down to a comma-separated list for certain modules which support it
(typically package management modules).
'''
if len(items) > 0 and self._task.action in self.SQUASH_ACTIONS:
final_items = []
name = self._task.args.pop('name', None) or self._task.args.pop('pkg', None)
for item in items:
variables['item'] = item
templar = Templar(loader=self._loader, shared_loader_obj=self._shared_loader_obj, variables=variables)
if self._task.evaluate_conditional(templar, variables):
if templar._contains_vars(name):
new_item = templar.template(name)
final_items.append(new_item)
else:
final_items.append(item)
joined_items = ",".join(final_items)
self._task.args['name'] = joined_items
return [joined_items]
else:
return items
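    # Illustrative sketch (editorial addition): for a task such as
    #   - yum: name={{ item }} state=present
    #     with_items: [ 'httpd', 'mod_ssl' ]
    # _squash_items() collapses the loop into one module call by setting
    # self._task.args['name'] = 'httpd,mod_ssl' and returning ['httpd,mod_ssl'].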
def _execute(self, variables=None):
'''
The primary workhorse of the executor system, this runs the task
on the specified host (which may be the delegated_to host) and handles
the retry/until and block rescue/always execution
'''
if variables is None:
variables = self._job_vars
templar = Templar(loader=self._loader, shared_loader_obj=self._shared_loader_obj, variables=variables)
# fields set from the play/task may be based on variables, so we have to
# do the same kind of post validation step on it here before we use it.
self._play_context.post_validate(templar=templar)
# now that the play context is finalized, we can add 'magic'
# variables to the variable dictionary
self._play_context.update_vars(variables)
# Evaluate the conditional (if any) for this task, which we do before running
# the final task post-validation. We do this before the post validation due to
# the fact that the conditional may specify that the task be skipped due to a
# variable not being present which would otherwise cause validation to fail
if not self._task.evaluate_conditional(templar, variables):
debug("when evaulation failed, skipping this task")
return dict(changed=False, skipped=True, skip_reason='Conditional check failed')
        # Now we do final validation on the task, which sets all fields to their final values.
        # In the case of debug tasks, we save any 'var' params and restore them after validating
        # so that variables are not replaced too early.
        prev_var = None
        if self._task.action == 'debug' and 'var' in self._task.args:
            prev_var = self._task.args.pop('var')

        self._task.post_validate(templar=templar)
        if prev_var is not None:
            self._task.args['var'] = prev_var

        # if this task is a TaskInclude, we just return now with a success code so the
        # main thread can expand the task list for the given host
        if self._task.action == 'include':
            include_variables = self._task.args.copy()
            include_file = include_variables.get('_raw_params')
            del include_variables['_raw_params']
            return dict(include=include_file, include_variables=include_variables)
        # get the connection and the handler for this execution
        self._connection = self._get_connection(variables)
        self._connection.set_host_overrides(host=self._host)

        self._handler = self._get_action_handler(connection=self._connection, templar=templar)

        # And filter out any fields which were set to default(omit), and got the omit token value
        omit_token = variables.get('omit')
        if omit_token is not None:
            self._task.args = dict(filter(lambda x: x[1] != omit_token, self._task.args.iteritems()))
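        # e.g. an argument written as mode={{ my_mode | default(omit) }} (my_mode being
        # a hypothetical variable) renders to the omit token when undefined, and is
        # stripped from the module args here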
        # Read some values from the task, so that we can modify them if need be
        retries = self._task.retries
        if retries <= 0:
            retries = 1

        delay = self._task.delay
        if delay < 0:
            delay = 1
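        # the clamps above guarantee at least one attempt and a non-negative
        # delay between retries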
        # make a copy of the job vars here, in case we need to update them
        # with the registered variable value later on when testing conditions
        vars_copy = variables.copy()

        debug("starting attempt loop")
        result = None
        for attempt in range(retries):
            if attempt > 0:
                # FIXME: this should use the callback/message passing mechanism
                print("FAILED - RETRYING: %s (%d retries left). Result was: %s" % (self._task, retries-attempt, result))
                result['attempts'] = attempt + 1

            debug("running the handler")
            result = self._handler.run(task_vars=variables)
            debug("handler run complete")

            if self._task.async > 0:
                # the async_wrapper module returns dumped JSON via its stdout
                # response, so we parse it here and replace the result
                try:
                    result = json.loads(result.get('stdout'))
                except ValueError, e:
                    return dict(failed=True, msg="The async task did not return valid JSON: %s" % str(e))

                if self._task.poll > 0:
                    result = self._poll_async_result(result=result, templar=templar)

            # update the local copy of vars with the registered value, if specified,
            # or any facts which may have been generated by the module execution
            if self._task.register:
                vars_copy[self._task.register] = result

            if 'ansible_facts' in result:
                vars_copy.update(result['ansible_facts'])

            # create a conditional object to evaluate task conditions
            cond = Conditional(loader=self._loader)

            # FIXME: make sure until is mutually exclusive with changed_when/failed_when
            if self._task.until:
                cond.when = self._task.until
                if cond.evaluate_conditional(templar, vars_copy):
                    break
            elif (self._task.changed_when or self._task.failed_when) and 'skipped' not in result:
                if self._task.changed_when:
                    cond.when = [ self._task.changed_when ]
                    result['changed'] = cond.evaluate_conditional(templar, vars_copy)
                if self._task.failed_when:
                    cond.when = [ self._task.failed_when ]
                    failed_when_result = cond.evaluate_conditional(templar, vars_copy)
                    result['failed_when_result'] = result['failed'] = failed_when_result
                    if failed_when_result:
                        break
            elif 'failed' not in result and result.get('rc', 0) == 0:
                # if the result is not failed, stop trying
                break

            if attempt < retries - 1:
                time.sleep(delay)

        # do the final update of the local variables here, for both registered
        # values and any facts which may have been created
        if self._task.register:
            variables[self._task.register] = result

        if 'ansible_facts' in result:
            variables.update(result['ansible_facts'])

        # save the notification target in the result, if it was specified, as
        # this task may be running in a loop in which case the notification
        # may be item-specific, i.e. "notify: service {{item}}"
        if self._task.notify is not None:
            result['ansible_notify'] = self._task.notify

        # and return
        debug("attempt loop complete, returning result")
        return result

    def _poll_async_result(self, result, templar):
        '''
        Polls for the specified JID to be complete
        '''

        async_jid = result.get('ansible_job_id')
        if async_jid is None:
            return dict(failed=True, msg="No job id was returned by the async task")

        # Create a new pseudo-task to run the async_status module, and run
        # that (with a sleep for "poll" seconds between each retry) until the
        # async time limit is exceeded.
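        # this is roughly the equivalent of an ad-hoc task "async_status: jid=<job id>"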
        async_task = Task().load(dict(action='async_status jid=%s' % async_jid))

        # Because this is an async task, the action handler is async. However,
        # we need the 'normal' action handler for the status check, so get it
        # now via the action_loader
        normal_handler = action_loader.get(
            'normal',
            task=async_task,
            connection=self._connection,
            play_context=self._play_context,
            loader=self._loader,
            templar=templar,
            shared_loader_obj=self._shared_loader_obj,
        )

        time_left = self._task.async
        while time_left > 0:
            time.sleep(self._task.poll)

            async_result = normal_handler.run()
            if int(async_result.get('finished', 0)) == 1 or 'failed' in async_result or 'skipped' in async_result:
                break

            time_left -= self._task.poll

        if int(async_result.get('finished', 0)) != 1:
            return dict(failed=True, msg="async task did not complete within the requested time")
        else:
            return async_result

    def _get_connection(self, variables):
        '''
        Reads the connection property for the host, and returns the
        correct connection object from the list of connection plugins
        '''

        # FIXME: delegate_to calculation should be done here
        # FIXME: calculation of connection params/auth stuff should be done here

        if not self._play_context.remote_addr:
            self._play_context.remote_addr = self._host.ipv4_address

        if self._task.delegate_to is not None:
            self._compute_delegate(variables)

        conn_type = self._play_context.connection
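        # 'smart' is resolved to a concrete transport here: prefer ssh, but fall
        # back to paramiko when sshpass on OS X or a lack of ControlPersist
        # support would make ssh unreliable (checked below)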
        if conn_type == 'smart':
            conn_type = 'ssh'
            if sys.platform.startswith('darwin') and self._play_context.password:
                # due to a current bug in sshpass on OSX, which can trigger
                # a kernel panic even for non-privileged users, we revert to
                # paramiko on that OS when an SSH password is specified
                conn_type = "paramiko"
            else:
                # see if SSH can support ControlPersist; if not, use paramiko
                try:
                    cmd = subprocess.Popen(['ssh','-o','ControlPersist'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
                    (out, err) = cmd.communicate()
                    if "Bad configuration option" in err:
                        conn_type = "paramiko"
                except OSError:
                    conn_type = "paramiko"

        connection = connection_loader.get(conn_type, self._play_context, self._new_stdin)
        if not connection:
            raise AnsibleError("the connection plugin '%s' was not found" % conn_type)

        return connection

    def _get_action_handler(self, connection, templar):
        '''
        Returns the correct action plugin to handle the requested task action
        '''
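        # actions with a dedicated plugin (e.g. 'template' or 'copy') are handled by
        # that plugin and cannot run async; everything else falls back to the generic
        # 'normal' handler, or to 'async' when an async timeout was requested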
        if self._task.action in action_loader:
            if self._task.async != 0:
                raise AnsibleError("async mode is not supported with the %s module" % self._task.action)
            handler_name = self._task.action
        elif self._task.async == 0:
            handler_name = 'normal'
        else:
            handler_name = 'async'

        handler = action_loader.get(
            handler_name,
            task=self._task,
            connection=connection,
            play_context=self._play_context,
            loader=self._loader,
            templar=templar,
            shared_loader_obj=self._shared_loader_obj,
        )
        if not handler:
            raise AnsibleError("the handler '%s' was not found" % handler_name)

        return handler

    def _compute_delegate(self, variables):

        # get the vars for the delegate by its name
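        # and use them to override the connection address, port and credentials
        # so this task is executed against the delegated host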
        try:
            this_info = variables['hostvars'][self._task.delegate_to]

            # get the real ssh_address for the delegate and allow ansible_ssh_host to be templated
            #self._play_context.remote_user = self._compute_delegate_user(self.delegate_to, delegate['inject'])
            self._play_context.remote_addr = this_info.get('ansible_ssh_host', self._task.delegate_to)
            self._play_context.port = this_info.get('ansible_ssh_port', self._play_context.port)
            self._play_context.password = this_info.get('ansible_ssh_pass', self._play_context.password)
            self._play_context.private_key_file = this_info.get('ansible_ssh_private_key_file', self._play_context.private_key_file)
            self._play_context.connection = this_info.get('ansible_connection', C.DEFAULT_TRANSPORT)
            self._play_context.become_pass = this_info.get('ansible_sudo_pass', self._play_context.become_pass)
        except:
            # make sure the inject is empty for non-inventory hosts
            this_info = {}

        if self._play_context.remote_addr in ('127.0.0.1', 'localhost'):
            self._play_context.connection = 'local'

        # Last chance to get private_key_file from global variables.
        # this is useful if delegated host is not defined in the inventory
        #if delegate['private_key_file'] is None:
        #    delegate['private_key_file'] = remote_inject.get('ansible_ssh_private_key_file', None)
        #if delegate['private_key_file'] is not None:
        #    delegate['private_key_file'] = os.path.expanduser(delegate['private_key_file'])

        for i in this_info:
            if i.startswith("ansible_") and i.endswith("_interpreter"):
                variables[i] = this_info[i]
|
dmccue/ansible
|
lib/ansible/executor/task_executor.py
|
Python
|
gpl-3.0
| 20,470
|
'''
This file is part of TvTumbler.
@author: Dermot Buckley
@copyright: Copyright (c) 2013, Dermot Buckley
@license: GPL
@contact: info@tvtumbler.com
'''
import time
import xbmc
from . import logger, fastcache, blacklist, epdb, showsettings
from .names import scene
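# last-run timestamps for each housekeeping job, initialised to module load time
# so nothing fires immediately on startup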
_fastcache_expire_last_run = time.time()
_blacklist_expire_last_run = time.time()
_epdb_last_run = time.time()
_showsettings_purge_last_run = time.time()
def run():
    global _fastcache_expire_last_run, _blacklist_expire_last_run, _epdb_last_run, _showsettings_purge_last_run
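    # each job below runs only when its own interval has elapsed; between jobs we
    # re-check for an XBMC abort or addon shutdown so housekeeping never delays exit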
    logger.debug('housekeeper - run')

    if xbmc.Player().isPlaying():
        logger.debug('XBMC is Playing, skipping housekeeping')
        return

    from . import main

    if xbmc.abortRequested or main.shutdownRequested:
        return

    if time.time() - _fastcache_expire_last_run > 60 * 60 * 24:  # 24 hrs
        fastcache.expire_old_records()
        _fastcache_expire_last_run = time.time()

    if xbmc.abortRequested or main.shutdownRequested:
        return

    if time.time() - _blacklist_expire_last_run > 60 * 60 * 36:  # 36 hrs
        blacklist.expire_old_records()
        _blacklist_expire_last_run = time.time()

    if xbmc.abortRequested or main.shutdownRequested:
        return

    scene.update_if_needed()

    if xbmc.abortRequested or main.shutdownRequested:
        return

    # We try to refresh the shows every 42 minutes.  Only shows actually needing a refresh
    # will be refreshed.  We limit each run to 10 shows (the 10 oldest).
    if time.time() - _epdb_last_run > 60 * 42:
        epdb.refresh_needed_shows(show_limit=10)
        _epdb_last_run = time.time()

    if xbmc.abortRequested or main.shutdownRequested:
        return

    if time.time() - _showsettings_purge_last_run > 60 * 60 * 13:  # 13 hours
        showsettings.purge_missing_shows()
        _showsettings_purge_last_run = time.time()

    logger.debug('housekeeper is finished')
|
maliciamrg/xbmc-addon-tvtumbler
|
tvtumbler/housekeeper.py
|
Python
|
gpl-3.0
| 1,946
|