code
stringlengths 2
1.05M
| repo_name
stringlengths 5
104
| path
stringlengths 4
251
| language
stringclasses 1
value | license
stringclasses 15
values | size
int32 2
1.05M
|
|---|---|---|---|---|---|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras hashing preprocessing layer."""
# pylint: disable=g-classes-have-attributes
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import tensor_util
from tensorflow.python.keras.engine import base_preprocessing_layer
from tensorflow.python.keras.utils import tf_utils
from tensorflow.python.ops import gen_sparse_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.ops.ragged import ragged_functional_ops
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.util.tf_export import keras_export
# Default key from tf.sparse.cross_hashed
_DEFAULT_SALT_KEY = [0xDECAFCAFFE, 0xDECAFCAFFE]
@keras_export('keras.layers.experimental.preprocessing.Hashing')
class Hashing(base_preprocessing_layer.PreprocessingLayer):
"""Implements categorical feature hashing, also known as "hashing trick".
This layer transforms single or multiple categorical inputs to hashed output.
It converts a sequence of int or string to a sequence of int. The stable hash
function uses tensorflow::ops::Fingerprint to produce universal output that
is consistent across platforms.
This layer uses [FarmHash64](https://github.com/google/farmhash) by default,
which provides a consistent hashed output across different platforms and is
stable across invocations, regardless of device and context, by mixing the
input bits thoroughly.
If you want to obfuscate the hashed output, you can also pass a random `salt`
argument in the constructor. In that case, the layer will use the
[SipHash64](https://github.com/google/highwayhash) hash function, with
the `salt` value serving as additional input to the hash function.
Example (FarmHash64):
>>> layer = tf.keras.layers.experimental.preprocessing.Hashing(num_bins=3)
>>> inp = [['A'], ['B'], ['C'], ['D'], ['E']]
>>> layer(inp)
<tf.Tensor: shape=(5, 1), dtype=int64, numpy=
array([[1],
[0],
[1],
[1],
[2]])>
Example (FarmHash64) with list of inputs:
>>> layer = tf.keras.layers.experimental.preprocessing.Hashing(num_bins=3)
>>> inp_1 = [['A'], ['B'], ['C'], ['D'], ['E']]
>>> inp_2 = np.asarray([[5], [4], [3], [2], [1]])
>>> layer([inp_1, inp_2])
<tf.Tensor: shape=(5, 1), dtype=int64, numpy=
array([[1],
[1],
[0],
[2],
[0]])>
Example (SipHash64):
>>> layer = tf.keras.layers.experimental.preprocessing.Hashing(num_bins=3,
... salt=[133, 137])
>>> inp = [['A'], ['B'], ['C'], ['D'], ['E']]
>>> layer(inp)
<tf.Tensor: shape=(5, 1), dtype=int64, numpy=
array([[1],
[2],
[1],
[0],
[2]])>
Example (Siphash64 with a single integer, same as `salt=[133, 133]`
>>> layer = tf.keras.layers.experimental.preprocessing.Hashing(num_bins=3,
... salt=133)
>>> inp = [['A'], ['B'], ['C'], ['D'], ['E']]
>>> layer(inp)
<tf.Tensor: shape=(5, 1), dtype=int64, numpy=
array([[0],
[0],
[2],
[1],
[0]])>
Reference: [SipHash with salt](https://www.131002.net/siphash/siphash.pdf)
Arguments:
num_bins: Number of hash bins.
salt: A single unsigned integer or None.
If passed, the hash function used will be SipHash64, with these values
used as an additional input (known as a "salt" in cryptography).
These should be non-zero. Defaults to `None` (in that
case, the FarmHash64 hash function is used). It also supports
tuple/list of 2 unsigned integer numbers, see reference paper for details.
name: Name to give to the layer.
**kwargs: Keyword arguments to construct a layer.
Input shape: A single or list of string, int32 or int64 `Tensor`,
`SparseTensor` or `RaggedTensor` of shape `[batch_size, ...,]`
Output shape: An int64 `Tensor`, `SparseTensor` or `RaggedTensor` of shape
`[batch_size, ...]`. If any input is `RaggedTensor` then output is
`RaggedTensor`, otherwise if any input is `SparseTensor` then output is
`SparseTensor`, otherwise the output is `Tensor`.
"""
def __init__(self, num_bins, salt=None, name=None, **kwargs):
if num_bins is None or num_bins <= 0:
raise ValueError('`num_bins` cannot be `None` or non-positive values.')
super(Hashing, self).__init__(name=name, **kwargs)
base_preprocessing_layer.keras_kpl_gauge.get_cell('Hashing').set(True)
self.num_bins = num_bins
self.strong_hash = True if salt is not None else False
if salt is not None:
if isinstance(salt, (tuple, list)) and len(salt) == 2:
self.salt = salt
elif isinstance(salt, int):
self.salt = [salt, salt]
else:
raise ValueError('`salt can only be a tuple of size 2 integers, or a '
'single integer, given {}'.format(salt))
else:
self.salt = _DEFAULT_SALT_KEY
def _preprocess_single_input(self, inp):
if isinstance(inp, (list, tuple, np.ndarray)):
inp = ops.convert_to_tensor_v2_with_dispatch(inp)
return inp
def _preprocess_inputs(self, inputs):
if isinstance(inputs, (tuple, list)):
# If any of them is tensor or ndarray, then treat as list
if any(
tensor_util.is_tensor(inp) or isinstance(inp, np.ndarray)
for inp in inputs):
return [self._preprocess_single_input(inp) for inp in inputs]
return self._preprocess_single_input(inputs)
def call(self, inputs):
inputs = self._preprocess_inputs(inputs)
if isinstance(inputs, (tuple, list)):
return self._process_input_list(inputs)
else:
return self._process_single_input(inputs)
def _process_single_input(self, inputs):
# Converts integer inputs to string.
if inputs.dtype.is_integer:
if isinstance(inputs, sparse_tensor.SparseTensor):
inputs = sparse_tensor.SparseTensor(
indices=inputs.indices,
values=string_ops.as_string(inputs.values),
dense_shape=inputs.dense_shape)
else:
inputs = string_ops.as_string(inputs)
str_to_hash_bucket = self._get_string_to_hash_bucket_fn()
if tf_utils.is_ragged(inputs):
return ragged_functional_ops.map_flat_values(
str_to_hash_bucket, inputs, num_buckets=self.num_bins, name='hash')
elif isinstance(inputs, sparse_tensor.SparseTensor):
sparse_values = inputs.values
sparse_hashed_values = str_to_hash_bucket(
sparse_values, self.num_bins, name='hash')
return sparse_tensor.SparseTensor(
indices=inputs.indices,
values=sparse_hashed_values,
dense_shape=inputs.dense_shape)
else:
return str_to_hash_bucket(inputs, self.num_bins, name='hash')
def _process_input_list(self, inputs):
# TODO(momernick): support ragged_cross_hashed with corrected fingerprint
# and siphash.
if any(isinstance(inp, ragged_tensor.RaggedTensor) for inp in inputs):
raise ValueError('Hashing with ragged input is not supported yet.')
sparse_inputs = [
inp for inp in inputs if isinstance(inp, sparse_tensor.SparseTensor)
]
dense_inputs = [
inp for inp in inputs if not isinstance(inp, sparse_tensor.SparseTensor)
]
all_dense = True if not sparse_inputs else False
indices = [sp_inp.indices for sp_inp in sparse_inputs]
values = [sp_inp.values for sp_inp in sparse_inputs]
shapes = [sp_inp.dense_shape for sp_inp in sparse_inputs]
indices_out, values_out, shapes_out = gen_sparse_ops.SparseCrossHashed(
indices=indices,
values=values,
shapes=shapes,
dense_inputs=dense_inputs,
num_buckets=self.num_bins,
strong_hash=self.strong_hash,
salt=self.salt)
sparse_out = sparse_tensor.SparseTensor(indices_out, values_out, shapes_out)
if all_dense:
return sparse_ops.sparse_tensor_to_dense(sparse_out)
return sparse_out
def _get_string_to_hash_bucket_fn(self):
"""Returns the string_to_hash_bucket op to use based on `hasher_key`."""
# string_to_hash_bucket_fast uses FarmHash64 as hash function.
if not self.strong_hash:
return string_ops.string_to_hash_bucket_fast
# string_to_hash_bucket_strong uses SipHash64 as hash function.
else:
return functools.partial(
string_ops.string_to_hash_bucket_strong, key=self.salt)
def compute_output_shape(self, input_shape):
if not isinstance(input_shape, (tuple, list)):
return input_shape
input_shapes = input_shape
batch_size = None
for inp_shape in input_shapes:
inp_tensor_shape = tensor_shape.TensorShape(inp_shape).as_list()
if len(inp_tensor_shape) != 2:
raise ValueError('Inputs must be rank 2, get {}'.format(input_shapes))
if batch_size is None:
batch_size = inp_tensor_shape[0]
# The second dimension is dynamic based on inputs.
output_shape = [batch_size, None]
return tensor_shape.TensorShape(output_shape)
def compute_output_signature(self, input_spec):
if not isinstance(input_spec, (tuple, list)):
output_shape = self.compute_output_shape(input_spec.shape)
output_dtype = dtypes.int64
if isinstance(input_spec, sparse_tensor.SparseTensorSpec):
return sparse_tensor.SparseTensorSpec(
shape=output_shape, dtype=output_dtype)
else:
return tensor_spec.TensorSpec(shape=output_shape, dtype=output_dtype)
input_shapes = [x.shape for x in input_spec]
output_shape = self.compute_output_shape(input_shapes)
if any(
isinstance(inp_spec, ragged_tensor.RaggedTensorSpec)
for inp_spec in input_spec):
return tensor_spec.TensorSpec(shape=output_shape, dtype=dtypes.int64)
elif any(
isinstance(inp_spec, sparse_tensor.SparseTensorSpec)
for inp_spec in input_spec):
return sparse_tensor.SparseTensorSpec(
shape=output_shape, dtype=dtypes.int64)
return tensor_spec.TensorSpec(shape=output_shape, dtype=dtypes.int64)
def get_config(self):
config = {'num_bins': self.num_bins, 'salt': self.salt}
base_config = super(Hashing, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
|
cxxgtxy/tensorflow
|
tensorflow/python/keras/layers/preprocessing/hashing.py
|
Python
|
apache-2.0
| 11,280
|
"""Shark IQ Wrapper."""
from __future__ import annotations
import logging
from typing import Iterable
from sharkiqpy import OperatingModes, PowerModes, Properties, SharkIqVacuum
from homeassistant.components.vacuum import (
STATE_CLEANING,
STATE_DOCKED,
STATE_IDLE,
STATE_PAUSED,
STATE_RETURNING,
SUPPORT_BATTERY,
SUPPORT_FAN_SPEED,
SUPPORT_LOCATE,
SUPPORT_PAUSE,
SUPPORT_RETURN_HOME,
SUPPORT_START,
SUPPORT_STATE,
SUPPORT_STATUS,
SUPPORT_STOP,
StateVacuumEntity,
)
from homeassistant.helpers.update_coordinator import CoordinatorEntity
from .const import DOMAIN, SHARK
from .update_coordinator import SharkIqUpdateCoordinator
_LOGGER = logging.getLogger(__name__)
# Supported features
SUPPORT_SHARKIQ = (
SUPPORT_BATTERY
| SUPPORT_FAN_SPEED
| SUPPORT_PAUSE
| SUPPORT_RETURN_HOME
| SUPPORT_START
| SUPPORT_STATE
| SUPPORT_STATUS
| SUPPORT_STOP
| SUPPORT_LOCATE
)
OPERATING_STATE_MAP = {
OperatingModes.PAUSE: STATE_PAUSED,
OperatingModes.START: STATE_CLEANING,
OperatingModes.STOP: STATE_IDLE,
OperatingModes.RETURN: STATE_RETURNING,
}
FAN_SPEEDS_MAP = {
"Eco": PowerModes.ECO,
"Normal": PowerModes.NORMAL,
"Max": PowerModes.MAX,
}
STATE_RECHARGING_TO_RESUME = "recharging_to_resume"
# Attributes to expose
ATTR_ERROR_CODE = "last_error_code"
ATTR_ERROR_MSG = "last_error_message"
ATTR_LOW_LIGHT = "low_light"
ATTR_RECHARGE_RESUME = "recharge_and_resume"
ATTR_RSSI = "rssi"
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up the Shark IQ vacuum cleaner."""
coordinator: SharkIqUpdateCoordinator = hass.data[DOMAIN][config_entry.entry_id]
devices: Iterable[SharkIqVacuum] = coordinator.shark_vacs.values()
device_names = [d.name for d in devices]
_LOGGER.debug(
"Found %d Shark IQ device(s): %s",
len(device_names),
", ".join([d.name for d in devices]),
)
async_add_entities([SharkVacuumEntity(d, coordinator) for d in devices])
class SharkVacuumEntity(CoordinatorEntity, StateVacuumEntity):
"""Shark IQ vacuum entity."""
def __init__(self, sharkiq: SharkIqVacuum, coordinator: SharkIqUpdateCoordinator):
"""Create a new SharkVacuumEntity."""
super().__init__(coordinator)
self.sharkiq = sharkiq
def clean_spot(self, **kwargs):
"""Clean a spot. Not yet implemented."""
raise NotImplementedError()
def send_command(self, command, params=None, **kwargs):
"""Send a command to the vacuum. Not yet implemented."""
raise NotImplementedError()
@property
def is_online(self) -> bool:
"""Tell us if the device is online."""
return self.coordinator.device_is_online(self.sharkiq.serial_number)
@property
def name(self) -> str:
"""Device name."""
return self.sharkiq.name
@property
def serial_number(self) -> str:
"""Vacuum API serial number (DSN)."""
return self.sharkiq.serial_number
@property
def model(self) -> str:
"""Vacuum model number."""
if self.sharkiq.vac_model_number:
return self.sharkiq.vac_model_number
return self.sharkiq.oem_model_number
@property
def device_info(self) -> dict:
"""Device info dictionary."""
return {
"identifiers": {(DOMAIN, self.serial_number)},
"name": self.name,
"manufacturer": SHARK,
"model": self.model,
"sw_version": self.sharkiq.get_property_value(
Properties.ROBOT_FIRMWARE_VERSION
),
}
@property
def supported_features(self) -> int:
"""Flag vacuum cleaner robot features that are supported."""
return SUPPORT_SHARKIQ
@property
def is_docked(self) -> bool | None:
"""Is vacuum docked."""
return self.sharkiq.get_property_value(Properties.DOCKED_STATUS)
@property
def error_code(self) -> int | None:
"""Return the last observed error code (or None)."""
return self.sharkiq.error_code
@property
def error_message(self) -> str | None:
"""Return the last observed error message (or None)."""
if not self.error_code:
return None
return self.sharkiq.error_text
@property
def operating_mode(self) -> str | None:
"""Operating mode.."""
op_mode = self.sharkiq.get_property_value(Properties.OPERATING_MODE)
return OPERATING_STATE_MAP.get(op_mode)
@property
def recharging_to_resume(self) -> int | None:
"""Return True if vacuum set to recharge and resume cleaning."""
return self.sharkiq.get_property_value(Properties.RECHARGING_TO_RESUME)
@property
def state(self):
"""
Get the current vacuum state.
NB: Currently, we do not return an error state because they can be very, very stale.
In the app, these are (usually) handled by showing the robot as stopped and sending the
user a notification.
"""
if self.is_docked:
return STATE_DOCKED
return self.operating_mode
@property
def unique_id(self) -> str:
"""Return the unique id of the vacuum cleaner."""
return self.serial_number
@property
def available(self) -> bool:
"""Determine if the sensor is available based on API results."""
# If the last update was successful...
return self.coordinator.last_update_success and self.is_online
@property
def battery_level(self):
"""Get the current battery level."""
return self.sharkiq.get_property_value(Properties.BATTERY_CAPACITY)
async def async_return_to_base(self, **kwargs):
"""Have the device return to base."""
await self.sharkiq.async_set_operating_mode(OperatingModes.RETURN)
await self.coordinator.async_refresh()
async def async_pause(self):
"""Pause the cleaning task."""
await self.sharkiq.async_set_operating_mode(OperatingModes.PAUSE)
await self.coordinator.async_refresh()
async def async_start(self):
"""Start the device."""
await self.sharkiq.async_set_operating_mode(OperatingModes.START)
await self.coordinator.async_refresh()
async def async_stop(self, **kwargs):
"""Stop the device."""
await self.sharkiq.async_set_operating_mode(OperatingModes.STOP)
await self.coordinator.async_refresh()
async def async_locate(self, **kwargs):
"""Cause the device to generate a loud chirp."""
await self.sharkiq.async_find_device()
@property
def fan_speed(self) -> str:
"""Return the current fan speed."""
fan_speed = None
speed_level = self.sharkiq.get_property_value(Properties.POWER_MODE)
for k, val in FAN_SPEEDS_MAP.items():
if val == speed_level:
fan_speed = k
return fan_speed
async def async_set_fan_speed(self, fan_speed: str, **kwargs):
"""Set the fan speed."""
await self.sharkiq.async_set_property_value(
Properties.POWER_MODE, FAN_SPEEDS_MAP.get(fan_speed.capitalize())
)
await self.coordinator.async_refresh()
@property
def fan_speed_list(self):
"""Get the list of available fan speed steps of the vacuum cleaner."""
return list(FAN_SPEEDS_MAP)
# Various attributes we want to expose
@property
def recharge_resume(self) -> bool | None:
"""Recharge and resume mode active."""
return self.sharkiq.get_property_value(Properties.RECHARGE_RESUME)
@property
def rssi(self) -> int | None:
"""Get the WiFi RSSI."""
return self.sharkiq.get_property_value(Properties.RSSI)
@property
def low_light(self):
"""Let us know if the robot is operating in low-light mode."""
return self.sharkiq.get_property_value(Properties.LOW_LIGHT_MISSION)
@property
def extra_state_attributes(self) -> dict:
"""Return a dictionary of device state attributes specific to sharkiq."""
data = {
ATTR_ERROR_CODE: self.error_code,
ATTR_ERROR_MSG: self.sharkiq.error_text,
ATTR_LOW_LIGHT: self.low_light,
ATTR_RECHARGE_RESUME: self.recharge_resume,
}
return data
|
w1ll1am23/home-assistant
|
homeassistant/components/sharkiq/vacuum.py
|
Python
|
apache-2.0
| 8,401
|
#!/usr/bin/env python
# -*- mode: python; encoding: utf-8 -*-
#
"""This plugin renders the filesystem in a tree and a table."""
import cgi
import os
import random
import socket
from django import http
from M2Crypto import X509
from grr.gui import renderers
from grr.gui.plugins import fileview_widgets
from grr.gui.plugins import forms
from grr.gui.plugins import semantic
from grr.lib import aff4
from grr.lib import config_lib
from grr.lib import flow
from grr.lib import rdfvalue
from grr.lib import utils
from grr.lib.flows.general import export
class BufferReferenceRenderer(semantic.RDFProtoRenderer):
"""Render the buffer reference."""
classname = "BufferReference"
name = "Buffer Reference"
def Hexify(self, _, data):
"""Render a hexdump of the data."""
results = []
idx = 0
while idx < len(data):
raw = ""
result = ""
for _ in range(16):
ord_value = ord(data[idx])
result += "%02X " % ord_value
if ord_value > 32 and ord_value < 127:
raw += cgi.escape(data[idx])
else:
raw += "."
idx += 1
if idx >= len(data):
break
results.append(result + " " * (16 * 3 - len(result)) + raw)
return "<pre>%s</pre>" % "\n".join(results)
translator = dict(data=Hexify)
class StatModeRenderer(semantic.RDFValueRenderer):
"""Renders stat mode fields."""
classname = "StatMode"
layout_template = renderers.Template("""
<abbr title="Mode {{this.oct}}">{{this.mode_string|escape}}</abbr>""")
def Layout(self, request, response):
self.oct = oct(int(self.proxy))
self.mode_string = unicode(self.proxy)
return super(StatModeRenderer, self).Layout(request, response)
class StatEntryRenderer(semantic.RDFProtoRenderer):
"""Nicely format the StatEntry rdfvalue."""
classname = "StatEntry"
name = "Stat Entry"
def TranslateRegistryData(self, request, registry_data):
if registry_data.HasField("data"):
ret = repr(registry_data.GetValue())
else:
ret = utils.SmartStr(registry_data.GetValue())
# This is not escaped by the template!
return renderers.EscapingRenderer(ret).RawHTML(request)
translator = dict(registry_data=TranslateRegistryData)
class GrrMessageRenderer(semantic.RDFProtoRenderer):
"""Nicely format the GrrMessage rdfvalue."""
classname = "GrrMessage"
name = "GrrMessage"
def RenderPayload(self, request, unused_value):
rdf_object = self.proxy.payload
return semantic.FindRendererForObject(rdf_object).RawHTML(request)
translator = dict(args=RenderPayload)
class VolumeRenderer(semantic.RDFProtoRenderer):
"""Make the disk volume values human readable."""
classname = "Volume"
name = "Disk Volume"
def Layout(self, request, response):
"""Render the protobuf as a table."""
self.result = []
for descriptor, value in self.proxy.ListSetFields():
name = descriptor.name
friendly_name = descriptor.friendly_name or name
if name == "total_allocation_units" and value is not None:
value_str = "{0} ({1:.2f} GB)".format(
value, self.proxy.AUToGBytes(value))
self.result.append((friendly_name, descriptor.description, value_str))
elif name == "actual_available_allocation_units" and value is not None:
value_str = "{0} ({1:.2f} GB, {2:.0f}% free)".format(
value, self.proxy.AUToGBytes(value), self.proxy.FreeSpacePercent())
self.result.append((friendly_name, descriptor.description, value_str))
else:
renderer = semantic.FindRendererForObject(value)
self.result.append((friendly_name, descriptor.description,
renderer.RawHTML(request)))
return super(semantic.RDFProtoRenderer, self).Layout(request, response)
class CollectionRenderer(StatEntryRenderer):
"""Nicely format a Collection."""
classname = "CollectionList"
name = "Collection Listing"
layout_template = renderers.Template("""
<table class='proto_table'>
<thead>
<tr><th>Mode</th><th>Name</th><th>Size</th><th>Modified</th></tr>
</thead>
<tbody>
{% for row in this.result %}
<tr>
{% for value in row %}
<td class="proto_value">
{{value|safe}}
</td>
{% endfor %}
</tr>
{% endfor %}
</tbody>
</table>
""")
def Layout(self, request, response):
"""Render collections as a table."""
self.result = []
fields = "st_mode pathspec st_size st_mtime".split()
items = self.proxy.items
for item in items:
row = []
for name in fields:
value = getattr(item, name)
try:
value = self.translator[name](self, request, value)
# Regardless of what the error is, we need to escape the value.
except StandardError: # pylint: disable=broad-except
value = self.FormatFromTemplate(self.translator_error_template,
value=value)
row.append(value)
self.result.append(row)
return renderers.TemplateRenderer.Layout(self, request, response)
class GrepResultRenderer(semantic.RDFProtoRenderer):
"""Nicely format grep results."""
classname = "GrepResultList"
name = "Grep Result Listing"
layout_template = renderers.Template("""
<table class='proto_table'>
<thead>
<tr><th>Offset</th><th>Data</th></tr>
</thead>
<tbody>
{% for row in this.results %}
<tr>
{% for value in row %}
<td class="proto_value">
{{value|escape}}
</td>
{% endfor %}
</tr>
{% endfor %}
</tbody>
</table>
""")
def Layout(self, request, response):
self.results = []
for row in self.proxy:
self.results.append([row.offset, repr(row)])
return renderers.TemplateRenderer.Layout(self, request, response)
class UsersRenderer(semantic.RDFValueArrayRenderer):
classname = "Users"
name = "Users"
class NetworkAddressRenderer(semantic.RDFValueRenderer):
classname = "NetworkAddress"
name = "Network Address"
layout_template = renderers.Template("{{result|escape}}")
def Layout(self, request, response):
_ = request, response
return self.RenderFromTemplate(self.layout_template, response,
result=self.proxy.human_readable_address)
class InterfaceRenderer(semantic.RDFProtoRenderer):
"""Render a machine's interfaces."""
classname = "Interface"
name = "Interface Record"
def TranslateIp4Addresses(self, _, value):
return " ".join([socket.inet_ntop(socket.AF_INET, x) for x in value])
def TranslateMacAddress(self, _, value):
return value.human_readable_address
def TranslateIp6Addresses(self, _, value):
return " ".join([socket.inet_ntop(socket.AF_INET6, x) for x in value])
translator = dict(ip4_addresses=TranslateIp4Addresses,
ip6_addresses=TranslateIp6Addresses,
mac_address=TranslateMacAddress)
class StringListRenderer(renderers.TemplateRenderer):
"""Renders a list of strings as a proto table."""
layout_template = renderers.Template("""
<table class='proto_table'>
<tbody>
{% for string in this.strings %}
<tr><td>
{{string|escape}}
</td></tr>
{% endfor %}
</tbody>
</table>
""")
def __init__(self, strings, **kwargs):
self.strings = strings
super(StringListRenderer, self).__init__(**kwargs)
class ConnectionsRenderer(semantic.RDFValueArrayRenderer):
"""Renders connection listings."""
classname = "Connections"
name = "Connection Listing"
# The contents of result are safe since they were already escaped in
# connection_template.
layout_template = renderers.Template("""
<table class='proto_table'>
<tbody>
{% for connection in result %}
<tr>
{{connection|safe}}
</tr>
{% endfor %}
</tbody>
</table>
""")
connection_template = renderers.Template(
"""
<td>{{type|escape}}</td>
<td>{{local_address|escape}}</td>
<td>{{remote_address|escape}}</td>
<td>{{state|escape}}</td>
<td>{{pid|escape}}</td>
""")
types = {
(2, 1): "tcp",
(10, 1): "tcp6",
(23, 1): "tcp6",
(30, 1): "tcp6",
(2, 2): "udp",
(10, 2): "udp6",
(23, 2): "udp6",
(30, 2): "udp6",
}
def Layout(self, request, response):
"""Render the connection as a table."""
_ = request
result = []
for conn in self.proxy:
try:
conn_type = self.types[(conn.family, conn.type)]
except KeyError:
conn_type = "(%d,%d)" % (conn.family, conn.type)
local_address = "%s:%d" % (conn.local_address.ip,
conn.local_address.port)
if conn.remote_address.ip:
remote_address = "%s:%d" % (conn.remote_address.ip,
conn.remote_address.port)
else:
if ":" in conn.local_address.ip:
remote_address = ":::*"
else:
remote_address = "0.0.0.0:*"
result.append(self.FormatFromTemplate(self.connection_template,
type=conn_type,
local_address=local_address,
remote_address=remote_address,
state=utils.SmartStr(conn.state),
pid=conn.pid))
return self.RenderFromTemplate(self.layout_template, response,
result=sorted(result))
class NetworkConnections(ConnectionsRenderer):
"""Handle repeated NetworkConnection fields in protobufs."""
classname = "NetworkConnection"
class ProcessRenderer(semantic.RDFValueArrayRenderer):
"""Renders process listings."""
classname = "Processes"
name = "Process Listing"
def RenderFiles(self, request, file_list):
return StringListRenderer(sorted(file_list)).RawHTML(request)
translator = dict(open_files=RenderFiles)
class FilesystemRenderer(semantic.RDFValueArrayRenderer):
classname = "FileSystem"
name = "FileSystems"
class CertificateRenderer(semantic.RDFValueRenderer):
"""Render X509 Certs properly."""
classname = "RDFX509Cert"
name = "X509 Certificate"
# Implement hide/show behaviour for certificates as they tend to be long and
# uninteresting.
layout_template = renderers.Template("""
<div class='certificate_viewer' id='certificate_viewer_{{unique|escape}}'>
<ins class='fg-button ui-icon ui-icon-minus'/>
Click to show details.
<div class='contents'>
<pre>
{{ this.cert|escape }}
</pre>
</div>
</div>
""")
def Layout(self, request, response):
# Present the certificate as text
self.cert = X509.load_cert_string(str(self.proxy)).as_text()
response = super(CertificateRenderer, self).RenderAjax(request, response)
return self.CallJavascript(response, "CertificateRenderer.Layout")
class BlobArrayRenderer(semantic.RDFValueRenderer):
"""Render a blob array."""
classname = "BlobArray"
name = "Array"
layout_template = renderers.Template("""
{% for i in first %}
{{i|escape}}
{% endfor %}
{% for i in array %}
, {{i|escape}}
{% endfor %}
""")
def Layout(self, _, response):
array = []
for i in self.proxy:
for field in ["integer", "string", "data", "boolean"]:
if i.HasField(field):
array.append(getattr(i, field))
break
return self.RenderFromTemplate(self.layout_template, response,
first=array[0:1], array=array[1:])
class AgeSelector(semantic.RDFValueRenderer):
"""Allows the user to select a different version for viewing objects."""
layout_template = renderers.Template("""
<img src=static/images/window-duplicate.png class='grr-icon version-selector'>
<span age='{{this.int}}'><nobr>{{this.proxy|escape}}</nobr></span>
""")
def Layout(self, request, response):
self.int = int(self.proxy or 0)
return super(AgeSelector, self).Layout(request, response)
class AgeRenderer(AgeSelector):
classname = "RDFDatetime"
layout_template = renderers.Template("""
<span age='{{this.int}}'><nobr>{{this.proxy|escape}}</nobr></span>
""")
class AbstractFileTable(renderers.TableRenderer):
"""A table that displays the content of a directory.
Listening Javascript Events:
- tree_select(aff4_path) - A selection event on the tree informing us of the
tree path. We re-layout the entire table on this event to show the
directory listing of aff4_path.
Generated Javascript Events:
- file_select(aff4_path, age) - The full AFF4 path for the file in the
directory which is selected. Age is the latest age we wish to see.
Internal State:
- client_id.
"""
layout_template = (renderers.TableRenderer.layout_template + """
<div id="version_selector_dialog_{{unique|escape}}"
class="version-selector-dialog modal wide-modal high-modal"></div>
""")
toolbar = None # Toolbar class to render above table.
content_cache = None
post_parameters = ["aff4_path"]
root_path = "/" # Paths will all be under this path.
# This can restrict the view to only certain types of objects. It should be a
# list of types to show.
visible_types = None
def __init__(self, **kwargs):
super(AbstractFileTable, self).__init__(**kwargs)
if AbstractFileTable.content_cache is None:
AbstractFileTable.content_cache = utils.TimeBasedCache()
def RenderAjax(self, request, response):
response = super(AbstractFileTable, self).RenderAjax(request, response)
return self.CallJavascript(response, "AbstractFileTable.RenderAjax")
def Layout(self, request, response):
"""Populate the table state with the request."""
# Draw the toolbar first
if self.toolbar:
tb_cls = renderers.Renderer.classes[self.toolbar]
tb_cls().Layout(request, response)
response = super(AbstractFileTable, self).Layout(request, response)
return self.CallJavascript(response, "AbstractFileTable.Layout",
renderer=self.__class__.__name__,
client_id=self.state.get("client_id", ""))
def BuildTable(self, start_row, end_row, request):
"""Populate the table."""
# Default sort direction
sort = request.REQ.get("sort", "Name:asc")
try:
reverse_sort = sort.split(":")[1] == "desc"
except IndexError:
reverse_sort = False
filter_term = request.REQ.get("filter")
aff4_path = request.REQ.get("aff4_path", self.root_path)
urn = rdfvalue.RDFURN(aff4_path)
filter_string = None
if filter_term:
column, regex = filter_term.split(":", 1)
escaped_regex = utils.EscapeRegex(aff4_path + "/")
# The start anchor refers only to this directory.
if regex.startswith("^"):
escaped_regex += utils.EscapeRegex(regex[1:])
else:
escaped_regex += ".*" + utils.EscapeRegex(regex)
filter_string = "subject matches '%s'" % escaped_regex
# For now we just list the directory
try:
key = utils.SmartUnicode(urn)
if filter_string:
key += ":" + filter_string
# Open the directory as a directory.
directory_node = aff4.FACTORY.Open(urn, token=request.token).Upgrade(
"VFSDirectory")
if not directory_node:
raise IOError()
key += str(directory_node.Get(directory_node.Schema.LAST))
key += ":" + str(request.token)
try:
children = self.content_cache.Get(key)
except KeyError:
# Only show the direct children.
children = sorted(directory_node.Query(filter_string=filter_string,
limit=100000))
# Filter the children according to types.
if self.visible_types:
children = [x for x in children
if x.__class__.__name__ in self.visible_types]
self.content_cache.Put(key, children)
try:
self.message = "Directory Listing '%s' was taken on %s" % (
aff4_path, directory_node.Get(directory_node.Schema.TYPE.age))
except AttributeError:
pass
except IOError:
children = []
children.sort(reverse=reverse_sort)
row_index = start_row
# Make sure the table knows how large it is for paging.
self.size = len(children)
self.columns[1].base_path = urn
for fd in children[start_row:end_row]:
# We use the timestamp on the TYPE as a proxy for the last update time
# of this object - its only an estimate.
fd_type = fd.Get(fd.Schema.TYPE)
if fd_type:
self.AddCell(row_index, "Age", rdfvalue.RDFDatetime(fd_type.age))
self.AddCell(row_index, "Name", fd.urn)
# Add the fd to all the columns
for column in self.columns:
# This sets AttributeColumns directly from their fd.
if isinstance(column, semantic.AttributeColumn):
column.AddRowFromFd(row_index, fd)
if "Container" in fd.behaviours:
self.AddCell(row_index, "Icon", dict(icon="directory",
description="Directory"))
else:
self.AddCell(row_index, "Icon", dict(icon="file",
description="File Like Object"))
row_index += 1
if row_index > end_row:
return
class FileTable(AbstractFileTable):
"""A table that displays the content of a directory.
Listening Javascript Events:
- tree_select(aff4_path) - A selection event on the tree informing us of the
tree path. We re-layout the entire table on this event to show the
directory listing of aff4_path.
Generated Javascript Events:
- file_select(aff4_path, age) - The full AFF4 path for the file in the
directory which is selected. Age is the latest age we wish to see.
Internal State:
- client_id.
"""
root_path = None # The root will be dynamically set to the client path.
toolbar = "Toolbar"
context_help_url = "user_manual.html#_listing_the_virtual_filesystem"
def __init__(self, **kwargs):
super(FileTable, self).__init__(**kwargs)
self.AddColumn(semantic.RDFValueColumn(
"Icon", renderer=semantic.IconRenderer, width="40px"))
self.AddColumn(semantic.RDFValueColumn(
"Name", renderer=semantic.SubjectRenderer, sortable=True, width="20%"))
self.AddColumn(semantic.AttributeColumn("type", width="10%"))
self.AddColumn(semantic.AttributeColumn("size", width="10%"))
self.AddColumn(semantic.AttributeColumn("stat.st_size", width="15%"))
self.AddColumn(semantic.AttributeColumn("stat.st_mtime", width="15%"))
self.AddColumn(semantic.AttributeColumn("stat.st_ctime", width="15%"))
self.AddColumn(semantic.RDFValueColumn(
"Age", renderer=AgeSelector, width="15%"))
def Layout(self, request, response):
"""Populate the table state with the request."""
self.state["client_id"] = client_id = request.REQ.get("client_id")
self.root_path = client_id
return super(FileTable, self).Layout(request, response)
def BuildTable(self, start_row, end_row, request):
client_id = request.REQ.get("client_id")
self.root_path = client_id
return super(FileTable, self).BuildTable(start_row, end_row, request)
class FileSystemTree(renderers.TreeRenderer):
"""A FileSystem navigation Tree.
Generated Javascript Events:
- tree_select(aff4_path) - The full aff4 path for the branch which the user
selected.
Internal State:
- client_id: The client this tree is showing.
- aff4_root: The aff4 node which forms the root of this tree.
"""
# Flows are special children which confuse users when seen, so we remove them
# from the tree. Note that they are still visible in the table.
hidden_branches = ["/flows"]
def Layout(self, request, response):
self.state["client_id"] = client_id = request.REQ.get("client_id")
self.state["aff4_root"] = request.REQ.get("aff4_root", client_id)
response = super(FileSystemTree, self).Layout(request, response)
return self.CallJavascript(response, "FileSystemTree.Layout")
def RenderBranch(self, path, request):
"""Renders tree leafs for filesystem path."""
client_id = request.REQ["client_id"]
aff4_root = rdfvalue.RDFURN(request.REQ.get("aff4_root", client_id))
# Path is relative to the aff4 root specified.
urn = aff4_root.Add(path)
try:
# Open the client
directory = aff4.FACTORY.Open(urn, token=request.token).Upgrade(
"VFSDirectory")
children = [ch for ch in directory.OpenChildren(limit=100000)
if "Container" in ch.behaviours]
try:
self.message = "Directory %s Last retrieved %s" % (
urn, directory.Get(directory.Schema.TYPE).age)
except AttributeError:
pass
for child in sorted(children):
self.AddElement(child.urn.RelativeName(urn))
except IOError as e:
self.message = "Error fetching %s: %s" % (urn, e)
class RecursiveRefreshDialog(renderers.ConfirmationDialogRenderer):
"""Dialog that allows user to recursively update directories."""
post_parameters = ["aff4_path"]
header = "Recursive Refresh"
proceed_button_title = "Refresh!"
content_template = renderers.Template("""
{{this.recursive_refresh_form|safe}}
""")
ajax_template = renderers.Template("""
<p class="text-info">Refresh started successfully!</p>
""")
def Layout(self, request, response):
args = rdfvalue.RecursiveListDirectoryArgs()
self.recursive_refresh_form = forms.SemanticProtoFormRenderer(
args, supressions=["pathspec"]).RawHTML(request)
return super(RecursiveRefreshDialog, self).Layout(request, response)
def RenderAjax(self, request, response):
aff4_path = rdfvalue.RDFURN(request.REQ.get("aff4_path"))
args = forms.SemanticProtoFormRenderer(
rdfvalue.RecursiveListDirectoryArgs()).ParseArgs(request)
fd = aff4.FACTORY.Open(aff4_path, aff4_type="AFF4Volume",
token=request.token)
args.pathspec = fd.real_pathspec
flow.GRRFlow.StartFlow(client_id=aff4_path.Split()[0],
flow_name="RecursiveListDirectory",
args=args,
notify_to_user=True,
token=request.token)
return self.RenderFromTemplate(self.ajax_template, response)
class Toolbar(renderers.TemplateRenderer):
"""A navigation enhancing toolbar.
Listening Javascript Events:
- AttributeUpdated(aff4_path, attribute): This event is fired then the
aff4_path has updated. If the content of this event have changed, we emit
the tree_select and file_select events to force the table to redraw.
Generated Javascript Events:
- file_select(aff4_path), tree_select(aff4_path) are fired when the buttons
are clicked.
Internal State:
- aff4_path: The path we are viewing now in the table.
"""
layout_template = renderers.Template("""
<div class="navbar navbar-default">
<div class="navbar-inner">
<div class="navbar-form pull-right">
<button class="btn btn-default" id='refresh_{{unique|escape}}'
name="Refresh" title='Refresh this directory listing.'>
<img src='/static/images/stock_refresh.png' class="toolbar_icon" />
</button>
<button class="btn btn-default" id='recursive_refresh_{{unique|escape}}'
title='Refresh this directory listing.' style='position: relative'
name="RecursiveRefresh" data-toggle="modal"
data-target="#recursive_refresh_dialog_{{unique|escape}}">
<img src='/static/images/stock_refresh.png' class="toolbar_icon" />
<span style='position: absolute; left: 23px; top: 5px; font-weight: bold;
font-size: 18px; -webkit-text-stroke: 1px #000; color: #fff'>R</span>
</button>
<button class="btn btn-default" id='rweowned'
title='Is this machine pwned?'>
<img src='/static/images/stock_dialog_question.png'
class="toolbar_icon" />
</button>
</div>
<ul class="breadcrumb">
{% for path, fullpath, fullpath_id, i, last in this.paths %}
<li {% if forloop.last %}class="active"{% endif %}>
{% if forloop.last %}
{{path|escape}}
{% else %}
<a id="path_{{i|escape}}">{{path|escape}}</a>
{% endif %}
</li>
{% endfor %}
<div class="clearfix"></div>
</ul>
</div>
</div>
<div id="refresh_action" class="hide"></div>
<div id="rweowned_dialog" class="modal"></div>
<div id="recursive_refresh_dialog_{{unique|escape}}"
class="modal" tabindex="-1" role="dialog" aria-hidden="true">
</div>
""")
def Layout(self, request, response):
"""Render the toolbar."""
self.state["client_id"] = client_id = request.REQ.get("client_id")
self.state["aff4_path"] = aff4_path = request.REQ.get(
"aff4_path", client_id)
client_urn = rdfvalue.ClientURN(client_id)
self.paths = [("/", client_urn, "_", 0)]
for path in rdfvalue.RDFURN(aff4_path).Split()[1:]:
previous = self.paths[-1]
fullpath = previous[1].Add(path)
self.paths.append((path, fullpath,
renderers.DeriveIDFromPath(
fullpath.RelativeName(client_urn)),
previous[3] + 1))
response = super(Toolbar, self).Layout(request, response)
return self.CallJavascript(response, "Toolbar.Layout",
aff4_path=utils.SmartUnicode(aff4_path),
paths=self.paths)
class UpdateAttribute(renderers.TemplateRenderer):
"""Reloads a directory listing from client.
The renderer will launch the flow in the layout method, and then call its
render method every few seconds to check if the flow is complete.
Post Parameters:
- aff4_path: The aff4 path to update the attribute for.
- aff4_type: If provided, the aff4 object will be upgraded to this type
before updating.
- attribute: The attribute name to update.
Generated Javascript Events:
- AttributeUpdated(aff4_path, attribute) - When the flow is complete we emit
this event.
"""
# Number of ms to wait
poll_time = 1000
def ParseRequest(self, request):
"""Parses parameters from the request."""
self.aff4_path = request.REQ.get("aff4_path")
self.flow_urn = request.REQ.get("flow_urn")
# Refresh the contains attribute
self.attribute_to_refresh = request.REQ.get("attribute", "CONTAINS")
def Layout(self, request, response):
"""Render the toolbar."""
self.ParseRequest(request)
try:
client_id = rdfvalue.RDFURN(self.aff4_path).Split(2)[0]
update_flow_urn = flow.GRRFlow.StartFlow(
client_id=client_id, flow_name="UpdateVFSFile",
token=request.token, vfs_file_urn=rdfvalue.RDFURN(self.aff4_path),
attribute=self.attribute_to_refresh)
update_flow = aff4.FACTORY.Open(
update_flow_urn, aff4_type="UpdateVFSFile", token=request.token)
self.flow_urn = str(update_flow.state.get_file_flow_urn)
except IOError as e:
raise IOError("Sorry. This path cannot be refreshed due to %s" % e)
if self.flow_urn:
response = super(UpdateAttribute, self).Layout(request, response)
return self.CallJavascript(response,
"UpdateAttribute.Layout",
aff4_path=self.aff4_path,
flow_urn=self.flow_urn,
attribute_to_refresh=self.attribute_to_refresh,
poll_time=self.poll_time)
def RenderAjax(self, request, response):
"""Continue polling as long as the flow is in flight."""
super(UpdateAttribute, self).RenderAjax(request, response)
self.ParseRequest(request)
# Check if the flow is still in flight.
try:
flow_obj = aff4.FACTORY.Open(self.flow_urn, token=request.token)
complete = not flow_obj.GetRunner().IsRunning()
except IOError:
# Something went wrong, stop polling.
complete = True
if complete:
return renderers.JsonResponse("1")
class AFF4ReaderMixin(object):
"""A helper which reads a buffer from an AFF4 object.
This is meant to be mixed in with the HexView and TextView renderers.
"""
def ReadBuffer(self, request, offset, length):
"""Renders the HexTable."""
# Allow derived classes to just set the urn directly
self.aff4_path = request.REQ.get("aff4_path")
self.age = request.REQ.get("age")
if not self.aff4_path: return
try:
fd = aff4.FACTORY.Open(self.aff4_path, token=request.token,
age=rdfvalue.RDFDatetime(self.age))
self.total_size = int(fd.Get(fd.Schema.SIZE))
except (IOError, TypeError, AttributeError):
self.total_size = 0
return ""
fd.Seek(offset)
return fd.Read(length)
class FileHexViewer(AFF4ReaderMixin, fileview_widgets.HexView):
"""A HexView renderer."""
class FileTextViewer(AFF4ReaderMixin, fileview_widgets.TextView):
"""A TextView renderer."""
class VirtualFileSystemView(renderers.Splitter):
"""This is the main view to browse files."""
behaviours = frozenset(["Host"])
order = 10
description = "Browse Virtual Filesystem"
left_renderer = "FileSystemTree"
top_right_renderer = "FileTable"
bottom_right_renderer = "AFF4ObjectRenderer"
class DownloadView(renderers.TemplateRenderer):
"""Renders a download page."""
# We allow a longer execution time here to be able to download large files.
max_execution_time = 60 * 15
layout_template = renderers.Template("""
<h3>{{ this.path|escape }}</h3>
<div id="{{ unique|escape }}_action" class="hide"></div>
{% if this.hash %}
Hash was {{ this.hash|escape }}.
{% endif %}
{% if this.file_exists %}
As downloaded on {{ this.age|escape }}.<br>
<p>
<button id="{{ unique|escape }}_2" class="btn btn-default">
Download ({{this.size|escape}} bytes)
</button>
</p>
<p>or download using command line export tool:</p>
<pre>
{{ this.export_command_str|escape }}
</pre>
<hr/>
{% endif %}
<button id="{{ unique|escape }}" class="btn btn-default">
Get a new Version
</button>
</div>
""")
error_template = renderers.Template("""
<div class="alert alert-danger alert-block">
<h4>Error!</h4> {{this.path|escape}} does not appear to be a file object.
<p><em>{{this.error_message|escape}}</em></p>
</div>
""")
bad_extensions = [".bat", ".cmd", ".exe", ".com", ".pif", ".py", ".pl",
".scr", ".vbs"]
def Layout(self, request, response):
"""Present a download form."""
self.age = rdfvalue.RDFDatetime(request.REQ.get("age"))
client_id = request.REQ.get("client_id")
aff4_path = request.REQ.get("aff4_path", client_id)
try:
fd = aff4.FACTORY.Open(aff4_path, token=request.token, age=self.age)
self.path = fd.urn
self.hash = fd.Get(fd.Schema.HASH, None)
self.size = fd.Get(fd.Schema.SIZE)
# If data is available to read - we present the download button.
self.file_exists = False
try:
if fd.Read(1):
self.file_exists = True
except (IOError, AttributeError):
pass
self.export_command_str = u" ".join([
config_lib.CONFIG["AdminUI.export_command"],
"--username", utils.ShellQuote(request.token.username),
"--reason", utils.ShellQuote(request.token.reason),
"file",
"--path", utils.ShellQuote(aff4_path),
"--output", "."])
response = super(DownloadView, self).Layout(request, response)
return self.CallJavascript(response, "DownloadView.Layout",
aff4_path=aff4_path,
client_id=client_id,
age_int=int(self.age),
file_exists=self.file_exists,
renderer=self.__class__.__name__,
reason=request.token.reason)
except (AttributeError, IOError) as e:
# Render the error template instead.
self.error_message = e.message
return renderers.TemplateRenderer.Layout(self, request, response,
self.error_template)
def Download(self, request, _):
"""Stream the file into the browser."""
# Open the client
client_id = request.REQ.get("client_id")
self.aff4_path = request.REQ.get("aff4_path", client_id)
self.age = rdfvalue.RDFDatetime(request.REQ.get("age")) or aff4.NEWEST_TIME
self.token = request.token
# If set, we don't append .noexec to dangerous extensions.
safe_extension = bool(request.REQ.get("safe_extension", 0))
if self.aff4_path:
def Generator():
fd = aff4.FACTORY.Open(self.aff4_path, token=request.token,
age=self.age)
while True:
data = fd.Read(1000000)
if not data: break
yield data
filename = os.path.basename(utils.SmartStr(self.aff4_path))
if not safe_extension:
for ext in self.bad_extensions:
if filename.lower().endswith(ext):
filename += ".noexec"
response = http.HttpResponse(content=Generator(),
content_type="binary/octet-stream")
# This must be a string.
response["Content-Disposition"] = ("attachment; filename=%s" % filename)
return response
class UploadView(renderers.TemplateRenderer):
"""Renders an upload page."""
post_parameters = ["tree_path"]
upload_handler = "UploadHandler"
layout_template = renderers.Template("""
{% if grr.state.tree_path %}
<h3>Upload to {{ grr.state.tree_path|escape }}</h3>
{% endif %}
<form id="{{unique|escape}}_form" enctype="multipart/form-data">
<input class="btn btn-default btn-file" id="{{ unique|escape }}_file"
type="file" name="uploadFile" />
</form>
<button class="btn btn-default" id="{{ unique|escape }}_upload_button">
Upload
</button>
<br/><br/>
<div id="{{ unique|escape }}_upload_results"/>
<div id="{{ unique|escape }}_upload_progress"/>
""")
def Layout(self, request, response):
response = super(UploadView, self).Layout(request, response)
return self.CallJavascript(response, "UploadView.Layout",
upload_handler=self.upload_handler,
upload_state=self.state)
class UploadHandler(renderers.TemplateRenderer):
"""Handles an uploaded file."""
# We allow a longer execution time here to be able to upload large files.
max_execution_time = 60 * 2
storage_path = "aff4:/config"
error_template = renderers.Template("""
Error: {{this.error|escape}}.
""")
success_template = renderers.Template("""
Success: File uploaded to {{this.dest_path|escape}}.
""")
def RenderAjax(self, request, response):
"""Store the file on the server."""
super(UploadHandler, self).RenderAjax(request, response)
try:
self.uploaded_file = request.FILES.items()[0][1]
self.dest_path, aff4_type = self.GetFilePath(request)
self.ValidateFile()
dest_file = aff4.FACTORY.Create(self.dest_path, aff4_type=aff4_type,
token=request.token)
for chunk in self.uploaded_file.chunks():
dest_file.Write(chunk)
dest_file.Close()
return super(UploadHandler, self).Layout(request, response,
self.success_template)
except (IOError, IndexError) as e:
self.error = e
return super(UploadHandler, self).Layout(request, response,
self.error_template)
def GetFilePath(self, unused_request):
"""Get the path to write the file to and aff4 type as a tuple."""
path = rdfvalue.RDFURN(self.storage_path).Add(self.uploaded_file.name)
return path, "VFSFile"
def ValidateFile(self):
"""Check if a file matches what we expected to be uploaded.
Raises:
IOError: On validation failure.
"""
if self.uploaded_file.size < 100:
raise IOError("File is too small.")
class AFF4Stats(renderers.TemplateRenderer):
"""Show stats about the currently selected AFF4 object.
Post Parameters:
- aff4_path: The aff4 path to update the attribute for.
- age: The version of the AFF4 object to display.
"""
# This renderer applies to this AFF4 type
name = "Stats"
css_class = ""
historical_renderer = "HistoricalView"
# If specified, only these attributes will be shown.
attributes_to_show = None
layout_template = renderers.Template("""
<div class="container-fluid">
<div class="row horizontally-padded">
<div id="{{unique|escape}}" class="{{this.css_class}}">
<h3>{{ this.path|escape }} @ {{this.age|escape}}</h3>
<table id='{{ unique|escape }}'
class="table table-condensed table-bordered table-fullwidth fixed-columns">
<colgroup>
<col style="width: 20ex" />
<col style="width: 100%" />
<col style="width: 20ex" />
</colgroup>
<thead>
<tr>
<th class="ui-state-default">Attribute</th>
<th class="ui-state-default">Value</th>
<th class="ui-state-default">Age</th>
</tr>
</thead>
<tbody>
{% for name, attributes in this.classes %}
<tr>
<td colspan=3 class="grr_aff4_type_header"><b>{{ name|escape }}</b></td>
</tr>
{% for attribute, description, value, age, multi in attributes %}
<tr>
<td class='attribute_opener' attribute="{{attribute|escape}}">
{% if multi %}
<ins class='fg-button ui-icon ui-icon-plus'/>
{% endif %}
<b title='{{ description|escape }}'>{{ attribute|escape }}</b>
</td>
<td>
<div class="default_view">{{ value|safe }}</div>
<div id="content_{{unique|escape}}_{{attribute|escape}}"
class="historical_view"></div>
</td>
<td><div class='non-breaking'>{{ age|escape }}</div></td>
</tr>
{% endfor %}
{% endfor %}
</tbody>
</table>
</div>
</div>
</div>
""")
def Layout(self, request, response, client_id=None, aff4_path=None, age=None):
"""Introspect the Schema for each object."""
# Allow derived classes to just set the client_id/aff4_path/age directly
self.client_id = client_id or request.REQ.get("client_id")
self.aff4_path = aff4_path or request.REQ.get("aff4_path")
self.age = request.REQ.get("age")
if self.age is None:
self.age = rdfvalue.RDFDatetime().Now()
else:
self.age = rdfvalue.RDFDatetime(self.age)
if not self.aff4_path: return
try:
self.fd = aff4.FACTORY.Open(self.aff4_path, token=request.token,
age=age or self.age)
self.classes = self.RenderAFF4Attributes(self.fd, request)
self.state["path"] = self.path = utils.SmartStr(self.fd.urn)
except IOError:
self.path = "Unable to open %s" % self.urn
self.classes = []
response = super(AFF4Stats, self).Layout(request, response)
return self.CallJavascript(response, "AFF4Stats.Layout",
historical_renderer=self.historical_renderer,
historical_renderer_state=self.state)
def RenderAFF4Attributes(self, fd, request=None):
"""Returns attributes rendered by class."""
classes = []
attribute_names = set()
for flow_cls in fd.__class__.__mro__:
if not hasattr(flow_cls, "SchemaCls"):
continue
schema = flow_cls.SchemaCls
attributes = []
for name, attribute in sorted(schema.__dict__.items()):
if not isinstance(attribute, aff4.Attribute): continue
# If we already showed this attribute we move on
if attribute.predicate in attribute_names: continue
values = list(fd.GetValuesForAttribute(attribute))
multi = len(values) > 1
if values:
attribute_names.add(attribute.predicate)
value_renderer = semantic.FindRendererForObject(values[0])
if self.attributes_to_show and name not in self.attributes_to_show:
continue
attributes.append((name, attribute.description,
# This is assumed to be in safe RawHTML and not
# escaped.
value_renderer.RawHTML(request),
rdfvalue.RDFDatetime(values[0].age), multi))
if attributes:
classes.append((flow_cls.__name__, attributes))
return classes
class HostInformation(AFF4Stats):
"""View information about the host."""
description = "Host Information"
behaviours = frozenset(["Host"])
order = 0
css_class = "TableBody"
def Layout(self, request, response, client_id=None):
client_id = client_id or request.REQ.get("client_id")
urn = rdfvalue.ClientURN(client_id)
# This verifies we have auth for deep client paths. If this raises, we
# force the auth screen.
aff4.FACTORY.Open(rdfvalue.RDFURN(urn).Add("CheckAuth"),
token=request.token, mode="r")
return super(HostInformation, self).Layout(request, response,
client_id=client_id,
aff4_path=urn)
class AFF4ObjectRenderer(renderers.TemplateRenderer):
"""This renderer delegates to the correct subrenderer based on the request.
Listening Javascript Events:
- file_select(aff4_path, age) - A selection event on the file table
informing us of a new aff4 file to show. We redraw the entire bottom right
side using a new renderer.
"""
layout_template = renderers.Template("""
<div id="{{unique|escape}}"></div>
""")
# When a message appears on this queue we choose a new renderer.
event_queue = "file_select"
def Layout(self, request, response):
"""Produces a layout as returned by the subrenderer."""
# This is the standard renderer for now.
subrenderer = FileViewTabs
client_id = request.REQ.get("client_id")
aff4_path = request.REQ.get("aff4_path", client_id)
if not aff4_path:
raise RuntimeError("No valid aff4 path or client id provided")
fd = aff4.FACTORY.Open(aff4_path, token=request.token)
fd_type = fd.Get(fd.Schema.TYPE)
if fd_type:
for cls in self.classes.values():
if getattr(cls, "aff4_type", None) == fd_type:
subrenderer = cls
subrenderer(fd).Layout(request, response)
response = super(AFF4ObjectRenderer, self).Layout(request, response)
return self.CallJavascript(response, "AFF4ObjectRenderer.Layout",
event_queue=self.event_queue,
renderer=self.__class__.__name__)
class FileViewTabs(renderers.TabLayout):
"""Show a tabset to inspect the selected file.
Internal State:
- aff4_path - The AFF4 object we are currently showing.
- age: The version of the AFF4 object to display.
"""
FILE_TAB_NAMES = ["Stats", "Download", "TextView", "HexView"]
FILE_DELEGATED_RENDERERS = ["AFF4Stats", "DownloadView", "FileTextViewer",
"FileHexViewer"]
COLLECTION_TAB_NAMES = ["Stats", "Results", "Export"]
COLLECTION_DELEGATED_RENDERERS = ["AFF4Stats", "RDFValueCollectionRenderer",
"CollectionExportView"]
fd = None
def __init__(self, fd=None, **kwargs):
self.fd = fd
super(FileViewTabs, self).__init__(**kwargs)
def DisableTabs(self):
self.disabled = [tab_renderer for tab_renderer in self.delegated_renderers
if tab_renderer != "AFF4Stats"]
def Layout(self, request, response):
"""Check if the file is a readable and disable the tabs."""
client_id = request.REQ.get("client_id")
self.aff4_path = request.REQ.get("aff4_path", client_id)
self.age = request.REQ.get("age", rdfvalue.RDFDatetime().Now())
self.state = dict(aff4_path=self.aff4_path, age=int(self.age))
# By default we assume that we're dealing with a regular file,
# so we show tabs for files.
self.names = self.FILE_TAB_NAMES
self.delegated_renderers = self.FILE_DELEGATED_RENDERERS
try:
if self.fd is not None:
self.fd = aff4.FACTORY.Open(self.aff4_path, token=request.token)
# If file is actually a collection, then show collections-related tabs.
if isinstance(self.fd, aff4.RDFValueCollection):
self.names = self.COLLECTION_TAB_NAMES
self.delegated_renderers = self.COLLECTION_DELEGATED_RENDERERS
# If collection doesn't have StatEntries or FileFinderResults, disable
# the Export tab.
if not CollectionExportView.IsCollectionExportable(self.fd,
token=request.token):
self.disabled = ["CollectionExportView"]
if isinstance(self.fd, aff4.RekallResponseCollection):
# Make a copy so this change is not permanent.
self.delegated_renderers = self.delegated_renderers[:]
self.delegated_renderers[1] = "RekallResponseCollectionRenderer"
else:
if not hasattr(self.fd, "Read"):
self.DisableTabs()
except IOError:
self.DisableTabs()
return super(FileViewTabs, self).Layout(request, response)
class CollectionExportView(renderers.TemplateRenderer):
"""Displays export command to be used to export collection."""
layout_template = renderers.Template("""
<p>To download all the files referenced in the collection, you can use
this command:</p>
<pre>
{{ this.export_command_str|escape }}
</pre>
<p><em>NOTE: You can optionally add <tt>--dump_client_info</tt> flag to
dump client info in YAML format.</em></p>
""")
@staticmethod
def IsCollectionExportable(collection_urn_or_obj,
token=None):
if isinstance(collection_urn_or_obj, aff4.RDFValueCollection):
collection = collection_urn_or_obj
else:
collection = aff4.FACTORY.Create(
collection_urn_or_obj, "RDFValueCollection", mode="r", token=token)
if not collection:
return False
try:
export.CollectionItemToAff4Path(collection[0])
except export.ItemNotExportableError:
return False
return True
def Layout(self, request, response, aff4_path=None):
aff4_path = aff4_path or request.REQ.get("aff4_path")
self.export_command_str = " ".join([
config_lib.CONFIG["AdminUI.export_command"],
"--username", utils.ShellQuote(request.token.username),
"--reason", utils.ShellQuote(request.token.reason),
"collection_files",
"--path", utils.ShellQuote(aff4_path),
"--output", "."])
return super(CollectionExportView, self).Layout(request, response)
class RWeOwned(renderers.TemplateRenderer):
"""A magic 8 ball reply to the question - Are we Owned?"""
layout_template = renderers.Template("""
<div class="modal-dialog">
<div class="modal-content">
<div class="modal-header">
<button type="button" class="close" data-dismiss="modal" aria-hidden="true">
x
</button>
<h3>Are we owned?</h3>
</div>
<div class="modal-body">
<p class="text-info">
{{this.choice|escape}}
</div>
</div>
</div>
""")
def Layout(self, request, response):
"""Render a magic 8 ball easter-egg."""
options = u"""It is certain
You were eaten by a Grue!
中国 got you!!
All your bases are belong to us!
Maybe it was the Russians?
It is decidedly so
Without a doubt
Yes - definitely
You may rely on it
As I see it, yes
Most likely
Outlook good
Signs point to yes
Yes
Reply hazy, try again
Ask again later
Better not tell you now
Cannot predict now
Concentrate and ask again
Don't count on it
My reply is no
My sources say no
Outlook not so good
Very doubtful""".splitlines()
self.choice = options[random.randint(0, len(options) - 1)]
return super(RWeOwned, self).Layout(request, response)
class HistoricalView(renderers.TableRenderer):
"""Show historical view for an attribute."""
def __init__(self, **kwargs):
super(HistoricalView, self).__init__(**kwargs)
self.AddColumn(semantic.RDFValueColumn("Age"))
def Layout(self, request, response):
"""Add the columns to the table."""
self.AddColumn(semantic.RDFValueColumn(request.REQ.get("attribute")))
return super(HistoricalView, self).Layout(request, response)
def BuildTable(self, start_row, end_row, request):
"""Populate the table with attribute values."""
attribute_name = request.REQ.get("attribute")
if attribute_name is None:
return
urn = request.REQ.get("urn")
client_id = request.REQ.get("client_id")
path = request.REQ.get("path")
self.AddColumn(semantic.RDFValueColumn(attribute_name))
fd = aff4.FACTORY.Open(urn or path or client_id,
token=request.token, age=aff4.ALL_TIMES)
self.BuildTableFromAttribute(attribute_name, fd, start_row, end_row)
def BuildTableFromAttribute(self, attribute_name, fd, start_row, end_row):
"""Build the table for the attribute."""
attribute = getattr(fd.Schema, attribute_name)
additional_rows = False
i = 0
for i, value in enumerate(fd.GetValuesForAttribute(attribute)):
if i > end_row:
additional_rows = True
break
if i < start_row:
continue
self.AddCell(i, "Age", rdfvalue.RDFDatetime(value.age))
self.AddCell(i, attribute_name, value)
self.size = i + 1
return additional_rows
class VersionSelectorDialog(renderers.TableRenderer):
"""Renders the version available for this object."""
layout_template = renderers.Template("""
<div class="modal-dialog">
<div class="modal-content">
<div class="modal-header">
<button type="button" class="close" data-dismiss="modal"
aria-hidden="true">
x
</button>
<h4>Versions of {{this.state.aff4_path}}</h4>
</div>
<div class="modal-body">
<div class="padded">
""") + renderers.TableRenderer.layout_template + """
</div>
</div>
<div class="modal-footer">
<button class="btn btn-default" data-dismiss="modal" name="Ok"
aria-hidden="true">Ok</button>
</div>
</div>
</div>
"""
def __init__(self, **kwargs):
super(VersionSelectorDialog, self).__init__(**kwargs)
self.AddColumn(semantic.RDFValueColumn("Age"))
self.AddColumn(semantic.RDFValueColumn("Type"))
def Layout(self, request, response):
"""Populates the table state with the request."""
self.state["aff4_path"] = request.REQ.get("aff4_path")
response = super(VersionSelectorDialog, self).Layout(request, response)
return self.CallJavascript(response, "VersionSelectorDialog.Layout",
aff4_path=self.state["aff4_path"])
def BuildTable(self, start_row, end_row, request):
"""Populates the table with attribute values."""
aff4_path = request.REQ.get("aff4_path")
if aff4_path is None: return
fd = aff4.FACTORY.Open(aff4_path, age=aff4.ALL_TIMES, token=request.token)
i = 0
for i, type_attribute in enumerate(
fd.GetValuesForAttribute(fd.Schema.TYPE)):
if i < start_row or i > end_row:
continue
self.AddCell(i, "Age", rdfvalue.RDFDatetime(type_attribute.age))
self.AddCell(i, "Type", type_attribute)
|
ksmaheshkumar/grr
|
gui/plugins/fileview.py
|
Python
|
apache-2.0
| 51,028
|
#-
# Copyright (c) 2014 Robert M. Norton
# All rights reserved.
#
# @BERI_LICENSE_HEADER_START@
#
# Licensed to BERI Open Systems C.I.C. (BERI) under one or more contributor
# license agreements. See the NOTICE file distributed with this work for
# additional information regarding copyright ownership. BERI licenses this
# file to you under the BERI Hardware-Software License, Version 1.0 (the
# "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at:
#
# http://www.beri-open-systems.org/legal/license-1-0.txt
#
# Unless required by applicable law or agreed to in writing, Work distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# @BERI_LICENSE_HEADER_END@
#
from beritest_tools import BaseBERITestCase
from nose.plugins.attrib import attr
@attr('mt')
class test_ipc(BaseBERITestCase):
def test_cause_t0(self):
self.assertRegisterEqual(self.MIPS.threads[0].s0 & 0xffff, 0x800, "Thread 0 cause register not interrupt on IP3")
def test_epc_t0(self):
expected_epc=self.MIPS.threads[0].s2
self.assertRegisterInRange(self.MIPS.threads[0].s1, expected_epc, expected_epc + 4, "Thread 0 epc register not expected_epc")
def test_cause_t1(self):
self.assertRegisterEqual(self.MIPS.threads[1].s0 & 0xffff, 0x400, "Thread 1 cause register not interrupt on IP2")
def test_epc_t1(self):
expected_epc=self.MIPS.threads[0].s3
self.assertRegisterInRange(self.MIPS.threads[1].s1, expected_epc, expected_epc + 4, "Thread 1 epc register not expected_epc")
|
8l/beri
|
cheritest/trunk/tests/mt/test_ipc.py
|
Python
|
apache-2.0
| 1,772
|
# -*- coding: utf-8 -*-
#
# Author: François Rossigneux <francois.rossigneux@inria.fr>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from climate import tests
class DBUtilsTestCase(tests.TestCase):
"""Test case for DB Utils."""
pass
|
frossigneux/blazar
|
climate/tests/db/test_utils.py
|
Python
|
apache-2.0
| 741
|
# Copyright (c) 2015 NTT, OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from congress.api import api_utils
from congress.api import webservice
from congress.tests import base
class TestAPIUtils(base.SqlTestCase):
def setUp(self):
super(TestAPIUtils, self).setUp()
def test_create_table_dict(self):
table_name = 'fake_table'
schema = {'fake_table': ({'name': 'id', 'desc': None},
{'name': 'name', 'desc': None})}
expected = {'table_id': table_name,
'columns': [{'name': 'id', 'description': None},
{'name': 'name', 'description': None}]}
result = api_utils.create_table_dict(table_name, schema)
self.assertEqual(expected, result)
def test_get_id_from_context_ds_id(self):
context = {'ds_id': 'datasource id'}
expected = ('datasource id', 'datasource id')
result = api_utils.get_id_from_context(context)
self.assertEqual(expected, result)
def test_get_id_from_context_policy_id(self):
context = {'policy_id': 'policy id'}
expected = ('__engine', 'policy id')
result = api_utils.get_id_from_context(context)
self.assertEqual(expected, result)
def test_get_id_from_context_with_invalid_context(self):
context = {'invalid_id': 'invalid id'}
self.assertRaises(webservice.DataModelException,
api_utils.get_id_from_context, context)
|
openstack/congress
|
congress/tests/api/test_api_utils.py
|
Python
|
apache-2.0
| 2,124
|
# Copyright 2017 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
import nova.conf
hyperv_opts = [
cfg.IntOpt('evacuate_task_state_timeout',
default=600,
help='Number of seconds to wait for an instance to be '
'evacuated during host maintenance.'),
cfg.IntOpt('cluster_event_check_interval',
deprecated_for_removal=True,
deprecated_since="5.0.1",
default=2),
cfg.BoolOpt('instance_automatic_shutdown',
default=False,
help='Automatically shutdown instances when the host is '
'shutdown. By default, instances will be saved, which '
'adds a disk overhead. Changing this option will not '
'affect existing instances.'),
cfg.IntOpt('instance_live_migration_timeout',
default=300,
min=0,
help='Number of seconds to wait for an instance to be '
'live migrated (Only applies to clustered instances '
'for the moment).'),
cfg.IntOpt('max_failover_count',
default=1,
min=1,
help="The maximum number of failovers that can occur in the "
"failover_period timeframe per VM. Once a VM's number "
"failover reaches this number, the VM will simply end up "
"in a Failed state."),
cfg.IntOpt('failover_period',
default=6,
min=1,
help="The number of hours in which the max_failover_count "
"number of failovers can occur."),
cfg.BoolOpt('recreate_ports_on_failover',
default=True,
help="When enabled, the ports will be recreated for failed "
"over instances. This ensures that we're not left with "
"a stale port."),
cfg.BoolOpt('auto_failback',
default=True,
help="Allow the VM the failback to its original host once it "
"is available."),
cfg.BoolOpt('force_destroy_instances',
default=False,
help="If this option is enabled, instance destroy requests "
"are executed immediately, regardless of instance "
"pending tasks. In some situations, the destroy "
"operation will fail (e.g. due to file locks), "
"requiring subsequent retries."),
cfg.BoolOpt('move_disks_on_cold_migration',
default=True,
help="Move the instance files to the instance dir configured "
"on the destination host. You may consider disabling "
"this when using multiple CSVs or shares and you wish "
"the source location to be preserved."),
]
coordination_opts = [
cfg.StrOpt('backend_url',
default='file:///C:/OpenStack/Lock',
help='The backend URL to use for distributed coordination.'),
]
CONF = nova.conf.CONF
CONF.register_opts(coordination_opts, 'coordination')
CONF.register_opts(hyperv_opts, 'hyperv')
def list_opts():
return [('coordination', coordination_opts),
('hyperv', hyperv_opts)]
|
openstack/compute-hyperv
|
compute_hyperv/nova/conf.py
|
Python
|
apache-2.0
| 3,933
|
from sklearn.base import BaseEstimator, TransformerMixin
import numpy as np
class PositionalSelector(BaseEstimator, TransformerMixin):
def __init__(self, positions):
self.positions = positions
def fit(self, X, y=None):
return self
def transform(self, X):
return np.array(X)[:, self.positions]
class StripString(BaseEstimator, TransformerMixin):
def fit(self, X, y=None):
return self
def transform(self, X):
strip = np.vectorize(str.strip)
return strip(np.array(X))
class SimpleOneHotEncoder(BaseEstimator, TransformerMixin):
def fit(self, X, y=None):
self.values = []
for c in range(X.shape[1]):
Y = X[:, c]
values = {v: i for i, v in enumerate(np.unique(Y))}
self.values.append(values)
return self
def transform(self, X):
X = np.array(X)
matrices = []
for c in range(X.shape[1]):
Y = X[:, c]
matrix = np.zeros(shape=(len(Y), len(self.values[c])), dtype=np.int8)
for i, x in enumerate(Y):
if x in self.values[c]:
matrix[i][self.values[c][x]] = 1
matrices.append(matrix)
res = np.concatenate(matrices, axis=1)
return res
|
GoogleCloudPlatform/training-data-analyst
|
quests/dei/census/custom_transforms.py
|
Python
|
apache-2.0
| 1,287
|
import base64
import json
from django.test import TestCase
from django.conf import settings
from django.core.urlresolvers import reverse
from ..views import register, statements, activities
class ActivityTests(TestCase):
@classmethod
def setUpClass(cls):
print "\n%s" % __name__
def setUp(self):
self.username = "tester"
self.email = "test@tester.com"
self.password = "test"
self.auth = "Basic %s" % base64.b64encode("%s:%s" % (self.username, self.password))
form = {'username':self.username, 'email': self.email,'password':self.password,'password2':self.password}
self.client.post(reverse(register),form, Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
def test_get(self):
st = json.dumps({"actor":{"objectType":"Agent","mbox": "mailto:tom@adlnet.gov"},
"verb":{"id": "http://adlnet.gov/expapi/verbs/assess","display": {"en-US":"assessed"}},
"object":{'objectType':'Activity', 'id':'act:foobar'}})
st_post = self.client.post(reverse(statements), st, content_type="application/json", Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(st_post.status_code, 200)
response = self.client.get(reverse(activities), {'activityId':'act:foobar'}, Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
rsp = response.content
self.assertEqual(response.status_code, 200)
self.assertIn('act:foobar', rsp)
self.assertIn('Activity', rsp)
self.assertIn('objectType', rsp)
self.assertIn('content-length', response._headers)
def test_get_not_exist(self):
activity_id = "this:does_not_exist"
response = self.client.get(reverse(activities), {'activityId':activity_id}, Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(response.status_code, 404)
self.assertEqual(response.content, 'No activity found with ID this:does_not_exist')
def test_get_not_array(self):
st = json.dumps({"actor":{"objectType":"Agent","mbox": "mailto:tom@adlnet.gov"},
"verb":{"id": "http://adlnet.gov/expapi/verbs/assess","display": {"en-US":"assessed"}},
"object":{'objectType':'Activity', 'id':'act:foobar'}})
st_post = self.client.post(reverse(statements), st, content_type="application/json", Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(st_post.status_code, 200)
response = self.client.get(reverse(activities), {'activityId':'act:foobar'}, Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
rsp = response.content
self.assertEqual(response.status_code, 200)
self.assertIn('content-length', response._headers)
rsp_obj = json.loads(rsp)
self.assertEqual('act:foobar', rsp_obj['id'])
def test_head(self):
st = json.dumps({"actor":{"objectType":"Agent","mbox": "mailto:tom@adlnet.gov"},
"verb":{"id": "http://adlnet.gov/expapi/verbs/assess","display": {"en-US":"assessed"}},
"object":{'objectType':'Activity', 'id':'act:foobar'}})
st_post = self.client.post(reverse(statements), st, content_type="application/json", Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(st_post.status_code, 200)
response = self.client.head(reverse(activities), {'activityId':'act:foobar'}, Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, '')
self.assertIn('content-length', response._headers)
def test_get_def(self):
st = json.dumps({"actor":{"objectType":"Agent","mbox": "mailto:tom@adlnet.gov"},
"verb":{"id": "http://adlnet.gov/expapi/verbs/assess","display": {"en-US":"assessed"}},
"object":{'objectType': 'Activity', 'id':'act:foobar1',
'definition': {'name': {'en-US':'testname', 'en-GB': 'altname'},
'description': {'en-US':'testdesc', 'en-GB': 'altdesc'},
'type': 'type:course','interactionType': 'other'}}})
st_post = self.client.post(reverse(statements), st, content_type="application/json", Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(st_post.status_code, 200)
response = self.client.get(reverse(activities), {'activityId':'act:foobar1'}, Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
rsp = response.content
self.assertEqual(response.status_code, 200)
self.assertIn('act:foobar1', rsp)
self.assertIn('type:course', rsp)
self.assertIn('other', rsp)
rsp_dict = json.loads(rsp)
self.assertEqual(len(rsp_dict['definition']['name'].keys()), 1)
self.assertEqual(len(rsp_dict['definition']['description'].keys()), 1)
def test_get_ext(self):
st = json.dumps({"actor":{"objectType":"Agent","mbox": "mailto:tom@adlnet.gov"},
"verb":{"id": "http://adlnet.gov/expapi/verbs/assess","display": {"en-US":"assessed"}},
"object":{'objectType': 'Activity', 'id':'act:foobar2',
'definition': {'name': {'en-FR':'testname2'},'description': {'en-FR':'testdesc2'},
'type': 'type:course','interactionType': 'other',
'extensions': {'ext:key1': 'value1', 'ext:key2': 'value2'}}}})
st_post = self.client.post(reverse(statements), st, content_type="application/json", Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(st_post.status_code, 200)
response = self.client.get(reverse(activities), {'activityId':'act:foobar2'}, Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
rsp = response.content
self.assertEqual(response.status_code, 200)
self.assertIn('act:foobar2', rsp)
self.assertIn('type:course', rsp)
self.assertIn('other', rsp)
self.assertIn('en-FR', rsp)
self.assertIn('testname2', rsp)
self.assertIn('testdesc2', rsp)
self.assertIn('key1', rsp)
self.assertIn('key2', rsp)
self.assertIn('value1', rsp)
self.assertIn('value2', rsp)
def test_get_crp_multiple_choice(self):
st = json.dumps({"actor":{"objectType":"Agent","mbox": "mailto:tom@adlnet.gov"},
"verb":{"id": "http://adlnet.gov/expapi/verbs/assess","display": {"en-US":"assessed"}},
"object":{'objectType': 'Activity', 'id':'act:foobar3',
'definition': {'name': {'en-FR':'testname2'},
'description': {'en-FR':'testdesc2', 'en-CH': 'altdesc'},
'type': 'http://adlnet.gov/expapi/activities/cmi.interaction','interactionType': 'choice',
'correctResponsesPattern': ['golf', 'tetris'],'choices':[{'id': 'golf',
'description': {'en-US':'Golf Example', 'en-GB':'alt golf'}},{'id': 'tetris',
'description':{'en-US': 'Tetris Example'}}, {'id':'facebook',
'description':{'en-US':'Facebook App'}},{'id':'scrabble',
'description': {'en-US': 'Scrabble Example'}}]}}})
st_post = self.client.post(reverse(statements), st, content_type="application/json", Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(st_post.status_code, 200)
response = self.client.get(reverse(activities), {'activityId':'act:foobar3'}, Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
rsp = response.content
self.assertEqual(response.status_code, 200)
self.assertIn('act:foobar3', rsp)
self.assertIn('http://adlnet.gov/expapi/activities/cmi.interaction', rsp)
self.assertIn('choice', rsp)
self.assertIn('en-FR', rsp)
self.assertIn('testname2', rsp)
self.assertIn('testdesc2', rsp)
self.assertIn('golf', rsp)
self.assertIn('tetris', rsp)
rsp_dict = json.loads(rsp)
self.assertEqual(len(rsp_dict['definition']['description'].keys()), 1)
self.assertEqual(len(rsp_dict['definition']['choices'][0]['description'].keys()), 1)
self.assertEqual(len(rsp_dict['definition']['choices'][1]['description'].keys()), 1)
self.assertEqual(len(rsp_dict['definition']['choices'][2]['description'].keys()), 1)
self.assertEqual(len(rsp_dict['definition']['choices'][3]['description'].keys()), 1)
def test_get_crp_true_false(self):
st = json.dumps({"actor":{"objectType":"Agent","mbox": "mailto:tom@adlnet.gov"},
"verb":{"id": "http://adlnet.gov/expapi/verbs/assess","display": {"en-US":"assessed"}},
"object":{'objectType': 'Activity', 'id':'act:foobar4',
'definition': {'name': {'en-US':'testname2'},'description': {'en-US':'testdesc2'},
'type': 'http://adlnet.gov/expapi/activities/cmi.interaction','interactionType': 'true-false','correctResponsesPattern': ['true']}}})
st_post = self.client.post(reverse(statements), st, content_type="application/json", Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(st_post.status_code, 200)
response = self.client.get(reverse(activities), {'activityId': 'act:foobar4'}, Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
rsp = response.content
self.assertEqual(response.status_code, 200)
self.assertIn('act:foobar4', rsp)
self.assertIn('http://adlnet.gov/expapi/activities/cmi.interaction', rsp)
self.assertIn('true-false', rsp)
self.assertIn('en-US', rsp)
self.assertIn('testname2', rsp)
self.assertIn('testdesc2', rsp)
self.assertIn('correctResponsesPattern', rsp)
self.assertIn('true', rsp)
def test_get_crp_fill_in(self):
st = json.dumps({"actor":{"objectType":"Agent","mbox": "mailto:tom@adlnet.gov"},
"verb":{"id": "http://adlnet.gov/expapi/verbs/assess","display": {"en-US":"assessed"}},
"object":{'objectType': 'Activity', 'id':'act:foobar5',
'definition': {'name': {'en-US':'testname2'},'description': {'en-US':'testdesc2'},
'type': 'http://adlnet.gov/expapi/activities/cmi.interaction','interactionType': 'fill-in',
'correctResponsesPattern': ['Fill in answer']}}})
st_post = self.client.post(reverse(statements), st, content_type="application/json", Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(st_post.status_code, 200)
response = self.client.get(reverse(activities), {'activityId': 'act:foobar5'}, Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
rsp = response.content
self.assertEqual(response.status_code, 200)
self.assertIn('act:foobar5', rsp)
self.assertIn('http://adlnet.gov/expapi/activities/cmi.interaction', rsp)
self.assertIn('fill-in', rsp)
self.assertIn('en-US', rsp)
self.assertIn('testname2', rsp)
self.assertIn('testdesc2', rsp)
self.assertIn('correctResponsesPattern', rsp)
self.assertIn('Fill in answer', rsp)
def test_get_crp_long_fill_in(self):
st = json.dumps({"actor":{"objectType":"Agent","mbox": "mailto:tom@adlnet.gov"},
"verb":{"id": "http://adlnet.gov/expapi/verbs/assess","display": {"en-US":"assessed"}},
"object":{'objectType': 'Activity', 'id':'act:foobar6',
'definition': {'name': {'en-FR':'testname2'},'description': {'en-FR':'testdesc2'},
'type': 'http://adlnet.gov/expapi/activities/cmi.interaction','interactionType': 'fill-in',
'correctResponsesPattern': ['Long fill in answer']}}})
st_post = self.client.post(reverse(statements), st, content_type="application/json", Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(st_post.status_code, 200)
response = self.client.get(reverse(activities), {'activityId': 'act:foobar6'}, Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
rsp = response.content
self.assertEqual(response.status_code, 200)
self.assertIn('act:foobar6', rsp)
self.assertIn('http://adlnet.gov/expapi/activities/cmi.interaction', rsp)
self.assertIn('fill-in', rsp)
self.assertIn('en-FR', rsp)
self.assertIn('testname2', rsp)
self.assertIn('testdesc2', rsp)
self.assertIn('correctResponsesPattern', rsp)
self.assertIn('Long fill in answer', rsp)
def test_get_crp_likert(self):
st = json.dumps({"actor":{"objectType":"Agent","mbox": "mailto:tom@adlnet.gov"},
"verb":{"id": "http://adlnet.gov/expapi/verbs/assess","display": {"en-US":"assessed"}},
"object":{'objectType': 'Activity', 'id':'act:foobar7',
'definition': {'name': {'en-US':'testname2'},'description': {'en-US':'testdesc2'},
'type': 'http://adlnet.gov/expapi/activities/cmi.interaction','interactionType': 'likert','correctResponsesPattern': ['likert_3'],
'scale':[{'id': 'likert_0', 'description': {'en-US':'Its OK'}},{'id': 'likert_1',
'description':{'en-US': 'Its Pretty Cool'}}, {'id':'likert_2',
'description':{'en-US':'Its Cool Cool'}},{'id':'likert_3',
'description': {'en-US': 'Its Gonna Change the World'}}]}}})
st_post = self.client.post(reverse(statements), st, content_type="application/json", Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(st_post.status_code, 200)
response = self.client.get(reverse(activities), {'activityId': 'act:foobar7'}, Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
rsp = response.content
self.assertEqual(response.status_code, 200)
self.assertIn('act:foobar7', rsp)
self.assertIn('http://adlnet.gov/expapi/activities/cmi.interaction', rsp)
self.assertIn('likert', rsp)
self.assertIn('en-US', rsp)
self.assertIn('testname2', rsp)
self.assertIn('testdesc2', rsp)
self.assertIn('correctResponsesPattern', rsp)
self.assertIn('likert_3', rsp)
self.assertIn('likert_2', rsp)
self.assertIn('likert_1', rsp)
def test_get_crp_matching(self):
st = json.dumps({"actor":{"objectType":"Agent","mbox": "mailto:tom@adlnet.gov"},
"verb":{"id": "http://adlnet.gov/expapi/verbs/assess","display": {"en-US":"assessed"}},
"object":{'objectType': 'Activity', 'id':'act:foobar8',
'definition': {'name': {'en-US':'testname2'},'description': {'en-FR':'testdesc2'},
'type': 'http://adlnet.gov/expapi/activities/cmi.interaction','interactionType': 'matching',
'correctResponsesPattern': ['lou.3,tom.2,andy.1'],'source':[{'id': 'lou',
'description': {'en-US':'Lou'}},{'id': 'tom','description':{'en-US': 'Tom'}},
{'id':'andy', 'description':{'en-US':'Andy'}}],'target':[{'id':'1',
'description':{'en-US': 'SCORM Engine'}},{'id':'2','description':{'en-US': 'Pure-sewage'}},
{'id':'3', 'description':{'en-US': 'SCORM Cloud'}}]}}})
st_post = self.client.post(reverse(statements), st, content_type="application/json", Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(st_post.status_code, 200)
response = self.client.get(reverse(activities), {'activityId': 'act:foobar8'}, Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
rsp = response.content
self.assertEqual(response.status_code, 200)
self.assertIn('act:foobar8', rsp)
self.assertIn('http://adlnet.gov/expapi/activities/cmi.interaction', rsp)
self.assertIn('matching', rsp)
self.assertIn('en-FR', rsp)
self.assertIn('en-US', rsp)
self.assertIn('testname2', rsp)
self.assertIn('testdesc2', rsp)
self.assertIn('correctResponsesPattern', rsp)
self.assertIn('lou.3,tom.2,andy.1', rsp)
self.assertIn('source', rsp)
self.assertIn('target', rsp)
def test_get_crp_performance(self):
st = json.dumps({"actor":{"objectType":"Agent","mbox": "mailto:tom@adlnet.gov"},
"verb":{"id": "http://adlnet.gov/expapi/verbs/assess","display": {"en-US":"assessed"}},
"object":{'objectType': 'Activity', 'id':'act:foobar9',
'definition': {'name': {'en-US':'testname2', 'en-GB': 'altname'},
'description': {'en-US':'testdesc2'},'type': 'http://adlnet.gov/expapi/activities/cmi.interaction',
'interactionType': 'performance',
'correctResponsesPattern': ['pong.1,dg.10,lunch.4'],'steps':[{'id': 'pong',
'description': {'en-US':'Net pong matches won'}},{'id': 'dg',
'description':{'en-US': 'Strokes over par in disc golf at Liberty'}},
{'id':'lunch', 'description':{'en-US':'Lunch having been eaten',
'en-FR': 'altlunch'}}]}}})
st_post = self.client.post(reverse(statements), st, content_type="application/json", Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(st_post.status_code, 200)
response = self.client.get(reverse(activities), {'activityId': 'act:foobar9'}, Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
rsp = response.content
self.assertEqual(response.status_code, 200)
self.assertIn('act:foobar9', rsp)
self.assertIn('http://adlnet.gov/expapi/activities/cmi.interaction', rsp)
self.assertIn('performance', rsp)
self.assertIn('steps', rsp)
self.assertIn('correctResponsesPattern', rsp)
self.assertIn('pong.1,dg.10,lunch.4', rsp)
self.assertIn('Net pong matches won', rsp)
self.assertIn('Strokes over par in disc golf at Liberty', rsp)
rsp_dict = json.loads(rsp)
self.assertEqual(len(rsp_dict['definition']['name'].keys()), 1)
self.assertEqual(len(rsp_dict['definition']['description'].keys()), 1)
self.assertEqual(len(rsp_dict['definition']['steps'][0]['description'].keys()), 1)
self.assertEqual(len(rsp_dict['definition']['steps'][1]['description'].keys()), 1)
self.assertEqual(len(rsp_dict['definition']['steps'][2]['description'].keys()), 1)
def test_get_crp_sequencing(self):
st = json.dumps({"actor":{"objectType":"Agent","mbox": "mailto:tom@adlnet.gov"},
"verb":{"id": "http://adlnet.gov/expapi/verbs/assess","display": {"en-US":"assessed"}},
"object":{'objectType': 'Activity', 'id':'act:foobar10',
'definition': {'name': {'en-US':'testname2'},'description': {'en-US':'testdesc2'},
'type': 'http://adlnet.gov/expapi/activities/cmi.interaction','interactionType': 'sequencing',
'correctResponsesPattern': ['lou,tom,andy,aaron'],'choices':[{'id': 'lou',
'description': {'en-US':'Lou'}},{'id': 'tom','description':{'en-US': 'Tom'}},
{'id':'andy', 'description':{'en-US':'Andy'}},{'id':'aaron', 'description':{'en-US':'Aaron'}}]}}})
st_post = self.client.post(reverse(statements), st, content_type="application/json", Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(st_post.status_code, 200)
response = self.client.get(reverse(activities), {'activityId': 'act:foobar10'}, Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
rsp = response.content
self.assertEqual(response.status_code, 200)
self.assertIn('act:foobar10', rsp)
self.assertIn('http://adlnet.gov/expapi/activities/cmi.interaction', rsp)
self.assertIn('sequencing', rsp)
self.assertIn('choices', rsp)
self.assertIn('en-US', rsp)
self.assertIn('testname2', rsp)
self.assertIn('testdesc2', rsp)
self.assertIn('correctResponsesPattern', rsp)
self.assertIn('lou,tom,andy,aaron', rsp)
def test_get_crp_numeric(self):
st = json.dumps({"actor":{"objectType":"Agent","mbox": "mailto:tom@adlnet.gov"},
"verb":{"id": "http://adlnet.gov/expapi/verbs/assess","display": {"en-US":"assessed"}},
"object":{'objectType': 'Activity', 'id':'act:foobar11',
'definition': {'name': {'en-US':'testname2'},'description': {'en-US':'testdesc2'},
'type': 'http://adlnet.gov/expapi/activities/cmi.interaction','interactionType': 'numeric','correctResponsesPattern': ['4'],
'extensions': {'ext:key1': 'value1', 'ext:key2': 'value2','ext:key3': 'value3'}}}})
st_post = self.client.post(reverse(statements), st, content_type="application/json", Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(st_post.status_code, 200)
response = self.client.get(reverse(activities), {'activityId': 'act:foobar11'}, Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
rsp = response.content
self.assertEqual(response.status_code, 200)
self.assertIn('act:foobar11', rsp)
self.assertIn('http://adlnet.gov/expapi/activities/cmi.interaction', rsp)
self.assertIn('numeric', rsp)
self.assertIn('4', rsp)
self.assertIn('en-US', rsp)
self.assertIn('testname2', rsp)
self.assertIn('testdesc2', rsp)
self.assertIn('correctResponsesPattern', rsp)
self.assertIn('extensions', rsp)
self.assertIn('key1', rsp)
self.assertIn('value1', rsp)
self.assertIn('key2', rsp)
self.assertIn('value2', rsp)
self.assertIn('key3', rsp)
self.assertIn('value3', rsp)
def test_get_crp_other(self):
st = json.dumps({"actor":{"objectType":"Agent","mbox": "mailto:tom@adlnet.gov"},
"verb":{"id": "http://adlnet.gov/expapi/verbs/assess","display": {"en-US":"assessed"}},
"object":{'objectType': 'Activity', 'id': 'act:foobar12',
'definition': {'name': {'en-US':'testname2'},'description': {'en-US':'testdesc2'},
'type': 'http://adlnet.gov/expapi/activities/cmi.interaction','interactionType': 'other',
'correctResponsesPattern': ['(35.937432,-86.868896)']}}})
st_post = self.client.post(reverse(statements), st, content_type="application/json", Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(st_post.status_code, 200)
response = self.client.get(reverse(activities), {'activityId': 'act:foobar12'}, Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
rsp = response.content
self.assertEqual(response.status_code, 200)
self.assertIn('act:foobar12', rsp)
self.assertIn('http://adlnet.gov/expapi/activities/cmi.interaction', rsp)
self.assertIn('other', rsp)
self.assertIn('(35.937432,-86.868896)', rsp)
self.assertIn('en-US', rsp)
self.assertIn('testname2', rsp)
self.assertIn('testdesc2', rsp)
self.assertIn('correctResponsesPattern', rsp)
def test_get_wrong_activity(self):
response = self.client.get(reverse(activities), {'activityId': 'act:act:foo'}, Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(response.status_code, 404)
def test_head_wrong_activity(self):
response = self.client.head(reverse(activities), {'activityId': 'act:act:foo'}, Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(response.status_code, 404)
def test_get_no_activity(self):
response = self.client.get(reverse(activities), Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(response.status_code, 400)
def test_post(self):
response = self.client.post(reverse(activities), {'activityId':'act:my_activity'},
content_type='application/x-www-form-urlencoded', Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(response.status_code, 405)
def test_delete(self):
response = self.client.delete(reverse(activities), {'activityId':'act:my_activity'},
content_type='application/x-www-form-urlencoded', Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(response.status_code, 405)
def test_put(self):
response = self.client.put(reverse(activities), {'activityId':'act:my_activity'},
content_type='application/x-www-form-urlencoded', Authorization=self.auth, X_Experience_API_Version=settings.XAPI_VERSION)
self.assertEqual(response.status_code, 405)
|
diagonalwalnut/Experience
|
lrs/tests/ActivityTests.py
|
Python
|
apache-2.0
| 25,452
|
import abc
import datetime
import decimal
import functools
import json
import uuid
import pytz
import msgpack
import six
from lymph.utils import Undefined
@six.add_metaclass(abc.ABCMeta)
class ExtensionTypeSerializer(object):
@abc.abstractmethod
def serialize(self, obj):
raise NotImplementedError
@abc.abstractmethod
def deserialize(self, obj):
raise NotImplementedError
class DatetimeSerializer(ExtensionTypeSerializer):
format = '%Y-%m-%dT%H:%M:%SZ'
def serialize(self, obj):
result = obj.strftime(self.format)
if obj.tzinfo:
return str(obj.tzinfo), result
return result
def deserialize(self, obj):
try:
tzinfo, obj = obj
except ValueError:
tzinfo = None
result = datetime.datetime.strptime(obj, self.format)
if not tzinfo:
return result
return pytz.timezone(tzinfo).localize(result)
class DateSerializer(ExtensionTypeSerializer):
format = '%Y-%m-%d'
def serialize(self, obj):
return obj.strftime(self.format)
def deserialize(self, obj):
return datetime.datetime.strptime(obj, self.format).date()
class TimeSerializer(ExtensionTypeSerializer):
format = '%H:%M:%SZ'
def serialize(self, obj):
return obj.strftime(self.format)
def deserialize(self, obj):
return datetime.datetime.strptime(obj, self.format).time()
class StrSerializer(ExtensionTypeSerializer):
def __init__(self, factory):
self.factory = factory
def serialize(self, obj):
return str(obj)
def deserialize(self, obj):
return self.factory(obj)
class SetSerializer(ExtensionTypeSerializer):
def serialize(self, obj):
return list(obj)
def deserialize(self, obj):
return set(obj)
class UndefinedSerializer(ExtensionTypeSerializer):
def serialize(self, obj):
return ''
def deserialize(self, obj):
return Undefined
_extension_type_serializers = {
'datetime': DatetimeSerializer(),
'date': DateSerializer(),
'time': TimeSerializer(),
'Decimal': StrSerializer(decimal.Decimal),
'UUID': StrSerializer(uuid.UUID),
'set': SetSerializer(),
'UndefinedType': UndefinedSerializer(),
}
class BaseSerializer(object):
def __init__(self, dumps=None, loads=None, load=None, dump=None):
self._dumps = dumps
self._loads = loads
self._load = load
self._dump = dump
def dump_object(self, obj):
obj_type = type(obj)
serializer = _extension_type_serializers.get(obj_type.__name__)
if serializer:
obj = {
'__type__': obj_type.__name__,
'_': serializer.serialize(obj),
}
elif hasattr(obj, '_lymph_dump_'):
obj = obj._lymph_dump_()
return obj
def load_object(self, obj):
obj_type = obj.get('__type__')
if obj_type:
serializer = _extension_type_serializers.get(obj_type)
return serializer.deserialize(obj['_'])
return obj
def dumps(self, obj):
return self._dumps(obj, default=self.dump_object)
def loads(self, s):
return self._loads(s, object_hook=self.load_object)
def dump(self, obj, f):
return self._dump(obj, f, default=self.dump_object)
def load(self, f):
return self._load(f, object_hook=self.load_object)
msgpack_serializer = BaseSerializer(
dumps=functools.partial(msgpack.dumps, use_bin_type=True),
loads=functools.partial(msgpack.loads, encoding='utf-8'),
dump=functools.partial(msgpack.dump, use_bin_type=True),
load=functools.partial(msgpack.load, encoding='utf-8'),
)
json_serializer = BaseSerializer(dumps=json.dumps, loads=json.loads, dump=json.dump, load=json.load)
|
itakouna/lymph
|
lymph/serializers/base.py
|
Python
|
apache-2.0
| 3,844
|
import os
import numpy as np
import matplotlib.pyplot as plt
from hyperion.model import ModelOutput
from hyperion.util.constants import pc
# Create output directory if it does not already exist
if not os.path.exists('frames'):
os.mkdir('frames')
# Open model
m = ModelOutput('flyaround_cube.rtout')
# Read image from model
image = m.get_image(distance=300 * pc, units='MJy/sr')
# image.val is now an array with four dimensions (n_view, n_y, n_x, n_wav)
for iview in range(image.val.shape[0]):
# Open figure and create axes
fig = plt.figure(figsize=(3, 3))
ax = fig.add_subplot(1, 1, 1)
# This is the command to show the image. The parameters vmin and vmax are
# the min and max levels for the grayscale (remove for default values).
# The colormap is set here to be a heat map. Other possible heat maps
# include plt.cm.gray (grayscale), plt.cm.gist_yarg (inverted grayscale),
# plt.cm.jet (default, colorful). The np.sqrt() is used to plot the
# images on a sqrt stretch.
ax.imshow(np.sqrt(image.val[iview, :, :, 0]), vmin=0, vmax=np.sqrt(2000.),
cmap=plt.cm.gist_heat, origin='lower')
# Save figure. The facecolor='black' and edgecolor='black' are for
# esthetics, and hide the axes
fig.savefig('frames/frame_%05i.png' % iview,
facecolor='black', edgecolor='black')
# Close figure
plt.close(fig)
|
bluescarni/hyperion
|
docs/tutorials/scripts/flyaround_cube_animate.py
|
Python
|
bsd-2-clause
| 1,402
|
# Copyright 2013 django-htmlmin authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
from functools import wraps
from htmlmin.minify import html_minify
def minified_response(f):
@wraps(f)
def minify(*args, **kwargs):
response = f(*args, **kwargs)
minifiable_status = response.status_code == 200
minifiable_content = 'text/html' in response['Content-Type']
if minifiable_status and minifiable_content:
response.content = html_minify(response.content)
return response
return minify
def not_minified_response(f):
@wraps(f)
def not_minify(*args, **kwargs):
response = f(*args, **kwargs)
response.minify_response = False
return response
return not_minify
|
erikdejonge/django-htmlmin
|
htmlmin/decorators.py
|
Python
|
bsd-2-clause
| 837
|
# -*- coding: utf-8 -*-
""" Flexx setup script.
"""
import os
from os import path as op
try:
# use setuptools namespace, allows for "develop"
import setuptools # noqa, analysis:ignore
except ImportError:
pass # it's not essential for installation
from distutils.core import setup
name = 'flexx'
description = "Pure Python toolkit for creating GUI's using web technology."
# Get version and docstring
__version__ = None
__doc__ = ''
docStatus = 0 # Not started, in progress, done
initFile = os.path.join(os.path.dirname(__file__), name, '__init__.py')
for line in open(initFile).readlines():
if (line.startswith('version_info') or line.startswith('__version__')):
exec(line.strip())
elif line.startswith('"""'):
if docStatus == 0:
docStatus = 1
line = line.lstrip('"')
elif docStatus == 1:
docStatus = 2
if docStatus == 1:
__doc__ += line
def package_tree(pkgroot):
path = os.path.dirname(__file__)
subdirs = [os.path.relpath(i[0], path).replace(os.path.sep, '.')
for i in os.walk(os.path.join(path, pkgroot))
if '__init__.py' in i[2]]
return subdirs
setup(
name=name,
version=__version__,
author='Flexx contributors',
author_email='almar.klein@gmail.com',
license='(new) BSD',
url='http://flexx.readthedocs.org',
download_url='https://pypi.python.org/pypi/flexx',
keywords="ui design, web runtime, pyscript, reactive programming, FRP",
description=description,
long_description=__doc__,
platforms='any',
provides=[name],
install_requires=[],
packages=package_tree(name),
package_dir={name: name},
package_data={},
zip_safe=False,
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Science/Research',
'Intended Audience :: Education',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Programming Language :: Python',
#'Programming Language :: Python :: 2.7', # not yet supported
'Programming Language :: Python :: 3.4',
],
)
|
sesh/flexx
|
setup.py
|
Python
|
bsd-2-clause
| 2,298
|
from django import forms
from django.conf import settings
from django.core.urlresolvers import reverse
from django.template.loader import render_to_string
from django.utils.safestring import mark_safe
from django.utils.html import conditional_escape
from django.utils.encoding import force_unicode
from django.utils import simplejson
from django.core.exceptions import ImproperlyConfigured
from django.forms.util import flatatt
json_encode = simplejson.JSONEncoder().encode
DEFAULT_CONFIG = {
'skin': 'django',
'toolbar': 'Full',
'height': 291,
'width': 835,
'filebrowserWindowWidth': 940,
'filebrowserWindowHeight': 725,
}
class CKEditorWidget(forms.Textarea):
"""
Widget providing CKEditor for Rich Text Editing.
Supports direct image uploads and embed.
"""
class Media:
try:
js = (
settings.STATIC_URL + 'ckeditor/ckeditor/ckeditor.js',
)
except AttributeError:
raise ImproperlyConfigured("django-ckeditor requires \
CKEDITOR_MEDIA_PREFIX setting. This setting specifies a \
URL prefix to the ckeditor JS and CSS media (not \
uploaded media). Make sure to use a trailing slash: \
CKEDITOR_MEDIA_PREFIX = '/media/ckeditor/'")
def __init__(self, config_name='default', *args, **kwargs):
super(CKEditorWidget, self).__init__(*args, **kwargs)
# Setup config from defaults.
self.config = DEFAULT_CONFIG.copy()
# Try to get valid config from settings.
configs = getattr(settings, 'CKEDITOR_CONFIGS', None)
if configs != None:
if isinstance(configs, dict):
# Make sure the config_name exists.
if config_name in configs:
config = configs[config_name]
# Make sure the configuration is a dictionary.
if not isinstance(config, dict):
raise ImproperlyConfigured('CKEDITOR_CONFIGS["%s"] \
setting must be a dictionary type.' % \
config_name)
# Override defaults with settings config.
self.config.update(config)
else:
raise ImproperlyConfigured("No configuration named '%s' \
found in your CKEDITOR_CONFIGS setting." % \
config_name)
else:
raise ImproperlyConfigured('CKEDITOR_CONFIGS setting must be a\
dictionary type.')
def render(self, name, value, attrs={}):
if value is None:
value = ''
final_attrs = self.build_attrs(attrs, name=name)
self.config['filebrowserUploadUrl'] = reverse('ckeditor_upload')
self.config['filebrowserBrowseUrl'] = reverse('ckeditor_browse')
return mark_safe(render_to_string('ckeditor/widget.html', {
'final_attrs': flatatt(final_attrs),
'value': conditional_escape(force_unicode(value)),
'id': final_attrs['id'],
'config': json_encode(self.config)
})
)
|
Xangis/django-ckeditor
|
ckeditor/widgets.py
|
Python
|
bsd-3-clause
| 3,226
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Urwid LCD display module
# Copyright (C) 2010 Ian Ward
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Urwid web site: http://excess.org/urwid/
from .display_common import BaseScreen
import time
class LCDScreen(BaseScreen):
def set_terminal_properties(self, colors=None, bright_is_bold=None,
has_underline=None):
pass
def set_mouse_tracking(self, enable=True):
pass
def start(self):
pass
def stop(self):
pass
def set_input_timeouts(self, *args):
pass
def reset_default_terminal_palette(self, *args):
pass
def run_wrapper(self,fn):
return fn()
def draw_screen(self, xxx_todo_changeme, r ):
(cols, rows) = xxx_todo_changeme
pass
def clear(self):
pass
def get_cols_rows(self):
return self.DISPLAY_SIZE
class CFLCDScreen(LCDScreen):
"""
Common methods for Crystal Fontz LCD displays
"""
KEYS = [None, # no key with code 0
'up_press', 'down_press', 'left_press',
'right_press', 'enter_press', 'exit_press',
'up_release', 'down_release', 'left_release',
'right_release', 'enter_release', 'exit_release',
'ul_press', 'ur_press', 'll_press', 'lr_press',
'ul_release', 'ur_release', 'll_release', 'lr_release']
CMD_PING = 0
CMD_VERSION = 1
CMD_CLEAR = 6
CMD_CGRAM = 9
CMD_CURSOR_POSITION = 11 # data = [col, row]
CMD_CURSOR_STYLE = 12 # data = [style (0-4)]
CMD_LCD_CONTRAST = 13 # data = [contrast (0-255)]
CMD_BACKLIGHT = 14 # data = [power (0-100)]
CMD_LCD_DATA = 31 # data = [col, row] + text
CMD_GPO = 34 # data = [pin(0-12), value(0-100)]
# sent from device
CMD_KEY_ACTIVITY = 0x80
CMD_ACK = 0x40 # in high two bits ie. & 0xc0
CURSOR_NONE = 0
CURSOR_BLINKING_BLOCK = 1
CURSOR_UNDERSCORE = 2
CURSOR_BLINKING_BLOCK_UNDERSCORE = 3
CURSOR_INVERTING_BLINKING_BLOCK = 4
MAX_PACKET_DATA_LENGTH = 22
colors = 1
has_underline = False
def __init__(self, device_path, baud):
"""
device_path -- eg. '/dev/ttyUSB0'
baud -- baud rate
"""
super(CFLCDScreen, self).__init__()
self.device_path = device_path
from serial import Serial
self._device = Serial(device_path, baud, timeout=0)
self._unprocessed = ""
@classmethod
def get_crc(cls, buf):
# This seed makes the output of this shift based algorithm match
# the table based algorithm. The center 16 bits of the 32-bit
# "newCRC" are used for the CRC. The MSB of the lower byte is used
# to see what bit was shifted out of the center 16 bit CRC
# accumulator ("carry flag analog");
newCRC = 0x00F32100
for byte in buf:
# Push this byte’s bits through a software
# implementation of a hardware shift & xor.
for bit_count in range(8):
# Shift the CRC accumulator
newCRC >>= 1
# The new MSB of the CRC accumulator comes
# from the LSB of the current data byte.
if ord(byte) & (0x01 << bit_count):
newCRC |= 0x00800000
# If the low bit of the current CRC accumulator was set
# before the shift, then we need to XOR the accumulator
# with the polynomial (center 16 bits of 0x00840800)
if newCRC & 0x00000080:
newCRC ^= 0x00840800
# All the data has been done. Do 16 more bits of 0 data.
for bit_count in range(16):
# Shift the CRC accumulator
newCRC >>= 1
# If the low bit of the current CRC accumulator was set
# before the shift we need to XOR the accumulator with
# 0x00840800.
if newCRC & 0x00000080:
newCRC ^= 0x00840800
# Return the center 16 bits, making this CRC match the one’s
# complement that is sent in the packet.
return ((~newCRC)>>8) & 0xffff
def _send_packet(self, command, data):
"""
low-level packet sending.
Following the protocol requires waiting for ack packet between
sending each packet to the device.
"""
buf = chr(command) + chr(len(data)) + data
crc = self.get_crc(buf)
buf = buf + chr(crc & 0xff) + chr(crc >> 8)
self._device.write(buf)
def _read_packet(self):
"""
low-level packet reading.
returns (command/report code, data) or None
This method stored data read and tries to resync when bad data
is received.
"""
# pull in any new data available
self._unprocessed = self._unprocessed + self._device.read()
while True:
try:
command, data, unprocessed = self._parse_data(self._unprocessed)
self._unprocessed = unprocessed
return command, data
except self.MoreDataRequired:
return
except self.InvalidPacket:
# throw out a byte and try to parse again
self._unprocessed = self._unprocessed[1:]
class InvalidPacket(Exception):
pass
class MoreDataRequired(Exception):
pass
@classmethod
def _parse_data(cls, data):
"""
Try to read a packet from the start of data, returning
(command/report code, packet_data, remaining_data)
or raising InvalidPacket or MoreDataRequired
"""
if len(data) < 2:
raise cls.MoreDataRequired
command = ord(data[0])
plen = ord(data[1])
if plen > cls.MAX_PACKET_DATA_LENGTH:
raise cls.InvalidPacket("length value too large")
if len(data) < plen + 4:
raise cls.MoreDataRequired
crc = cls.get_crc(data[:2 + plen])
pcrc = ord(data[2 + plen]) + (ord(data[3 + plen]) << 8 )
if crc != pcrc:
raise cls.InvalidPacket("CRC doesn't match")
return (command, data[2:2 + plen], data[4 + plen:])
class KeyRepeatSimulator(object):
"""
Provide simulated repeat key events when given press and
release events.
If two or more keys are pressed disable repeating until all
keys are released.
"""
def __init__(self, repeat_delay, repeat_next):
"""
repeat_delay -- seconds to wait before starting to repeat keys
repeat_next -- time between each repeated key
"""
self.repeat_delay = repeat_delay
self.repeat_next = repeat_next
self.pressed = {}
self.multiple_pressed = False
def press(self, key):
if self.pressed:
self.multiple_pressed = True
self.pressed[key] = time.time()
def release(self, key):
if key not in self.pressed:
return # ignore extra release events
del self.pressed[key]
if not self.pressed:
self.multiple_pressed = False
def next_event(self):
"""
Return (remaining, key) where remaining is the number of seconds
(float) until the key repeat event should be sent, or None if no
events are pending.
"""
if len(self.pressed) != 1 or self.multiple_pressed:
return
for key in self.pressed:
return max(0, self.pressed[key] + self.repeat_delay
- time.time()), key
def sent_event(self):
"""
Cakk this method when you have sent a key repeat event so the
timer will be reset for the next event
"""
if len(self.pressed) != 1:
return # ignore event that shouldn't have been sent
for key in self.pressed:
self.pressed[key] = (
time.time() - self.repeat_delay + self.repeat_next)
return
class CF635Screen(CFLCDScreen):
"""
Crystal Fontz 635 display
20x4 character display + cursor
no foreground/background colors or settings supported
see CGROM for list of close unicode matches to characters available
6 button input
up, down, left, right, enter (check mark), exit (cross)
"""
DISPLAY_SIZE = (20, 4)
# ① through ⑧ are programmable CGRAM (chars 0-7, repeated at 8-15)
# double arrows (⇑⇓) appear as double arrowheads (chars 18, 19)
# ⑴ resembles a bell
# ⑵ resembles a filled-in "Y"
# ⑶ is the letters "Pt" together
# partial blocks (▇▆▄▃▁) are actually shorter versions of (▉▋▌▍▏)
# both groups are intended to draw horizontal bars with pixel
# precision, use ▇*[▆▄▃▁]? for a thin bar or ▉*[▋▌▍▏]? for a thick bar
CGROM = (
"①②③④⑤⑥⑦⑧①②③④⑤⑥⑦⑧"
"►◄⇑⇓«»↖↗↙↘▲▼↲^ˇ█"
" !\"#¤%&'()*+,-./"
"0123456789:;<=>?"
"¡ABCDEFGHIJKLMNO"
"PQRSTUVWXYZÄÖÑܧ"
"¿abcdefghijklmno"
"pqrstuvwxyzäöñüà"
"⁰¹²³⁴⁵⁶⁷⁸⁹½¼±≥≤μ"
"♪♫⑴♥♦⑵⌜⌟“”()αɛδ∞"
"@£$¥èéùìòÇᴾØøʳÅå"
"⌂¢ΦτλΩπΨΣθΞ♈ÆæßÉ"
"ΓΛΠϒ_ÈÊêçğŞşİι~◊"
"▇▆▄▃▁ƒ▉▋▌▍▏⑶◽▪↑→"
"↓←ÁÍÓÚÝáíóúýÔôŮů"
"ČĔŘŠŽčĕřšž[\]{|}")
cursor_style = CFLCDScreen.CURSOR_INVERTING_BLINKING_BLOCK
def __init__(self, device_path, baud=115200,
repeat_delay=0.5, repeat_next=0.125,
key_map=['up', 'down', 'left', 'right', 'enter', 'esc']):
"""
device_path -- eg. '/dev/ttyUSB0'
baud -- baud rate
repeat_delay -- seconds to wait before starting to repeat keys
repeat_next -- time between each repeated key
key_map -- the keys to send for this device's buttons
"""
super(CF635Screen, self).__init__(device_path, baud)
self.repeat_delay = repeat_delay
self.repeat_next = repeat_next
self.key_repeat = KeyRepeatSimulator(repeat_delay, repeat_next)
self.key_map = key_map
self._last_command = None
self._last_command_time = 0
self._command_queue = []
self._screen_buf = None
self._previous_canvas = None
self._update_cursor = False
def get_input_descriptors(self):
"""
return the fd from our serial device so we get called
on input and responses
"""
return [self._device.fd]
def get_input_nonblocking(self):
"""
Return a (next_input_timeout, keys_pressed, raw_keycodes)
tuple.
The protocol for our device requires waiting for acks between
each command, so this method responds to those as well as key
press and release events.
Key repeat events are simulated here as the device doesn't send
any for us.
raw_keycodes are the bytes of messages we received, which might
not seem to have any correspondence to keys_pressed.
"""
input = []
raw_input = []
timeout = None
while True:
packet = self._read_packet()
if not packet:
break
command, data = packet
if command == self.CMD_KEY_ACTIVITY and data:
d0 = ord(data[0])
if 1 <= d0 <= 12:
release = d0 > 6
keycode = d0 - (release * 6) - 1
key = self.key_map[keycode]
if release:
self.key_repeat.release(key)
else:
input.append(key)
self.key_repeat.press(key)
raw_input.append(d0)
elif command & 0xc0 == 0x40: # "ACK"
if command & 0x3f == self._last_command:
self._send_next_command()
next_repeat = self.key_repeat.next_event()
if next_repeat:
timeout, key = next_repeat
if not timeout:
input.append(key)
self.key_repeat.sent_event()
timeout = None
return timeout, input, []
def _send_next_command(self):
"""
send out the next command in the queue
"""
if not self._command_queue:
self._last_command = None
return
command, data = self._command_queue.pop(0)
self._send_packet(command, data)
self._last_command = command # record command for ACK
self._last_command_time = time.time()
def queue_command(self, command, data):
self._command_queue.append((command, data))
# not waiting? send away!
if self._last_command is None:
self._send_next_command()
def draw_screen(self, size, canvas):
assert size == self.DISPLAY_SIZE
if self._screen_buf:
osb = self._screen_buf
else:
osb = []
sb = []
y = 0
for row in canvas.content():
text = []
for a, cs, run in row:
text.append(run)
if not osb or osb[y] != text:
self.queue_command(self.CMD_LCD_DATA, chr(0) + chr(y) +
"".join(text))
sb.append(text)
y += 1
if (self._previous_canvas and
self._previous_canvas.cursor == canvas.cursor and
(not self._update_cursor or not canvas.cursor)):
pass
elif canvas.cursor is None:
self.queue_command(self.CMD_CURSOR_STYLE, chr(self.CURSOR_NONE))
else:
x, y = canvas.cursor
self.queue_command(self.CMD_CURSOR_POSITION, chr(x) + chr(y))
self.queue_command(self.CMD_CURSOR_STYLE, chr(self.cursor_style))
self._update_cursor = False
self._screen_buf = sb
self._previous_canvas = canvas
def program_cgram(self, index, data):
"""
Program character data. Characters available as chr(0) through
chr(7), and repeated as chr(8) through chr(15).
index -- 0 to 7 index of character to program
data -- list of 8, 6-bit integer values top to bottom with MSB
on the left side of the character.
"""
assert 0 <= index <= 7
assert len(data) == 8
self.queue_command(self.CMD_CGRAM, chr(index) +
"".join([chr(x) for x in data]))
def set_cursor_style(self, style):
"""
style -- CURSOR_BLINKING_BLOCK, CURSOR_UNDERSCORE,
CURSOR_BLINKING_BLOCK_UNDERSCORE or
CURSOR_INVERTING_BLINKING_BLOCK
"""
assert 1 <= style <= 4
self.cursor_style = style
self._update_cursor = True
def set_backlight(self, value):
"""
Set backlight brightness
value -- 0 to 100
"""
assert 0 <= value <= 100
self.queue_command(self.CMD_BACKLIGHT, chr(value))
def set_lcd_contrast(self, value):
"""
value -- 0 to 255
"""
assert 0 <= value <= 255
self.queue_command(self.CMD_LCD_CONTRAST, chr(value))
def set_led_pin(self, led, rg, value):
"""
led -- 0 to 3
rg -- 0 for red, 1 for green
value -- 0 to 100
"""
assert 0 <= led <= 3
assert rg in (0, 1)
assert 0 <= value <= 100
self.queue_command(self.CMD_GPO, chr(12 - 2 * led - rg) +
chr(value))
|
DarkPurpleShadow/ConnectFour
|
urwid/lcd_display.py
|
Python
|
bsd-3-clause
| 16,440
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# This module includes files automatically generated from ply (these end in
# _lextab.py and _parsetab.py). To generate these files, remove them from this
# folder, then build astropy and run the tests in-place:
#
# python setup.py build_ext --inplace
# pytest astropy/units
#
# You can then commit the changes to the re-generated _lextab.py and
# _parsetab.py files.
"""
Handles a "generic" string format for units
"""
import os
import re
import warnings
import sys
from fractions import Fraction
import unicodedata
from . import core, utils
from .base import Base
from astropy.utils import classproperty
from astropy.utils.misc import did_you_mean
def _is_ascii(s):
if sys.version_info >= (3, 7, 0):
return s.isascii()
else:
try:
s.encode('ascii')
return True
except UnicodeEncodeError:
return False
def _to_string(cls, unit):
if isinstance(unit, core.CompositeUnit):
parts = []
if cls._show_scale and unit.scale != 1:
parts.append(f'{unit.scale:g}')
if len(unit.bases):
positives, negatives = utils.get_grouped_by_powers(
unit.bases, unit.powers)
if len(positives):
parts.append(cls._format_unit_list(positives))
elif len(parts) == 0:
parts.append('1')
if len(negatives):
parts.append('/')
unit_list = cls._format_unit_list(negatives)
if len(negatives) == 1:
parts.append(f'{unit_list}')
else:
parts.append(f'({unit_list})')
return ' '.join(parts)
elif isinstance(unit, core.NamedUnit):
return cls._get_unit_name(unit)
class Generic(Base):
"""
A "generic" format.
The syntax of the format is based directly on the FITS standard,
but instead of only supporting the units that FITS knows about, it
supports any unit available in the `astropy.units` namespace.
"""
_show_scale = True
_tokens = (
'DOUBLE_STAR',
'STAR',
'PERIOD',
'SOLIDUS',
'CARET',
'OPEN_PAREN',
'CLOSE_PAREN',
'FUNCNAME',
'UNIT',
'SIGN',
'UINT',
'UFLOAT'
)
@classproperty(lazy=True)
def _all_units(cls):
return cls._generate_unit_names()
@classproperty(lazy=True)
def _units(cls):
return cls._all_units[0]
@classproperty(lazy=True)
def _deprecated_units(cls):
return cls._all_units[1]
@classproperty(lazy=True)
def _functions(cls):
return cls._all_units[2]
@classproperty(lazy=True)
def _parser(cls):
return cls._make_parser()
@classproperty(lazy=True)
def _lexer(cls):
return cls._make_lexer()
@classmethod
def _make_lexer(cls):
from astropy.extern.ply import lex
tokens = cls._tokens
t_STAR = r'\*'
t_PERIOD = r'\.'
t_SOLIDUS = r'/'
t_DOUBLE_STAR = r'\*\*'
t_CARET = r'\^'
t_OPEN_PAREN = r'\('
t_CLOSE_PAREN = r'\)'
# NOTE THE ORDERING OF THESE RULES IS IMPORTANT!!
# Regular expression rules for simple tokens
def t_UFLOAT(t):
r'((\d+\.?\d*)|(\.\d+))([eE][+-]?\d+)?'
if not re.search(r'[eE\.]', t.value):
t.type = 'UINT'
t.value = int(t.value)
elif t.value.endswith('.'):
t.type = 'UINT'
t.value = int(t.value[:-1])
else:
t.value = float(t.value)
return t
def t_UINT(t):
r'\d+'
t.value = int(t.value)
return t
def t_SIGN(t):
r'[+-](?=\d)'
t.value = int(t.value + '1')
return t
# This needs to be a function so we can force it to happen
# before t_UNIT
def t_FUNCNAME(t):
r'((sqrt)|(ln)|(exp)|(log)|(mag)|(dB)|(dex))(?=\ *\()'
return t
def t_UNIT(t):
"%|([YZEPTGMkhdcmu\N{MICRO SIGN}npfazy]?'((?!\\d)\\w)+')|((?!\\d)\\w)+"
t.value = cls._get_unit(t)
return t
t_ignore = ' '
# Error handling rule
def t_error(t):
raise ValueError(
f"Invalid character at col {t.lexpos}")
lexer_exists = os.path.exists(os.path.join(os.path.dirname(__file__),
'generic_lextab.py'))
lexer = lex.lex(optimize=True, lextab='generic_lextab',
outputdir=os.path.dirname(__file__),
reflags=int(re.UNICODE))
if not lexer_exists:
cls._add_tab_header('generic_lextab')
return lexer
@classmethod
def _make_parser(cls):
"""
The grammar here is based on the description in the `FITS
standard
<http://fits.gsfc.nasa.gov/standard30/fits_standard30aa.pdf>`_,
Section 4.3, which is not terribly precise. The exact grammar
is here is based on the YACC grammar in the `unity library
<https://bitbucket.org/nxg/unity/>`_.
This same grammar is used by the `"fits"` and `"vounit"`
formats, the only difference being the set of available unit
strings.
"""
from astropy.extern.ply import yacc
tokens = cls._tokens
def p_main(p):
'''
main : product_of_units
| factor product_of_units
| factor product product_of_units
| division_product_of_units
| factor division_product_of_units
| factor product division_product_of_units
| inverse_unit
| factor inverse_unit
| factor product inverse_unit
| factor
'''
from astropy.units.core import Unit
if len(p) == 2:
p[0] = Unit(p[1])
elif len(p) == 3:
p[0] = Unit(p[1] * p[2])
elif len(p) == 4:
p[0] = Unit(p[1] * p[3])
def p_division_product_of_units(p):
'''
division_product_of_units : division_product_of_units division product_of_units
| product_of_units
'''
from astropy.units.core import Unit
if len(p) == 4:
p[0] = Unit(p[1] / p[3])
else:
p[0] = p[1]
def p_inverse_unit(p):
'''
inverse_unit : division unit_expression
'''
p[0] = p[2] ** -1
def p_factor(p):
'''
factor : factor_fits
| factor_float
| factor_int
'''
p[0] = p[1]
def p_factor_float(p):
'''
factor_float : signed_float
| signed_float UINT signed_int
| signed_float UINT power numeric_power
'''
if cls.name == 'fits':
raise ValueError("Numeric factor not supported by FITS")
if len(p) == 4:
p[0] = p[1] * p[2] ** float(p[3])
elif len(p) == 5:
p[0] = p[1] * p[2] ** float(p[4])
elif len(p) == 2:
p[0] = p[1]
def p_factor_int(p):
'''
factor_int : UINT
| UINT signed_int
| UINT power numeric_power
| UINT UINT signed_int
| UINT UINT power numeric_power
'''
if cls.name == 'fits':
raise ValueError("Numeric factor not supported by FITS")
if len(p) == 2:
p[0] = p[1]
elif len(p) == 3:
p[0] = p[1] ** float(p[2])
elif len(p) == 4:
if isinstance(p[2], int):
p[0] = p[1] * p[2] ** float(p[3])
else:
p[0] = p[1] ** float(p[3])
elif len(p) == 5:
p[0] = p[1] * p[2] ** p[4]
def p_factor_fits(p):
'''
factor_fits : UINT power OPEN_PAREN signed_int CLOSE_PAREN
| UINT power OPEN_PAREN UINT CLOSE_PAREN
| UINT power signed_int
| UINT power UINT
| UINT SIGN UINT
| UINT OPEN_PAREN signed_int CLOSE_PAREN
'''
if p[1] != 10:
if cls.name == 'fits':
raise ValueError("Base must be 10")
else:
return
if len(p) == 4:
if p[2] in ('**', '^'):
p[0] = 10 ** p[3]
else:
p[0] = 10 ** (p[2] * p[3])
elif len(p) == 5:
p[0] = 10 ** p[3]
elif len(p) == 6:
p[0] = 10 ** p[4]
def p_product_of_units(p):
'''
product_of_units : unit_expression product product_of_units
| unit_expression product_of_units
| unit_expression
'''
if len(p) == 2:
p[0] = p[1]
elif len(p) == 3:
p[0] = p[1] * p[2]
else:
p[0] = p[1] * p[3]
def p_unit_expression(p):
'''
unit_expression : function
| unit_with_power
| OPEN_PAREN product_of_units CLOSE_PAREN
'''
if len(p) == 2:
p[0] = p[1]
else:
p[0] = p[2]
def p_unit_with_power(p):
'''
unit_with_power : UNIT power numeric_power
| UNIT numeric_power
| UNIT
'''
if len(p) == 2:
p[0] = p[1]
elif len(p) == 3:
p[0] = p[1] ** p[2]
else:
p[0] = p[1] ** p[3]
def p_numeric_power(p):
'''
numeric_power : sign UINT
| OPEN_PAREN paren_expr CLOSE_PAREN
'''
if len(p) == 3:
p[0] = p[1] * p[2]
elif len(p) == 4:
p[0] = p[2]
def p_paren_expr(p):
'''
paren_expr : sign UINT
| signed_float
| frac
'''
if len(p) == 3:
p[0] = p[1] * p[2]
else:
p[0] = p[1]
def p_frac(p):
'''
frac : sign UINT division sign UINT
'''
p[0] = Fraction(p[1] * p[2], p[4] * p[5])
def p_sign(p):
'''
sign : SIGN
|
'''
if len(p) == 2:
p[0] = p[1]
else:
p[0] = 1
def p_product(p):
'''
product : STAR
| PERIOD
'''
pass
def p_division(p):
'''
division : SOLIDUS
'''
pass
def p_power(p):
'''
power : DOUBLE_STAR
| CARET
'''
p[0] = p[1]
def p_signed_int(p):
'''
signed_int : SIGN UINT
'''
p[0] = p[1] * p[2]
def p_signed_float(p):
'''
signed_float : sign UINT
| sign UFLOAT
'''
p[0] = p[1] * p[2]
def p_function_name(p):
'''
function_name : FUNCNAME
'''
p[0] = p[1]
def p_function(p):
'''
function : function_name OPEN_PAREN main CLOSE_PAREN
'''
if p[1] == 'sqrt':
p[0] = p[3] ** 0.5
return
elif p[1] in ('mag', 'dB', 'dex'):
function_unit = cls._parse_unit(p[1])
# In Generic, this is callable, but that does not have to
# be the case in subclasses (e.g., in VOUnit it is not).
if callable(function_unit):
p[0] = function_unit(p[3])
return
raise ValueError("'{}' is not a recognized function".format(p[1]))
def p_error(p):
raise ValueError()
parser_exists = os.path.exists(os.path.join(os.path.dirname(__file__),
'generic_parsetab.py'))
parser = yacc.yacc(debug=False, tabmodule='generic_parsetab',
outputdir=os.path.dirname(__file__))
if not parser_exists:
cls._add_tab_header('generic_parsetab')
return parser
@classmethod
def _get_unit(cls, t):
try:
return cls._parse_unit(t.value)
except ValueError as e:
raise ValueError(
"At col {}, {}".format(
t.lexpos, str(e)))
@classmethod
def _parse_unit(cls, s, detailed_exception=True):
registry = core.get_current_unit_registry().registry
if s == '%':
return registry['percent']
if not _is_ascii(s):
if s[0] == '\N{MICRO SIGN}':
s = 'u' + s[1:]
if s[-1] == '\N{GREEK CAPITAL LETTER OMEGA}':
s = s[:-1] + 'Ohm'
elif s[-1] == '\N{LATIN CAPITAL LETTER A WITH RING ABOVE}':
s = s[:-1] + 'Angstrom'
if s in registry:
return registry[s]
if detailed_exception:
raise ValueError(
'{} is not a valid unit. {}'.format(
s, did_you_mean(s, registry)))
else:
raise ValueError()
_translations = str.maketrans({
'\N{GREEK SMALL LETTER MU}': '\N{MICRO SIGN}',
'\N{MINUS SIGN}': '-',
})
"""Character translations that should be applied before parsing a string.
Note that this does explicitly *not* generally translate MICRO SIGN to u,
since then a string like 'µ' would be interpreted as unit mass.
"""
_superscripts = (
'\N{SUPERSCRIPT MINUS}'
'\N{SUPERSCRIPT PLUS SIGN}'
'\N{SUPERSCRIPT ZERO}'
'\N{SUPERSCRIPT ONE}'
'\N{SUPERSCRIPT TWO}'
'\N{SUPERSCRIPT THREE}'
'\N{SUPERSCRIPT FOUR}'
'\N{SUPERSCRIPT FIVE}'
'\N{SUPERSCRIPT SIX}'
'\N{SUPERSCRIPT SEVEN}'
'\N{SUPERSCRIPT EIGHT}'
'\N{SUPERSCRIPT NINE}'
)
_superscript_translations = str.maketrans(_superscripts, '-+0123456789')
_regex_superscript = re.compile(f'[{_superscripts}]+')
_regex_deg = re.compile('°([CF])?')
@classmethod
def _convert_superscript(cls, m):
return '({})'.format(
m.group().translate(cls._superscript_translations)
)
@classmethod
def _convert_deg(cls, m):
if len(m.string) == 1:
return 'deg'
return m.string.replace('°', 'deg_')
@classmethod
def parse(cls, s, debug=False):
if not isinstance(s, str):
s = s.decode('ascii')
elif not _is_ascii(s):
# common normalization of unicode strings to avoid
# having to deal with multiple representations of
# the same character. This normalizes to "composed" form
# and will e.g. convert OHM SIGN to GREEK CAPITAL LETTER OMEGA
s = unicodedata.normalize('NFC', s)
# Translate some basic unicode items that we'd like to support on
# input but are not standard.
s = s.translate(cls._translations)
# TODO: might the below be better done in the parser/lexer?
# Translate superscripts to parenthesized numbers; this ensures
# that mixes of superscripts and regular numbers fail.
s = cls._regex_superscript.sub(cls._convert_superscript, s)
# Translate possible degrees.
s = cls._regex_deg.sub(cls._convert_deg, s)
result = cls._do_parse(s, debug=debug)
# Check for excess solidi, but exclude fractional exponents (accepted)
n_slashes = s.count('/')
if n_slashes > 1 and (n_slashes - len(re.findall(r'\(\d+/\d+\)', s))) > 1:
warnings.warn(
"'{}' contains multiple slashes, which is "
"discouraged by the FITS standard".format(s),
core.UnitsWarning)
return result
@classmethod
def _do_parse(cls, s, debug=False):
try:
# This is a short circuit for the case where the string
# is just a single unit name
return cls._parse_unit(s, detailed_exception=False)
except ValueError as e:
try:
return cls._parser.parse(s, lexer=cls._lexer, debug=debug)
except ValueError as e:
if str(e):
raise
else:
raise ValueError(f"Syntax error parsing unit '{s}'")
@classmethod
def _get_unit_name(cls, unit):
return unit.get_format_name('generic')
@classmethod
def _format_unit_list(cls, units):
out = []
units.sort(key=lambda x: cls._get_unit_name(x[0]).lower())
for base, power in units:
if power == 1:
out.append(cls._get_unit_name(base))
else:
power = utils.format_power(power)
if '/' in power or '.' in power:
out.append('{}({})'.format(
cls._get_unit_name(base), power))
else:
out.append('{}{}'.format(
cls._get_unit_name(base), power))
return ' '.join(out)
@classmethod
def to_string(cls, unit):
return _to_string(cls, unit)
class Unscaled(Generic):
"""
A format that doesn't display the scale part of the unit, other
than that, it is identical to the `Generic` format.
This is used in some error messages where the scale is irrelevant.
"""
_show_scale = False
|
MSeifert04/astropy
|
astropy/units/format/generic.py
|
Python
|
bsd-3-clause
| 18,514
|
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import collections
import math
import sys
from telemetry.timeline import model as model_module
from telemetry.value import scalar
from telemetry.value import list_of_scalar_values
from telemetry.web_perf.metrics import timeline_based_metric
TOPLEVEL_GL_CATEGORY = 'gpu_toplevel'
TOPLEVEL_SERVICE_CATEGORY = 'disabled-by-default-gpu.service'
TOPLEVEL_DEVICE_CATEGORY = 'disabled-by-default-gpu.device'
SERVICE_FRAME_END_MARKER = (TOPLEVEL_SERVICE_CATEGORY, 'SwapBuffer')
DEVICE_FRAME_END_MARKER = (TOPLEVEL_DEVICE_CATEGORY, 'SwapBuffer')
TRACKED_GL_CONTEXT_NAME = { 'RenderCompositor': 'render_compositor',
'BrowserCompositor': 'browser_compositor',
'Compositor': 'browser_compositor' }
def _CalculateFrameTimes(events_per_frame, event_data_func):
"""Given a list of events per frame and a function to extract event time data,
returns a list of frame times."""
times_per_frame = []
for event_list in events_per_frame:
event_times = [event_data_func(event) for event in event_list]
times_per_frame.append(sum(event_times))
return times_per_frame
def _CPUFrameTimes(events_per_frame):
"""Given a list of events per frame, returns a list of CPU frame times."""
# CPU event frames are calculated using the event thread duration.
# Some platforms do not support thread_duration, convert those to 0.
return _CalculateFrameTimes(events_per_frame,
lambda event : event.thread_duration or 0)
def _GPUFrameTimes(events_per_frame):
"""Given a list of events per frame, returns a list of GPU frame times."""
# GPU event frames are asynchronous slices which use the event duration.
return _CalculateFrameTimes(events_per_frame,
lambda event : event.duration)
def TimelineName(name, source_type, value_type):
"""Constructs the standard name given in the timeline.
Args:
name: The name of the timeline, for example "total", or "render_compositor".
source_type: One of "cpu", "gpu" or None. None is only used for total times.
value_type: the type of value. For example "mean", "stddev"...etc.
"""
if source_type:
return '%s_%s_%s_time' % (name, value_type, source_type)
else:
return '%s_%s_time' % (name, value_type)
class GPUTimelineMetric(timeline_based_metric.TimelineBasedMetric):
"""Computes GPU based metrics."""
def __init__(self):
super(GPUTimelineMetric, self).__init__()
def AddResults(self, model, _, interaction_records, results):
self.VerifyNonOverlappedRecords(interaction_records)
service_times = self._CalculateGPUTimelineData(model)
for value_item, durations in service_times.iteritems():
count = len(durations)
avg = 0.0
stddev = 0.0
maximum = 0.0
if count:
avg = sum(durations) / count
stddev = math.sqrt(sum((d - avg) ** 2 for d in durations) / count)
maximum = max(durations)
name, src = value_item
if src:
frame_times_name = '%s_%s_frame_times' % (name, src)
else:
frame_times_name = '%s_frame_times' % (name)
if durations:
results.AddValue(list_of_scalar_values.ListOfScalarValues(
results.current_page, frame_times_name, 'ms', durations))
results.AddValue(scalar.ScalarValue(results.current_page,
TimelineName(name, src, 'max'),
'ms', maximum))
results.AddValue(scalar.ScalarValue(results.current_page,
TimelineName(name, src, 'mean'),
'ms', avg))
results.AddValue(scalar.ScalarValue(results.current_page,
TimelineName(name, src, 'stddev'),
'ms', stddev))
def _CalculateGPUTimelineData(self, model):
"""Uses the model and calculates the times for various values for each
frame. The return value will be a dictionary of the following format:
{
(EVENT_NAME1, SRC1_TYPE): [FRAME0_TIME, FRAME1_TIME...etc.],
(EVENT_NAME2, SRC2_TYPE): [FRAME0_TIME, FRAME1_TIME...etc.],
}
Events:
swap - The time in milliseconds between each swap marker.
total - The amount of time spent in the renderer thread.
TRACKED_NAMES: Using the TRACKED_GL_CONTEXT_NAME dict, we
include the traces per frame for the
tracked name.
Source Types:
None - This will only be valid for the "swap" event.
cpu - For an event, the "cpu" source type signifies time spent on the
gpu thread using the CPU. This uses the "gpu.service" markers.
gpu - For an event, the "gpu" source type signifies time spent on the
gpu thread using the GPU. This uses the "gpu.device" markers.
"""
all_service_events = []
current_service_frame_end = sys.maxint
current_service_events = []
all_device_events = []
current_device_frame_end = sys.maxint
current_device_events = []
tracked_events = {}
tracked_events.update(
dict([((value, 'cpu'), [])
for value in TRACKED_GL_CONTEXT_NAME.itervalues()]))
tracked_events.update(
dict([((value, 'gpu'), [])
for value in TRACKED_GL_CONTEXT_NAME.itervalues()]))
# These will track traces within the current frame.
current_tracked_service_events = collections.defaultdict(list)
current_tracked_device_events = collections.defaultdict(list)
event_iter = model.IterAllEvents(
event_type_predicate=model_module.IsSliceOrAsyncSlice)
for event in event_iter:
# Look for frame end markers
if (event.category, event.name) == SERVICE_FRAME_END_MARKER:
current_service_frame_end = event.end
elif (event.category, event.name) == DEVICE_FRAME_END_MARKER:
current_device_frame_end = event.end
# Track all other toplevel gl category markers
elif event.args.get('gl_category', None) == TOPLEVEL_GL_CATEGORY:
base_name = event.name
dash_index = base_name.rfind('-')
if dash_index != -1:
base_name = base_name[:dash_index]
tracked_name = TRACKED_GL_CONTEXT_NAME.get(base_name, None)
if event.category == TOPLEVEL_SERVICE_CATEGORY:
# Check if frame has ended.
if event.start >= current_service_frame_end:
if current_service_events:
all_service_events.append(current_service_events)
for value in TRACKED_GL_CONTEXT_NAME.itervalues():
tracked_events[(value, 'cpu')].append(
current_tracked_service_events[value])
current_service_events = []
current_service_frame_end = sys.maxint
current_tracked_service_events.clear()
current_service_events.append(event)
if tracked_name:
current_tracked_service_events[tracked_name].append(event)
elif event.category == TOPLEVEL_DEVICE_CATEGORY:
# Check if frame has ended.
if event.start >= current_device_frame_end:
if current_device_events:
all_device_events.append(current_device_events)
for value in TRACKED_GL_CONTEXT_NAME.itervalues():
tracked_events[(value, 'gpu')].append(
current_tracked_device_events[value])
current_device_events = []
current_device_frame_end = sys.maxint
current_tracked_device_events.clear()
current_device_events.append(event)
if tracked_name:
current_tracked_device_events[tracked_name].append(event)
# Append Data for Last Frame.
if current_service_events:
all_service_events.append(current_service_events)
for value in TRACKED_GL_CONTEXT_NAME.itervalues():
tracked_events[(value, 'cpu')].append(
current_tracked_service_events[value])
if current_device_events:
all_device_events.append(current_device_events)
for value in TRACKED_GL_CONTEXT_NAME.itervalues():
tracked_events[(value, 'gpu')].append(
current_tracked_device_events[value])
# Calculate Mean Frame Time for the CPU side.
frame_times = []
if all_service_events:
prev_frame_end = all_service_events[0][0].start
for event_list in all_service_events:
last_service_event_in_frame = event_list[-1]
frame_times.append(last_service_event_in_frame.end - prev_frame_end)
prev_frame_end = last_service_event_in_frame.end
# Create the timeline data dictionary for service side traces.
total_frame_value = ('swap', None)
cpu_frame_value = ('total', 'cpu')
gpu_frame_value = ('total', 'gpu')
timeline_data = {}
timeline_data[total_frame_value] = frame_times
timeline_data[cpu_frame_value] = _CPUFrameTimes(all_service_events)
for value in TRACKED_GL_CONTEXT_NAME.itervalues():
cpu_value = (value, 'cpu')
timeline_data[cpu_value] = _CPUFrameTimes(tracked_events[cpu_value])
# Add in GPU side traces if it was supported (IE. device traces exist).
if all_device_events:
timeline_data[gpu_frame_value] = _GPUFrameTimes(all_device_events)
for value in TRACKED_GL_CONTEXT_NAME.itervalues():
gpu_value = (value, 'gpu')
tracked_gpu_event = tracked_events[gpu_value]
timeline_data[gpu_value] = _GPUFrameTimes(tracked_gpu_event)
return timeline_data
|
hefen1/chromium
|
tools/telemetry/telemetry/web_perf/metrics/gpu_timeline.py
|
Python
|
bsd-3-clause
| 9,768
|
#!/usr/bin/env python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This module is used for version 2 of the Google Data APIs.
__author__ = 'j.s@google.com (Jeff Scudder)'
import unittest
# Tests for v2 features.
import atom_tests.core_test
import atom_tests.data_test
import atom_tests.http_core_test
import atom_tests.auth_test
import atom_tests.mock_http_core_test
import atom_tests.client_test
import gdata_tests.client_test
import gdata_tests.core_test
import gdata_tests.data_test
import gdata_tests.data_smoke_test
import gdata_tests.client_smoke_test
import gdata_tests.live_client_test
import gdata_tests.gauth_test
import gdata_tests.blogger.data_test
import gdata_tests.blogger.live_client_test
import gdata_tests.maps.data_test
import gdata_tests.maps.live_client_test
import gdata_tests.spreadsheets.data_test
import gdata_tests.spreadsheets.live_client_test
import gdata_tests.projecthosting.data_test
import gdata_tests.projecthosting.live_client_test
import gdata_tests.sites.data_test
import gdata_tests.sites.live_client_test
import gdata_tests.analytics.data_test
import gdata_tests.analytics.live_client_test
import gdata_tests.contacts.live_client_test
import gdata_tests.calendar_resource.live_client_test
import gdata_tests.calendar_resource.data_test
import gdata_tests.apps.emailsettings.data_test
import gdata_tests.apps.emailsettings.live_client_test
def suite():
return unittest.TestSuite((
atom_tests.core_test.suite(),
atom_tests.data_test.suite(),
atom_tests.http_core_test.suite(),
atom_tests.auth_test.suite(),
atom_tests.mock_http_core_test.suite(),
atom_tests.client_test.suite(),
gdata_tests.client_test.suite(),
gdata_tests.core_test.suite(),
gdata_tests.data_test.suite(),
gdata_tests.data_smoke_test.suite(),
gdata_tests.client_smoke_test.suite(),
gdata_tests.live_client_test.suite(),
gdata_tests.gauth_test.suite(),
gdata_tests.blogger.data_test.suite(),
gdata_tests.blogger.live_client_test.suite(),
gdata_tests.maps.data_test.suite(),
gdata_tests.maps.live_client_test.suite(),
gdata_tests.spreadsheets.data_test.suite(),
gdata_tests.spreadsheets.live_client_test.suite(),
gdata_tests.projecthosting.data_test.suite(),
gdata_tests.projecthosting.live_client_test.suite(),
gdata_tests.sites.data_test.suite(),
gdata_tests.sites.live_client_test.suite(),
gdata_tests.analytics.data_test.suite(),
gdata_tests.analytics.live_client_test.suite(),
gdata_tests.contacts.live_client_test.suite(),
gdata_tests.calendar_resource.live_client_test.suite(),
gdata_tests.calendar_resource.data_test.suite(),
gdata_tests.apps.emailsettings.data_test.suite(),
gdata_tests.apps.emailsettings.live_client_test.suite(),))
if __name__ == '__main__':
unittest.TextTestRunner().run(suite())
|
dekom/threepress-bookworm-read-only
|
bookworm/gdata/tests/all_tests.py
|
Python
|
bsd-3-clause
| 3,464
|
"""
Polish-specific form helpers
"""
import re
from django.newforms import ValidationError
from django.newforms.fields import Select, RegexField
from django.utils.translation import ugettext_lazy as _
class PLVoivodeshipSelect(Select):
"""
A select widget with list of Polish voivodeships (administrative provinces)
as choices.
"""
def __init__(self, attrs=None):
from pl_voivodeships import VOIVODESHIP_CHOICES
super(PLVoivodeshipSelect, self).__init__(attrs, choices=VOIVODESHIP_CHOICES)
class PLAdministrativeUnitSelect(Select):
"""
A select widget with list of Polish administrative units as choices.
"""
def __init__(self, attrs=None):
from pl_administrativeunits import ADMINISTRATIVE_UNIT_CHOICES
super(PLAdministrativeUnitSelect, self).__init__(attrs, choices=ADMINISTRATIVE_UNIT_CHOICES)
class PLNationalIdentificationNumberField(RegexField):
"""
A form field that validates as Polish Identification Number (PESEL).
Checks the following rules:
* the length consist of 11 digits
* has a valid checksum
The algorithm is documented at http://en.wikipedia.org/wiki/PESEL.
"""
default_error_messages = {
'invalid': _(u'National Identification Number consists of 11 digits.'),
'checksum': _(u'Wrong checksum for the National Identification Number.'),
}
def __init__(self, *args, **kwargs):
super(PLNationalIdentificationNumberField, self).__init__(r'^\d{11}$',
max_length=None, min_length=None, *args, **kwargs)
def clean(self,value):
super(PLNationalIdentificationNumberField, self).clean(value)
if not self.has_valid_checksum(value):
raise ValidationError(self.error_messages['checksum'])
return u'%s' % value
def has_valid_checksum(self, number):
"""
Calculates a checksum with the provided algorithm.
"""
multiple_table = (1, 3, 7, 9, 1, 3, 7, 9, 1, 3, 1)
result = 0
for i in range(len(number)):
result += int(number[i]) * multiple_table[i]
return result % 10 == 0
class PLTaxNumberField(RegexField):
"""
A form field that validates as Polish Tax Number (NIP).
Valid forms are: XXX-XXX-YY-YY or XX-XX-YYY-YYY.
Checksum algorithm based on documentation at
http://wipos.p.lodz.pl/zylla/ut/nip-rego.html
"""
default_error_messages = {
'invalid': _(u'Enter a tax number field (NIP) in the format XXX-XXX-XX-XX or XX-XX-XXX-XXX.'),
'checksum': _(u'Wrong checksum for the Tax Number (NIP).'),
}
def __init__(self, *args, **kwargs):
super(PLTaxNumberField, self).__init__(r'^\d{3}-\d{3}-\d{2}-\d{2}$|^\d{2}-\d{2}-\d{3}-\d{3}$',
max_length=None, min_length=None, *args, **kwargs)
def clean(self,value):
super(PLTaxNumberField, self).clean(value)
value = re.sub("[-]", "", value)
if not self.has_valid_checksum(value):
raise ValidationError(self.error_messages['checksum'])
return u'%s' % value
def has_valid_checksum(self, number):
"""
Calculates a checksum with the provided algorithm.
"""
multiple_table = (6, 5, 7, 2, 3, 4, 5, 6, 7)
result = 0
for i in range(len(number)-1):
result += int(number[i]) * multiple_table[i]
result %= 11
if result == int(number[-1]):
return True
else:
return False
class PLNationalBusinessRegisterField(RegexField):
"""
A form field that validated as Polish National Official Business Register Number (REGON)
Valid forms are: 7 or 9 digits number
More on the field: http://www.stat.gov.pl/bip/regon_ENG_HTML.htm
The checksum algorithm is documented at http://wipos.p.lodz.pl/zylla/ut/nip-rego.html
"""
default_error_messages = {
'invalid': _(u'National Business Register Number (REGON) consists of 7 or 9 digits.'),
'checksum': _(u'Wrong checksum for the National Business Register Number (REGON).'),
}
def __init__(self, *args, **kwargs):
super(PLNationalBusinessRegisterField, self).__init__(r'^\d{7,9}$',
max_length=None, min_length=None, *args, **kwargs)
def clean(self,value):
super(PLNationalBusinessRegisterField, self).clean(value)
if not self.has_valid_checksum(value):
raise ValidationError(self.error_messages['checksum'])
return u'%s' % value
def has_valid_checksum(self, number):
"""
Calculates a checksum with the provided algorithm.
"""
multiple_table_7 = (2, 3, 4, 5, 6, 7)
multiple_table_9 = (8, 9, 2, 3, 4, 5, 6, 7)
result = 0
if len(number) == 7:
multiple_table = multiple_table_7
else:
multiple_table = multiple_table_9
for i in range(len(number)-1):
result += int(number[i]) * multiple_table[i]
result %= 11
if result == 10:
result = 0
if result == int(number[-1]):
return True
else:
return False
class PLPostalCodeField(RegexField):
"""
A form field that validates as Polish postal code.
Valid code is XX-XXX where X is digit.
"""
default_error_messages = {
'invalid': _(u'Enter a postal code in the format XX-XXX.'),
}
def __init__(self, *args, **kwargs):
super(PLPostalCodeField, self).__init__(r'^\d{2}-\d{3}$',
max_length=None, min_length=None, *args, **kwargs)
|
diofeher/django-nfa
|
django/contrib/localflavor/pl/forms.py
|
Python
|
bsd-3-clause
| 5,591
|
# Copyright (c) 2014, the GREAT3 executive committee (http://www.great3challenge.info/?q=contacts)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted
# provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions
# and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of
# conditions and the following disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to
# endorse or promote products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
# FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
# IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
# OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Script for making catalogs of galaxy fit data corresponding to a real galaxy training set used by
GalSim. It has to collect information from several large files."""
import pyfits
import numpy as np
# Define filenames, etc.
galsim_catfile = 'real_galaxy_catalog_23.5.fits'
fit_catfiles = ['BRIGHTtotalRAW00000.26113.fits',
'totalRAW00000.29949.fits.gz']
n_catfiles = len(fit_catfiles)
cosmos_catfile = 'lensing14.fits.gz'
out_fitfile = 'real_galaxy_catalog_23.5_fits.fits'
out_catfile = 'real_galaxy_catalog_23.5.fits'
# Read in real galaxy catalog.
galsim_cat = pyfits.getdata(galsim_catfile)
n_galsim_cat = len(galsim_cat)
print 'Read in ',n_galsim_cat,' from GalSim catalog ',galsim_catfile
galsim_ident = galsim_cat.field('ident')
# Fields: ('IDENT', 'RA', 'DEC', 'MAG', 'BAND', 'WEIGHT', 'GAL_FILENAME', 'PSF_FILENAME', 'GAL_HDU',
# 'PSF_HDU', 'PIXEL_SCALE', 'NOISE_MEAN', 'NOISE_VARIANCE')
# Read in the full COSMOS catalog.
cosmos_cat = pyfits.getdata(cosmos_catfile)
n_cosmos_cat = len(cosmos_cat)
print 'Read in ',n_cosmos_cat,' from COSMOS catalog ',cosmos_catfile
# Fields: ('IDENT', 'MAG_AUTO', 'FLUX_AUTO', 'MAGERR_AUTO', 'FLUX_RADIUS', 'FLUXERR_AUTO',
# 'KRON_RADIUS', 'MU_MAX', 'MU_CLASS', 'CLEAN', 'GOOD', 'FLAGS', 'SN', 'SN_NON_CORR', 'FWHM_IMAGE',
# 'ALPHA_J2000', 'DELTA_J2000', 'X_IMAGE', 'Y_IMAGE', 'A_IMAGE', 'B_IMAGE', 'THETA_IMAGE',
# 'PETRO_RADIUS', 'RRG_XX', 'RRG_YY', 'XXC', 'YYC', 'XYC', 'D', 'E1_R', 'E2_R', 'E1_RU', 'E2_RU',
# 'GAMMA1', 'GAMMA2', 'FOCUS_MODEL', 'IXX', 'IYY', 'IXY', 'WEIGHT_FUNCT_RADIUS', 'VAR_E1', 'VAR_E2',
# 'BOX', 'SPECZ', 'SPECZ_MARA', 'SPECZ_CLASS', 'SPECZ_ORIGIN', 'GOOD_SPECZ', 'SPECZ_BL_AGN',
# 'SPECZ_SELECTION', 'MIPS_Z', 'MIPS_LOG_L', 'MIPS_MASS', 'ZEST_TYPE', 'ZEST_BULGE',
# 'ZEST_IRREGULARITY', 'ZEST_ELONGATION', 'ZEST_GINI', 'ZEST_M20', 'ZEST_CONCENTRATION',
# 'ZEST_ASYMMETRY', 'BULGE', 'KT', 'OLD_ZPHOT', 'OLD_GOOD_ZPHOT', 'HL_KPC', 'MARA_AGN',
# 'MARA_AGN_ZPHOT', 'MARA_AGN_ZPHOT_LOW68', 'MARA_AGN_ZPHOT_HIGH68', 'KNUD_AGN', 'G1_TS', 'G2_TS',
# 'WEIGHT_TS', 'CHANDRA_GOOD', 'CHANDRA_AGN', 'CHANDRA_LX_HARD', 'CHANDRA_LX_SOFT',
# 'CHANDRA_LX_FULL', 'CHANDRA_ZETA', 'CHANDRA_ZSPEC', 'CHANDRA_CLASSZSPEC', 'CHANDRA_MODEL',
# 'CHANDRA_XMM_ID', 'XMM_GOOD', 'XMM_AGN', 'XMM_LX_HARD', 'XMM_LX_SOFT', 'XMM_LX_FULL', 'XMM_ZETA',
# 'XMM_ZSPEC', 'XMM_CLASSZSPEC', 'XMM_MODEL', 'XMM_CHANDRA_ID', 'EZE_AGN_SPECZ', 'EZE_AGN_PHOTOZ',
# 'EZE_LX', 'EZE_HR', 'EZE_SPECZ', 'EZE_PHOTOZ', 'K_CFHT', 'MATCH_CFHT', 'ERR_K_CFHT',
# 'KEVIN_MSTAR', 'KEVIN_MSTAR2', 'KEVIN_MASSERR', 'OLIV_MSTAR', 'MVIR', 'COLOR', 'TYPE2_ZPHOT_MARA',
# 'PETER_PASSIVE', 'PETER_ANGLE_PA', 'PETER_ELLIP', 'PHOTOZ_ORDER', 'PHOTOZ_NON_COMB',
# 'PHOTOZ_NON_COMB_LOW_68', 'PHOTOZ_NON_COMB_HIGH_68', 'PBZK', 'PBZK_ZPHOT', 'PBZK_MK', 'PBZK_MASS',
# 'SIGNALTONOISERATIO', 'QUASIPETROSIANAREAFRACTION', 'QUASIPETROSIANFRACTION', 'AXISRATIO', 'GINI',
# 'CONCENTRATION', 'BOB_E', 'BOB_GOOD', 'BOB_S0', 'FLUX_GIM2D', 'R_GIM2D', 'ELL_GIM2D', 'PA_GIM2D',
# 'DX_GIM2D', 'DY_GIM2D', 'SERSIC_N_GIM2D', 'R_0P5_GIM2D', 'CHI_GIM2D', 'CECILE_SL_Z',
# 'CECILE_SL_SAT', 'CECILE_SL', 'CECILE_SL_FLAG1', 'CECILE_SL_FLAG2', 'ISOLATED', 'BCG_SCALE',
# 'BCG_R200', 'ALL_P_MEM', 'ALL_GROUP_ID', 'N_GROUP_OVERLAP', 'BEST_P_MEM', 'BEST_GROUP_ID',
# 'ZPHOT', 'TYPE', 'ZPDF', 'PHOTZ_LOW_68', 'PHOTZ_HIGH_68', 'CHI', 'MODD', 'EBV', 'NBFILT',
# 'ZMINCHI2', 'ZL68_MINCHI2', 'ZU68_MINCHI2', 'ZP2', 'CHI2', 'NUV', 'U', 'SUBARU_R', 'SUBARU_I',
# 'J_WFCAM', 'K_WIRCAM', 'M36', 'DNUV', 'DU', 'DJ_WFCAM', 'DK_WIRCAM', 'DM36', 'AUTO_OFFSET',
# 'AUTO_FLAG', 'MNUV', 'MU', 'MB', 'MV', 'MG', 'MR', 'MI', 'MJ', 'MK', 'MNUV_MR', 'SFR_MED',
# 'STR_INF', 'SFR_SUP', 'SSFR_MED', 'SSFR_INF', 'SSFR_SUP', 'MATCH_S', 'MASK_S', 'GOOD_ZPHOT_LENS',
# 'GOOD_ZPHOT_SOURCE')
# That's a lot of info, so let's just pick out the things we care about: galaxy identifier, apparent
# magnitude, size, photo-z.
cos_ident = cosmos_cat.field('ident')
cos_mag_auto = cosmos_cat.field('mag_auto')
cos_flux_rad = cosmos_cat.field('flux_radius')
cos_zphot = cosmos_cat.field('zphot')
# Read in catalogs with fit parameters from Lackner & Gunn.
print "Reading in catalogs of fit parameters"
n_fit_tot = 0
for i_cat in range(n_catfiles):
# Get this catalog
dat = pyfits.getdata(fit_catfiles[i_cat])
n = len(dat)
print "Read in ",n," fit results from file ",fit_catfiles[i_cat]
# Just extract the columns we want, and append to previous if i_cat!=0.
if i_cat == 0:
fit_ident = dat.field('ident')
fit_sersicfit = dat.field('sersicfit')
fit_bulgefit = dat.field('bulgefit')
fit_status = dat.field('mpfit_status')
fit_mag_auto = dat.field('mag_auto')
fit_mad_s = dat.field('mad_sersic_mask')
fit_mad_b = dat.field('mad_dvcb_mask')
fit_dvc_btt = dat.field('dvc_btt')
if i_cat > 0:
fit_ident = np.append(fit_ident, dat.field('galid'))
fit_sersicfit = np.append(fit_sersicfit, dat.field('sersicfit'), axis=0)
fit_bulgefit = np.append(fit_bulgefit, dat.field('bulgefit'), axis=0)
fit_status = np.append(fit_status, dat.field('mpfit_status'), axis=0)
fit_mag_auto = np.append(fit_mag_auto, np.zeros_like(dat.field('galid')), axis=0)
fit_mad_s = np.append(fit_mad_s, dat.field('mad_sersic_mask'), axis=0)
fit_mad_b = np.append(fit_mad_b, dat.field('mad_dvcb_mask'), axis=0)
fit_dvc_btt = np.append(fit_dvc_btt, dat.field('dvc_btt'), axis=0)
# Increment counter.
n_fit_tot += n
# Unfortunately, the files do not have the same column names. Here are their contents -
# Fields in first file: ('IDENT', 'MAG_AUTO', 'FLUX_AUTO', 'MAGERR_AUTO', 'FLUX_RADIUS',
# 'FLUXERR_AUTO', 'KRON_RADIUS', 'MU_MAX', 'MU_CLASS', 'CLEAN', 'GOOD', 'FLAGS', 'SN',
# 'SN_NON_CORR', 'FWHM_IMAGE', 'ALPHA_J2000', 'DELTA_J2000', 'X_IMAGE', 'Y_IMAGE', 'A_IMAGE',
# 'B_IMAGE', 'THETA_IMAGE', 'PETRO_RADIUS', 'D', 'E1_R', 'E2_R', 'E1_RU', 'E2_RU', 'GAMMA1',
# 'GAMMA2', 'FOCUS_MODEL', 'IXX', 'IYY', 'IXY', 'WEIGHT_FUNCT_RADIUS', 'VAR_E1', 'VAR_E2',
# 'BOX', 'SPECZ', 'SPECZ_MARA', 'SPECZ_CLASS', 'SPECZ_ORIGIN', 'GOOD_SPECZ', 'SPECZ_BL_AGN',
# 'FORS2_OBJECT_FLAG', 'MIPS_Z', 'MIPS_LOG_L', 'MIPS_MASS', 'ZEST_TYPE', 'ZEST_BULGE',
# 'ZEST_IRREGULARITY', 'ZEST_ELONGATION', 'ZEST_GINI', 'ZEST_M20', 'ZEST_CONCENTRATION',
# 'ZEST_ASYMMETRY', 'BULGE', 'KT', 'OLD_ZPHOT', 'OLD_GOOD_ZPHOT', 'HL_KPC', 'CHANDRA_GOOD',
# 'CHANDRA_AGN', 'CHANDRA_LX_HARD', 'CHANDRA_LX_SOFT', 'CHANDRA_LX_FULL', 'CHANDRA_ZETA',
# 'CHANDRA_ZSPEC', 'CHANDRA_CLASSZSPEC', 'CHANDRA_MODEL', 'CHANDRA_TYPE', 'CHANDRA_LUSSO_MASS',
# 'XMM_GOOD', 'XMM_AGN', 'XMM_LX_HARD', 'XMM_LX_SOFT', 'XMM_ZETA', 'XMM_ZSPEC',
# 'XMM_CLASSZSPEC', 'XMM_MODEL', 'XMM_TYPE', 'XMM_LUSSO_MASS', 'AGN_GOOD', 'AGN_Z', 'AGN_TYPE',
# 'AGN_LX', 'AGN_LX_SOFT', 'AGN_LX_HARD', 'AGN_LUSSO_MASS', 'BOSS_LRG', 'K_CFHT', 'MATCH_CFHT',
# 'ERR_K_CFHT', 'KEVIN_MSTAR', 'KEVIN_MSTAR2', 'KEVIN_MASSERR', 'KEVIN_QUENCH_FLAG', 'MVIR',
# 'TYPE2_ZPHOT_MARA', 'PHOTOZ_ORDER', 'PHOTOZ_NON_COMB', 'PHOTOZ_NON_COMB_LOW_68',
# 'PHOTOZ_NON_COMB_HIGH_68', 'FLUX_GIM2D', 'R_GIM2D', 'ELL_GIM2D', 'PA_GIM2D', 'DX_GIM2D',
# 'DY_GIM2D', 'SERSIC_N_GIM2D', 'R_0P5_GIM2D', 'CHI_GIM2D', 'CECILE_SL_Z', 'CECILE_SL_SAT',
# 'CECILE_SL', 'CECILE_SL_FLAG1', 'CECILE_SL_FLAG2', 'GROUP_PROJECTION_MMGG',
# 'GROUP_PROJECTION_MMGG_SPECZ', 'MMGG_SCALE', 'P_MEM_BEST', 'GROUP_ID_BEST', 'GROUP_FLAG_BEST',
# 'P_MEM_ALL', 'GROUP_ID_ALL', 'GROUP_FLAG_ALL', 'DIST_BCG_R200', 'MMGG_SCALE_SPECZ',
# 'P_MEM_BEST_SPECZ', 'GROUP_ID_BEST_SPECZ', 'GROUP_FLAG_BEST_SPECZ', 'P_MEM_ALL_SPECZ',
# 'GROUP_ID_ALL_SPECZ', 'GROUP_FLAG_ALL_SPECZ', 'DIST_BCG_R200_SPECZ', 'ZPHOT', 'TYPE', 'ZPDF',
# 'PHOTZ_LOW_68', 'PHOTZ_HIGH_68', 'CHI', 'MODD', 'EBV', 'NBFILT', 'ZMINCHI2', 'ZL68_MINCHI2',
# 'ZU68_MINCHI2', 'ZP2', 'CHI2', 'NUV', 'U', 'B', 'SUBARU_R', 'SUBARU_I', 'J_WFCAM', 'K_WIRCAM',
# 'M36', 'DNUV', 'DU', 'DJ_WFCAM', 'DK_WIRCAM', 'DM36', 'AUTO_OFFSET', 'AUTO_FLAG', 'MNUV',
# 'MU', 'MB', 'MV', 'MG', 'MR', 'MI', 'MJ', 'MK', 'MNUV_MR', 'SFR_MED', 'STR_INF', 'SFR_SUP',
# 'SSFR_MED', 'SSFR_INF', 'SSFR_SUP', 'MATCH_S', 'MASK_S', 'GOOD_ZPHOT_LENS',
# 'GOOD_ZPHOT_SOURCE', 'RA', 'DEC', 'GALID', 'BULGEFIT', 'DISKFIT', 'SERSICFIT', 'CHISQ_BULGE',
# 'CHISQ_DISK', 'CHISQ_SERSIC', 'COVAR_BULGE', 'COVAR_DISK', 'COVAR_SERSIC', 'PERR_BULGE',
# 'PERR_DISK', 'PERR_SERSIC', 'MPFIT_STATUS', 'DOF_BULGE', 'DOF_DISK', 'DOF_SERSIC', 'DOF_DVC',
# 'DOF_EXP', 'EXPFIT', 'DVCFIT', 'CHISQ_EXP', 'CHISQ_DVC', 'PERR_EXP', 'PERR_DVC', 'COVAR_EXP',
# 'COVAR_DVC', 'FRACDEV', 'XCROP', 'YCROP', 'XLEN', 'YLEN', 'DVC_BTT', 'EXP_BTT', 'MAD_SKY',
# 'MAD_SERSIC', 'MAD_SERSIC_MASK', 'MAD_DVCB', 'MAD_DVCB_MASK', 'MAD_EXPB', 'MAD_EXPB_MASK',
# 'MAD_EXP', 'MAD_EXP_MASK', 'MAD_DVC', 'MAD_DVC_MASK', 'CHISQ_BULGE_MASK', 'CHISQ_DISK_MASK',
# 'CHISQ_EXP_MASK', 'CHISQ_SERSIC_MASK', 'CHISQ_DVC_MASK', 'DOF_BULGE_MASK', 'DOF_DISK_MASK',
# 'DOF_EXP_MASK', 'DOF_SERSIC_MASK', 'DOF_DVC_MASK', 'SN_REFF_SERSIC', 'SKY_SERSIC',
# 'SKY_SERSIC_ERR', 'SKY_SERSIC_COVAR', 'DVC_BTT_ERR', 'EXP_BTT_ERR')
print "Read in ",n_fit_tot," from ",n_catfiles," fit files"
print "Making correspondence between IDENT values for all inputs"
cos_ind = np.zeros_like(galsim_ident)
fit_ind = np.zeros_like(galsim_ident)
cos_ident_list = list(cos_ident)
fit_ident_list = list(fit_ident)
n_fail_cos = 0
n_fail_fit = 0
for i in range(n_galsim_cat):
if i % 1000 == 0:
print "... object ",i
if galsim_ident[i] in cos_ident_list:
cos_ind[i] = cos_ident_list.index(galsim_ident[i])
else:
cos_ind[i] = -1
n_fail_cos += 1
if galsim_ident[i] in fit_ident_list:
fit_ind[i] = fit_ident_list.index(galsim_ident[i])
else:
fit_ind[i] = -1
n_fail_fit += 1
print "Number of match failures for COSMOS, fits: ",n_fail_cos, n_fail_fit
print "Rearranging arrays into proper order"
use_ind = (fit_ind >= 0) & (cos_ind >= 0)
out_ident = galsim_ident[use_ind]
print "Actually using ",len(out_ident)
out_mag_auto = cos_mag_auto[cos_ind[use_ind]]
out_flux_rad = cos_flux_rad[cos_ind[use_ind]]
out_zphot = cos_zphot[cos_ind[use_ind]]
test_mag_auto = fit_mag_auto[fit_ind[use_ind]]
print 'Mag auto test:'
print out_mag_auto[0:9]
print test_mag_auto[0:9]
# Rearrange the FIT arrays with fit quantities in the same order as galsim_ident.
out_sersicfit = fit_sersicfit[fit_ind[use_ind],:]
out_bulgefit = fit_bulgefit[fit_ind[use_ind],:]
out_fit_status = fit_status[fit_ind[use_ind],:]
out_fit_mad_s = fit_mad_s[fit_ind[use_ind],:]
out_fit_mad_b = fit_mad_b[fit_ind[use_ind],:]
out_fit_dvc_btt = fit_dvc_btt[fit_ind[use_ind],:]
# Make output data structure with IDENT, photo-z, magnitude, flux_radius, SERSICFIT, BULGEFIT, fit
# status. SERSICFIT and BULGEFIT are actually arrays of fit parameters from single Sersic fits and
# two-component fits, respectively.
tbhdu = pyfits.new_table(pyfits.ColDefs([pyfits.Column(name='IDENT',
format='J',
array=out_ident),
pyfits.Column(name='mag_auto',
format='D',
array=out_mag_auto),
pyfits.Column(name='flux_radius',
format='D',
array=out_flux_rad),
pyfits.Column(name='zphot',
format='D',
array=out_zphot),
pyfits.Column(name='sersicfit',
format='8D',
array=out_sersicfit),
pyfits.Column(name='bulgefit',
format='16D',
array=out_bulgefit),
pyfits.Column(name='fit_status',
format='5J',
array=out_fit_status),
pyfits.Column(name='fit_mad_s',
format='D',
array=out_fit_mad_s),
pyfits.Column(name='fit_mad_b',
format='D',
array=out_fit_mad_b),
pyfits.Column(name='fit_dvc_btt',
format='D',
array=out_fit_dvc_btt)]
))
# Write outputs.
print "Writing to file ",out_fitfile
tbhdu.writeto(out_fitfile, clobber=True)
# Write new subset of catalog file.
print "Re-writing to file ",out_catfile
galsim_cat = pyfits.BinTableHDU(galsim_cat[use_ind])
galsim_cat.writeto(out_catfile, clobber=True)
|
barnabytprowe/great3-public
|
inputs/galdata/make_fits_catalogs.py
|
Python
|
bsd-3-clause
| 15,061
|
# -*- coding: utf-8 -*-
import sys
from django.contrib.admin.helpers import AdminForm
from django.utils.decorators import method_decorator
from django.db import transaction
from django.utils import simplejson
from django.views.decorators.clickjacking import xframe_options_sameorigin
from cms.constants import PLUGIN_COPY_ACTION, PLUGIN_MOVE_ACTION
from cms.exceptions import PluginLimitReached
from cms.models.placeholdermodel import Placeholder
from cms.models.pluginmodel import CMSPlugin
from cms.plugin_pool import plugin_pool
from cms.utils import cms_static_url, get_cms_setting
from cms.utils.compat.dj import force_unicode
from cms.plugins.utils import has_reached_plugin_limit, requires_reload
from django.contrib.admin import ModelAdmin
from django.http import HttpResponse, Http404, HttpResponseBadRequest, HttpResponseForbidden
from django.shortcuts import render_to_response, get_object_or_404
from django.template import RequestContext
from django.template.defaultfilters import force_escape, escapejs
from django.utils.translation import ugettext as _, get_language
from django.conf import settings
from django.views.decorators.http import require_POST
import warnings
from django.template.response import TemplateResponse
from django.contrib.admin.util import get_deleted_objects
from django.core.exceptions import PermissionDenied
from django.core.urlresolvers import reverse
from django.db import router
from django.http import HttpResponseRedirect
from cms.utils import copy_plugins, permissions, get_language_from_request
from cms.utils.i18n import get_language_list
class FrontendEditableAdmin(object):
frontend_editable_fields = []
def get_urls(self):
"""
Register the url for the single field edit view
"""
from django.conf.urls import patterns, url
info = "%s_%s" % (self.model._meta.app_label, self.model._meta.module_name)
pat = lambda regex, fn: url(regex, self.admin_site.admin_view(fn), name='%s_%s' % (info, fn.__name__))
url_patterns = patterns(
'',
pat(r'edit-field/([0-9]+)/([a-z\-]+)/$', self.edit_field),
)
return url_patterns + super(FrontendEditableAdmin, self).get_urls()
def _get_object_for_single_field(self, object_id, language):
# Quick and dirty way to retrieve objects for django-hvad
# Cleaner implementation will extend this method in a child mixin
try:
return self.model.objects.language(language).get(pk=object_id)
except AttributeError:
return self.model.objects.get(pk=object_id)
def edit_field(self, request, object_id, language):
obj = self._get_object_for_single_field(object_id, language)
opts = obj.__class__._meta
saved_successfully = False
cancel_clicked = request.POST.get("_cancel", False)
raw_fields = request.GET.get("edit_fields")
fields = [field for field in raw_fields.split(",") if field in self.frontend_editable_fields]
if not fields:
return HttpResponseBadRequest(_("Fields %s not editabled in the frontend") % raw_fields)
if not request.user.has_perm("%s_change" % self.model._meta.module_name):
return HttpResponseForbidden(_("You do not have permission to edit this item"))
# Dinamically creates the form class with only `field_name` field
# enabled
form_class = self.get_form(request, obj, fields=fields)
if not cancel_clicked and request.method == 'POST':
form = form_class(instance=obj, data=request.POST)
if form.is_valid():
form.save()
saved_successfully = True
else:
form = form_class(instance=obj)
admin_form = AdminForm(form, fieldsets=[(None, {'fields': fields})], prepopulated_fields={},
model_admin=self)
media = self.media + admin_form.media
context = {
'CMS_MEDIA_URL': get_cms_setting('MEDIA_URL'),
'title': opts.verbose_name,
'plugin': None,
'plugin_id': None,
'adminform': admin_form,
'add': False,
'is_popup': True,
'media': media,
'opts': opts,
'change': True,
'save_as': False,
'has_add_permission': False,
'window_close_timeout': 10,
}
if cancel_clicked:
# cancel button was clicked
context.update({
'cancel': True,
})
return render_to_response('admin/cms/page/plugin/confirm_form.html', context, RequestContext(request))
if not cancel_clicked and request.method == 'POST' and saved_successfully:
return render_to_response('admin/cms/page/plugin/confirm_form.html', context, RequestContext(request))
return render_to_response('admin/cms/page/plugin/change_form.html', context, RequestContext(request))
class PlaceholderAdmin(ModelAdmin):
def get_urls(self):
"""
Register the plugin specific urls (add/edit/copy/remove/move)
"""
from django.conf.urls import patterns, url
info = "%s_%s" % (self.model._meta.app_label, self.model._meta.module_name)
pat = lambda regex, fn: url(regex, self.admin_site.admin_view(fn), name='%s_%s' % (info, fn.__name__))
url_patterns = patterns(
'',
pat(r'copy-plugins/$', self.copy_plugins),
pat(r'add-plugin/$', self.add_plugin),
pat(r'edit-plugin/([0-9]+)/$', self.edit_plugin),
pat(r'delete-plugin/([0-9]+)/$', self.delete_plugin),
pat(r'clear-placeholder/([0-9]+)/$', self.clear_placeholder),
pat(r'move-plugin/$', self.move_plugin),
)
return url_patterns + super(PlaceholderAdmin, self).get_urls()
def has_add_plugin_permission(self, request, placeholder, plugin_type):
if not permissions.has_plugin_permission(request.user, plugin_type, "add"):
return False
if not placeholder.has_add_permission(request):
return False
return True
def has_copy_plugin_permission(self, request, source_placeholder, target_placeholder, plugins):
if not source_placeholder.has_add_permission(request) or not target_placeholder.has_add_permission(
request):
return False
for plugin in plugins:
if not permissions.has_plugin_permission(request.user, plugin.plugin_type, "add"):
return False
return True
def has_change_plugin_permission(self, request, plugin):
if not permissions.has_plugin_permission(request.user, plugin.plugin_type, "change"):
return False
if not plugin.placeholder.has_change_permission(request):
return False
return True
def has_move_plugin_permission(self, request, plugin, target_placeholder):
if not permissions.has_plugin_permission(request.user, plugin.plugin_type, "change"):
return False
if not target_placeholder.has_change_permission(request):
return False
return True
def has_delete_plugin_permission(self, request, plugin):
if not permissions.has_plugin_permission(request.user, plugin.plugin_type, "delete"):
return False
placeholder = plugin.placeholder
if not placeholder.has_delete_permission(request):
return False
return True
def has_clear_placeholder_permission(self, request, placeholder):
if not placeholder.has_delete_permission(request):
return False
return True
def post_add_plugin(self, request, placeholder, plugin):
pass
def post_copy_plugins(self, request, source_placeholder, target_placeholder, plugins):
pass
def post_edit_plugin(self, request, plugin):
pass
def post_move_plugin(self, request, plugin):
pass
def post_delete_plugin(self, request, plugin):
pass
def post_clear_placeholder(self, request, placeholder):
pass
def get_placeholder_template(self, request, placeholder):
pass
@method_decorator(require_POST)
@xframe_options_sameorigin
def add_plugin(self, request):
"""
POST request should have the following data:
- placeholder_id
- plugin_type
- plugin_language
- plugin_parent (optional)
"""
plugin_type = request.POST['plugin_type']
placeholder_id = request.POST.get('placeholder_id', None)
parent_id = request.POST.get('parent_id', None)
if parent_id:
warnings.warn("parent_id is deprecated and will be removed in 3.0.1, use plugin_parent instead",
DeprecationWarning)
if not parent_id:
parent_id = request.POST.get('plugin_parent', None)
placeholder = get_object_or_404(Placeholder, pk=placeholder_id)
if not self.has_add_plugin_permission(request, placeholder, plugin_type):
return HttpResponseForbidden(_('You do not have permission to add a plugin'))
parent = None
language = request.POST.get('plugin_language') or get_language_from_request(request)
try:
has_reached_plugin_limit(placeholder, plugin_type, language,
template=self.get_placeholder_template(request, placeholder))
except PluginLimitReached as er:
return HttpResponseBadRequest(er)
# page add-plugin
if not parent_id:
position = request.POST.get('plugin_order',
CMSPlugin.objects.filter(language=language, placeholder=placeholder).count())
# in-plugin add-plugin
else:
parent = get_object_or_404(CMSPlugin, pk=parent_id)
placeholder = parent.placeholder
position = request.POST.get('plugin_order',
CMSPlugin.objects.filter(language=language, parent=parent).count())
# placeholder (non-page) add-plugin
# Sanity check to make sure we're not getting bogus values from JavaScript:
if settings.USE_I18N:
if not language or not language in [lang[0] for lang in settings.LANGUAGES]:
return HttpResponseBadRequest(_("Language must be set to a supported language!"))
if parent and parent.language != language:
return HttpResponseBadRequest(_("Parent plugin language must be same as language!"))
else:
language = settings.LANGUAGE_CODE
plugin = CMSPlugin(language=language, plugin_type=plugin_type, position=position, placeholder=placeholder)
if parent:
plugin.position = CMSPlugin.objects.filter(parent=parent).count()
plugin.insert_at(parent, position='last-child', save=False)
plugin.save()
self.post_add_plugin(request, placeholder, plugin)
response = {
'url': force_unicode(
reverse("admin:%s_%s_edit_plugin" % (self.model._meta.app_label, self.model._meta.module_name),
args=[plugin.pk])),
'breadcrumb': plugin.get_breadcrumb(),
}
return HttpResponse(simplejson.dumps(response), content_type='application/json')
@method_decorator(require_POST)
@xframe_options_sameorigin
@transaction.commit_on_success
def copy_plugins(self, request):
"""
POST request should have the following data:
- source_language
- source_placeholder_id
- source_plugin_id (optional)
- target_language
- target_placeholder_id
- target_plugin_id (optional, new parent)
"""
source_language = request.POST['source_language']
source_placeholder_id = request.POST['source_placeholder_id']
source_plugin_id = request.POST.get('source_plugin_id', None)
target_language = request.POST['target_language']
target_placeholder_id = request.POST['target_placeholder_id']
target_plugin_id = request.POST.get('target_plugin_id', None)
source_placeholder = get_object_or_404(Placeholder, pk=source_placeholder_id)
target_placeholder = get_object_or_404(Placeholder, pk=target_placeholder_id)
if not target_language or not target_language in get_language_list():
return HttpResponseBadRequest(_("Language must be set to a supported language!"))
if source_plugin_id:
source_plugin = get_object_or_404(CMSPlugin, pk=source_plugin_id)
reload_required = requires_reload(PLUGIN_COPY_ACTION, [source_plugin])
plugins = list(
source_placeholder.cmsplugin_set.filter(tree_id=source_plugin.tree_id, lft__gte=source_plugin.lft,
rght__lte=source_plugin.rght).order_by('tree_id', 'level', 'position'))
else:
plugins = list(
source_placeholder.cmsplugin_set.filter(language=source_language).order_by('tree_id', 'level', 'position'))
reload_required = requires_reload(PLUGIN_COPY_ACTION, plugins)
if not self.has_copy_plugin_permission(request, source_placeholder, target_placeholder, plugins):
return HttpResponseForbidden(_('You do not have permission to copy these plugins.'))
copy_plugins.copy_plugins_to(plugins, target_placeholder, target_language, target_plugin_id)
plugin_list = CMSPlugin.objects.filter(language=target_language, placeholder=target_placeholder).order_by(
'tree_id', 'level', 'position')
reduced_list = []
for plugin in plugin_list:
reduced_list.append(
{'id': plugin.pk, 'type': plugin.plugin_type, 'parent': plugin.parent_id, 'position': plugin.position,
'desc': force_unicode(plugin.get_short_description())})
self.post_copy_plugins(request, source_placeholder, target_placeholder, plugins)
json_response = {'plugin_list': reduced_list, 'reload': reload_required}
return HttpResponse(simplejson.dumps(json_response), content_type='application/json')
@xframe_options_sameorigin
def edit_plugin(self, request, plugin_id):
plugin_id = int(plugin_id)
cms_plugin = get_object_or_404(CMSPlugin.objects.select_related('placeholder'), pk=plugin_id)
instance, plugin_admin = cms_plugin.get_plugin_instance(self.admin_site)
if not self.has_change_plugin_permission(request, cms_plugin):
return HttpResponseForbidden(_("You do not have permission to edit this plugin"))
plugin_admin.cms_plugin_instance = cms_plugin
try:
plugin_admin.placeholder = cms_plugin.placeholder
except Placeholder.DoesNotExist:
pass
if request.method == "POST":
# set the continue flag, otherwise will plugin_admin make redirect to list
# view, which actually doesn't exists
request.POST['_continue'] = True
if request.POST.get("_cancel", False):
# cancel button was clicked
context = {
'CMS_MEDIA_URL': get_cms_setting('MEDIA_URL'),
'plugin': cms_plugin,
'is_popup': True,
"type": cms_plugin.get_plugin_name(),
'plugin_id': plugin_id,
'icon': force_escape(escapejs(cms_plugin.get_instance_icon_src())),
'alt': force_escape(escapejs(cms_plugin.get_instance_icon_alt())),
'cancel': True,
}
instance = cms_plugin.get_plugin_instance()[0]
if instance:
context['name'] = force_unicode(instance)
else:
# cancelled before any content was added to plugin
cms_plugin.delete()
context.update({
"deleted": True,
'name': force_unicode(cms_plugin),
})
return render_to_response('admin/cms/page/plugin/confirm_form.html', context, RequestContext(request))
if not instance:
# instance doesn't exist, call add view
response = plugin_admin.add_view(request)
else:
# already saved before, call change view
# we actually have the instance here, but since i won't override
# change_view method, is better if it will be loaded again, so
# just pass id to plugin_admin
response = plugin_admin.change_view(request, str(plugin_id))
if request.method == "POST" and plugin_admin.object_successfully_changed:
self.post_edit_plugin(request, plugin_admin.saved_object)
saved_object = plugin_admin.saved_object
context = {
'CMS_MEDIA_URL': get_cms_setting('MEDIA_URL'),
'plugin': saved_object,
'is_popup': True,
'name': force_unicode(saved_object),
"type": saved_object.get_plugin_name(),
'plugin_id': plugin_id,
'icon': force_escape(saved_object.get_instance_icon_src()),
'alt': force_escape(saved_object.get_instance_icon_alt()),
}
return render_to_response('admin/cms/page/plugin/confirm_form.html', context, RequestContext(request))
return response
@method_decorator(require_POST)
@xframe_options_sameorigin
def move_plugin(self, request):
"""
POST request with following parameters:
-plugin_id
-placeholder_id
-plugin_language (optional)
-plugin_parent (optional)
-plugin_order (array, optional)
"""
plugin = CMSPlugin.objects.get(pk=int(request.POST['plugin_id']))
placeholder = Placeholder.objects.get(pk=request.POST['placeholder_id'])
parent_id = request.POST.get('plugin_parent', None)
language = request.POST.get('plugin_language', plugin.language)
if not parent_id:
parent_id = None
else:
parent_id = int(parent_id)
order = request.POST.getlist("plugin_order[]")
if not self.has_move_plugin_permission(request, plugin, placeholder):
return HttpResponseForbidden(_("You have no permission to move this plugin"))
if plugin.parent_id != parent_id:
if parent_id:
parent = CMSPlugin.objects.get(pk=parent_id)
if parent.placeholder_id != placeholder.pk:
return HttpResponseBadRequest('parent must be in the same placeholder')
if parent.language != language:
return HttpResponseBadRequest('parent must be in the same language as plugin_language')
else:
parent = None
plugin.move_to(parent, position='last-child')
try:
template = self.get_placeholder_template(request, placeholder)
has_reached_plugin_limit(placeholder, plugin.plugin_type, plugin.language, template=template)
except PluginLimitReached as er:
return HttpResponseBadRequest(er)
plugin.save()
for child in plugin.get_descendants(include_self=True):
child.placeholder = placeholder
child.language = language
child.save()
plugins = CMSPlugin.objects.filter(parent=parent_id, placeholder=placeholder)
for level_plugin in plugins:
x = 0
for pk in order:
if level_plugin.pk == int(pk):
level_plugin.position = x
level_plugin.save()
break
x += 1
self.post_move_plugin(request, plugin)
json_response = {'reload': requires_reload(PLUGIN_MOVE_ACTION, [plugin])}
return HttpResponse(simplejson.dumps(json_response), content_type='application/json')
@xframe_options_sameorigin
def delete_plugin(self, request, plugin_id):
plugin = get_object_or_404(CMSPlugin.objects.select_related('placeholder'), pk=plugin_id)
if not self.has_delete_plugin_permission(request, plugin):
return HttpResponseForbidden(_("You do not have permission to delete this plugin"))
plugin_cms_class = plugin.get_plugin_class()
plugin_class = plugin_cms_class.model
opts = plugin_class._meta
using = router.db_for_write(plugin_class)
app_label = opts.app_label
(deleted_objects, perms_needed, protected) = get_deleted_objects(
[plugin], opts, request.user, self.admin_site, using)
if request.POST: # The user has already confirmed the deletion.
if perms_needed:
raise PermissionDenied(_("You do not have permission to delete this plugin"))
obj_display = force_unicode(plugin)
self.log_deletion(request, plugin, obj_display)
plugin.delete()
self.message_user(request, _('The %(name)s plugin "%(obj)s" was deleted successfully.') % {
'name': force_unicode(opts.verbose_name), 'obj': force_unicode(obj_display)})
self.post_delete_plugin(request, plugin)
return HttpResponseRedirect(reverse('admin:index', current_app=self.admin_site.name))
plugin_name = force_unicode(plugin_pool.get_plugin(plugin.plugin_type).name)
if perms_needed or protected:
title = _("Cannot delete %(name)s") % {"name": plugin_name}
else:
title = _("Are you sure?")
context = {
"title": title,
"object_name": plugin_name,
"object": plugin,
"deleted_objects": deleted_objects,
"perms_lacking": perms_needed,
"protected": protected,
"opts": opts,
"app_label": app_label,
}
return TemplateResponse(request, "admin/cms/page/plugin/delete_confirmation.html", context,
current_app=self.admin_site.name)
@xframe_options_sameorigin
def clear_placeholder(self, request, placeholder_id):
placeholder = get_object_or_404(Placeholder, pk=placeholder_id)
if not self.has_clear_placeholder_permission(request, placeholder):
return HttpResponseForbidden(_("You do not have permission to clear this placeholder"))
plugins = placeholder.get_plugins()
opts = Placeholder._meta
using = router.db_for_write(Placeholder)
app_label = opts.app_label
(deleted_objects, perms_needed, protected) = get_deleted_objects(
plugins, opts, request.user, self.admin_site, using)
obj_display = force_unicode(placeholder)
if request.POST: # The user has already confirmed the deletion.
if perms_needed:
return HttpResponseForbidden(_("You do not have permission to clear this placeholder"))
self.log_deletion(request, placeholder, obj_display)
for plugin in plugins:
plugin.delete()
self.message_user(request, _('The placeholder "%(obj)s" was cleared successfully.') % {
'obj': force_unicode(obj_display)})
self.post_clear_placeholder(request, placeholder)
return HttpResponseRedirect(reverse('admin:index', current_app=self.admin_site.name))
if perms_needed or protected:
title = _("Cannot delete %(name)s") % {"name": obj_display}
else:
title = _("Are you sure?")
context = {
"title": title,
"object_name": _("placeholder"),
"object": placeholder,
"deleted_objects": deleted_objects,
"perms_lacking": perms_needed,
"protected": protected,
"opts": opts,
"app_label": app_label,
}
return TemplateResponse(request, "admin/cms/page/plugin/delete_confirmation.html", context,
current_app=self.admin_site.name)
class LanguageTabsAdmin(ModelAdmin):
render_placeholder_language_tabs = True
# change_form_template = 'admin/placeholders/placeholder/change_form.html'
def get_language_from_request(self, request):
language = request.REQUEST.get('language', None)
if not language:
language = get_language()
return language
def placeholder_plugin_filter(self, request, queryset):
if self.render_placeholder_language_tabs:
language = self.get_language_from_request(request)
if language:
queryset = queryset.filter(language=language)
return queryset
def change_view(self, request, object_id, form_url='', extra_context=None):
extra_context = extra_context or {}
extra_context.update(self.language_tab_context(request))
tab_language = request.GET.get("language", None)
response = super(PlaceholderAdmin, self).change_view(
request, object_id, form_url=form_url, extra_context=extra_context)
if tab_language and response.status_code == 302 and response._headers['location'][1] == request.path:
location = response._headers['location']
response._headers['location'] = (location[0], "%s?language=%s" % (location[1], tab_language))
return response
def language_tab_context(self, request):
language = self.get_language_from_request(request)
languages = [(lang, lang_name) for lang, lang_name in settings.LANGUAGES]
context = {
'language': language,
'language_tabs': languages,
'show_language_tabs': len(languages) > 1 and self.render_placeholder_language_tabs,
}
return context
|
SinnerSchraderMobileMirrors/django-cms
|
cms/admin/placeholderadmin.py
|
Python
|
bsd-3-clause
| 25,921
|
import math
import json
import os
import pytest
import rti_python.ADCP.AdcpCommands
def calculate_predicted_range(**kwargs):
"""
:param SystemFrequency=: System frequency for this configuration.
:param CWPON=: Flag if Water Profile is turned on.
:param CWPBL=: WP Blank in meters.
:param CWPBS=: WP bin size in meters.
:param CWPBN=: Number of bins.
:param CWPBB_LagLength=: WP lag length in meters.
:param CWPBB=: WP broadband or narrowband.
:param CWPP=: Number of pings to average.
:param CWPTBP=: Time between each ping in the average.
:param CBTON=: Is Bottom Track turned on.
:param CBTBB=: BT broadband or narrowband.
:param BeamAngle=: Beam angle in degrees. Default 20 degrees.
:param BeamDiameter=: The beam diameter in meters.
:param CyclesPerElement=: Cycles per element.
:param Salinity=: Salinity in ppt.
:param Temperature=: Temperature in C.
:param XdcrDepth=: Tranducer Depth in meter.
:return: BT Range, WP Range, Range First Bin, Configured Ranges
"""
# Get the configuration from the json file
script_dir = os.path.dirname(__file__)
json_file_path = os.path.join(script_dir, 'predictor.json')
try:
config = json.loads(open(json_file_path).read())
except Exception as e:
print("Error opening JSON file Range", e)
return (0.0, 0.0, 0.0, 0.0)
return _calculate_predicted_range(kwargs.pop('CWPON', config['DEFAULT']['CWPON']),
kwargs.pop('CWPBB', config['DEFAULT']['CWPBB']),
kwargs.pop('CWPBS', config['DEFAULT']['CWPBS']),
kwargs.pop('CWPBN', config['DEFAULT']['CWPBN']),
kwargs.pop('CWPBL', config['DEFAULT']['CWPBL']),
kwargs.pop('CBTON', config['DEFAULT']['CBTON']),
kwargs.pop('CBTBB', config['DEFAULT']['CBTBB']),
kwargs.pop('SystemFrequency', config['DEFAULT']['SystemFrequency']),
kwargs.pop('BeamDiameter', config["BeamDiameter"]),
kwargs.pop('CyclesPerElement', config["CyclesPerElement"]),
kwargs.pop('BeamAngle', config["BeamAngle"]),
kwargs.pop('SpeedOfSound', config["SpeedOfSound"]),
kwargs.pop('CWPBB_LagLength', config["DEFAULT"]["CWPBB_LagLength"]),
kwargs.pop('BroadbandPower', config["BroadbandPower"]),
kwargs.pop('Salinity', config["Salinity"]),
kwargs.pop('Temperature', config["Temperature"]),
kwargs.pop('XdcrDepth', config["XdcrDepth"]))
def _calculate_predicted_range(_CWPON_, _CWPBB_TransmitPulseType_, _CWPBS_, _CWPBN_, _CWPBL_,
_CBTON_, _CBTBB_TransmitPulseType_,
_SystemFrequency_, _BeamDiameter_, _CyclesPerElement_,
_BeamAngle_, _SpeedOfSound_, _CWPBB_LagLength_, _BroadbandPower_,
_Salinity_, _Temperature_, _XdcrDepth_):
"""
Get the predicted ranges for the given setup. This will use the parameter given to calculate
the bottom track predicted range, the water profile predicted range, range to the first bin and
the configured range. All results are in meters.
All values with underscores before and after the name are given variables by the user. All caps
variables are given by the JSON configuration. All other variables are calculated.
:param _CWPON_: Flag if Water Profile is turned on.
:param _CWPBB_TransmitPulseType_: WP broadband or narrowband.
:param _CWPBB_LagLength_: WP lag length in meters.
:param _CWPBS_: Bin size in meters.
:param _CWPBN_: Number of bins.
:param _CWPBL_: Blank distance in meters.
:param _CBTON_: Flag if Bottom Track is turned on.
:param _CBTBB_TransmitPulseType_: BT broadband or narrowband.
:param _SystemFrequency_: System frequency in hz.
:param _BeamDiameter_: Beam diameter in meters.
:param _CyclesPerElement_: Cycles per element.
:param _BeamAngle_: Beam angle in degrees.
:param _SpeedOfSound_: Speed of sound in m/s.
:param _BroadbandPower_: Broadband power.
:param _Salinity_: Salinity in ppt.
:param _Temperature_: Temperature in C.
:param _XdcrDepth_: Transducer Depth in meter.
:return: BT Range, WP Range, Range First Bin, Configured Range
"""
script_dir = os.path.dirname(__file__)
json_file_path = os.path.join(script_dir, 'predictor.json')
try:
# Get the configuration from the json file
config = json.loads(open(json_file_path).read())
except Exception as e:
print("Error getting the configuration file. Range", e)
return (0.0, 0.0, 0.0, 0.0)
# Speed of sound must be a value
if _SpeedOfSound_ == 0:
_SpeedOfSound_ = 1490
# Wave length
waveLength = _SpeedOfSound_ / _SystemFrequency_
# DI
dI = 0.0
if waveLength == 0:
dI = 0.0
else:
dI = 20.0 * math.log10(math.pi * _BeamDiameter_ / waveLength)
# Absorption
absorption = calc_absorption(_SystemFrequency_, _SpeedOfSound_, _Salinity_, _Temperature_, _XdcrDepth_)
# 1200khz
btRange_1200000 = 0.0
wpRange_1200000 = 0.0
refBin_1200000 = 0.0
xmtW_1200000 = 0.0
rScale_1200000 = math.cos(_BeamAngle_ / 180.0 * math.pi) / math.cos(config["DEFAULT"]["1200000"]["BEAM_ANGLE"] / 180.0 * math.pi);
dI_1200000 = 20.0 * math.log10(math.pi * config["DEFAULT"]["1200000"]["DIAM"] / waveLength);
dB_1200000 = 0.0;
if (config["DEFAULT"]["1200000"]["BIN"] == 0) or (_CyclesPerElement_ == 0):
dB_1200000 = 0.0
else:
dB_1200000 = 10.0 * math.log10(_CWPBS_ / config["DEFAULT"]["1200000"]["BIN"]) + dI - dI_1200000 - 10.0 * math.log10(config["DEFAULT"]["1200000"]["CPE"] / _CyclesPerElement_)
absorption_range_1200000 = config["DEFAULT"]["1200000"]["RANGE"] + ((config["DEFAULT"]["1200000"]["ABSORPTION_SCALE"] - absorption) * config["DEFAULT"]["1200000"]["RANGE"])
if _SystemFrequency_ > config["DEFAULT"]["1200000"]["FREQ"]:
# Ref in and xmt watt
refBin_1200000 = config["DEFAULT"]["1200000"]["BIN"]
xmtW_1200000 = config["DEFAULT"]["1200000"]["XMIT_W"]
if _CBTON_:
# Check if NB
if _CBTBB_TransmitPulseType_ == rti_python.ADCP.AdcpCommands.eCBTBB_Mode.NARROWBAND_LONG_RANGE.value:
btRange_1200000 = 2.0 * rScale_1200000 * (absorption_range_1200000 + config["DEFAULT"]["1200000"]["BIN"] * dB_1200000 + 15.0 * config["DEFAULT"]["1200000"]["BIN"])
else:
btRange_1200000 = 2.0 * rScale_1200000 * (absorption_range_1200000 + config["DEFAULT"]["1200000"]["BIN"] * dB_1200000)
else:
btRange_1200000 = 0.0
if _CWPON_:
# Check if NB
if _CWPBB_TransmitPulseType_ == rti_python.ADCP.AdcpCommands.eCWPBB_TransmitPulseType.NARROWBAND.value:
wpRange_1200000 = rScale_1200000 * (absorption_range_1200000 + config["DEFAULT"]["1200000"]["BIN"] * dB_1200000 + config["DEFAULT"]["NB_PROFILE_REF"] * config["DEFAULT"]["1200000"]["BIN"])
else:
wpRange_1200000 = rScale_1200000 * (absorption_range_1200000 + config["DEFAULT"]["1200000"]["BIN"] * dB_1200000)
else:
wpRange_1200000 = 0.0
else:
btRange_1200000 = 0.0
wpRange_1200000 = 0.0
# 600khz
btRange_600000 = 0.0
wpRange_600000 = 0.0
refBin_600000 = 0.0
xmtW_600000 = 0.0
rScale_600000 = math.cos(_BeamAngle_ / 180.0 * math.pi) / math.cos(config["DEFAULT"]["600000"]["BEAM_ANGLE"] / 180.0 * math.pi)
dI_600000 = 20.0 * math.log10(math.pi * config["DEFAULT"]["600000"]["DIAM"] / waveLength)
dB_600000 = 0.0;
if config["DEFAULT"]["600000"]["BIN"] == 0 or _CyclesPerElement_ == 0:
dB_600000 = 0.0;
else:
dB_600000 = 10.0 * math.log10(_CWPBS_ / config["DEFAULT"]["600000"]["BIN"]) + dI - dI_600000 - 10.0 * math.log10(config["DEFAULT"]["600000"]["CPE"] / _CyclesPerElement_)
absorption_range_600000 = config["DEFAULT"]["600000"]["RANGE"] + ((config["DEFAULT"]["600000"]["ABSORPTION_SCALE"] - absorption) * config["DEFAULT"]["600000"]["RANGE"])
if (_SystemFrequency_ > config["DEFAULT"]["600000"]["FREQ"]) and (_SystemFrequency_ < config["DEFAULT"]["1200000"]["FREQ"]):
# Ref Bin and xmt watt
refBin_600000 = config["DEFAULT"]["600000"]["BIN"];
xmtW_600000 = config["DEFAULT"]["600000"]["XMIT_W"];
if _CBTON_:
# Check if NB
if _CBTBB_TransmitPulseType_ == rti_python.ADCP.AdcpCommands.eCBTBB_Mode.NARROWBAND_LONG_RANGE.value:
btRange_600000 = 2.0 * rScale_600000 * (absorption_range_600000 + config["DEFAULT"]["600000"]["BIN"] * dB_600000 + 15.0 * config["DEFAULT"]["600000"]["BIN"] )
else:
btRange_600000 = 2.0 * rScale_600000 * (absorption_range_600000 + config["DEFAULT"]["600000"]["BIN"] * dB_600000)
else:
btRange_600000 = 0.0
if _CWPON_:
# Checck if NB
if _CWPBB_TransmitPulseType_ == rti_python.ADCP.AdcpCommands.eCWPBB_TransmitPulseType.NARROWBAND.value:
wpRange_600000 = rScale_600000 * (absorption_range_600000 + config["DEFAULT"]["600000"]["BIN"] * dB_600000 + config["DEFAULT"]["NB_PROFILE_REF"] * config["DEFAULT"]["600000"]["BIN"] )
else:
wpRange_600000 = rScale_600000 * (absorption_range_600000 + config["DEFAULT"]["600000"]["BIN"] * dB_600000)
else:
wpRange_600000 = 0.0
else:
btRange_600000 = 0.0
wpRange_600000 = 0.0
# 300khz
btRange_300000 = 0.0
wpRange_300000 = 0.0
refBin_300000 = 0.0
xmtW_300000 = 0.0
rScale_300000 = math.cos(_BeamAngle_ / 180.0 * math.pi) / math.cos(config["DEFAULT"]["300000"]["BEAM_ANGLE"] / 180.0 * math.pi)
dI_300000 = 20.0 * math.log10(math.pi * config["DEFAULT"]["300000"]["DIAM"] / waveLength)
dB_300000 = 0.0
if (config["DEFAULT"]["300000"]["BIN"] == 0) or (_CyclesPerElement_ == 0):
dB_300000 = 0.0
else:
dB_300000 = 10.0 * math.log10(_CWPBS_ / config["DEFAULT"]["300000"]["BIN"]) + dI - dI_300000 - 10.0 * math.log10(config["DEFAULT"]["300000"]["CPE"] / _CyclesPerElement_)
absorption_range_300000 = config["DEFAULT"]["300000"]["RANGE"] + ((config["DEFAULT"]["300000"]["ABSORPTION_SCALE"] - absorption) * config["DEFAULT"]["300000"]["RANGE"])
if (_SystemFrequency_ > config["DEFAULT"]["300000"]["FREQ"]) and (_SystemFrequency_ < config["DEFAULT"]["600000"]["FREQ"]):
# Ref Bin and xmt watt
refBin_300000 = config["DEFAULT"]["300000"]["BIN"]
xmtW_300000 = config["DEFAULT"]["300000"]["XMIT_W"]
if _CBTON_:
# Check if NB
if _CBTBB_TransmitPulseType_ == rti_python.ADCP.AdcpCommands.eCBTBB_Mode.NARROWBAND_LONG_RANGE.value:
btRange_300000 = 2.0 * rScale_300000 * (absorption_range_300000 + config["DEFAULT"]["300000"]["BIN"] * dB_300000 + 15.0 * config["DEFAULT"]["300000"]["BIN"])
else:
btRange_300000 = 2.0 * rScale_300000 * (absorption_range_300000 + config["DEFAULT"]["300000"]["BIN"] * dB_300000)
else:
btRange_300000 = 0.0
if _CWPON_:
# Checck if NB
if _CWPBB_TransmitPulseType_ == rti_python.ADCP.AdcpCommands.eCWPBB_TransmitPulseType.NARROWBAND.value:
wpRange_300000 = rScale_300000 * (absorption_range_300000 + config["DEFAULT"]["300000"]["BIN"] * dB_300000 + config["DEFAULT"]["NB_PROFILE_REF"] * config["DEFAULT"]["300000"]["BIN"])
else:
wpRange_300000 = rScale_300000 * (absorption_range_300000 + config["DEFAULT"]["300000"]["BIN"] * dB_300000)
else:
wpRange_300000 = 0.0
else:
# Return 0 if not selected
btRange_300000 = 0.0
wpRange_300000 = 0.0
# 150khz
btRange_150000 = 0.0
wpRange_150000 = 0.0
refBin_150000 = 0.0
xmtW_150000 = 0.0
rScale_150000 = math.cos(_BeamAngle_ / 180.0 * math.pi) / math.cos(config["DEFAULT"]["150000"]["BEAM_ANGLE"] / 180.0 * math.pi)
dI_150000 = 20.0 * math.log10(math.pi * config["DEFAULT"]["150000"]["DIAM"] / waveLength)
dB_150000 = 0.0;
if (config["DEFAULT"]["150000"]["BIN"] == 0) or (_CyclesPerElement_ == 0):
dB_150000 = 0.0
else:
dB_150000 = 10.0 * math.log10(_CWPBS_ / config["DEFAULT"]["150000"]["BIN"]) + dI - dI_150000 - 10.0 * math.log10(config["DEFAULT"]["150000"]["CPE"] / _CyclesPerElement_)
absorption_range_150000 = config["DEFAULT"]["150000"]["RANGE"] + ((config["DEFAULT"]["150000"]["ABSORPTION_SCALE"] - absorption) * config["DEFAULT"]["150000"]["RANGE"])
if (_SystemFrequency_ > config["DEFAULT"]["150000"]["FREQ"]) and (_SystemFrequency_ < config["DEFAULT"]["300000"]["FREQ"]):
# Ref Bin and xmt watt
refBin_150000 = config["DEFAULT"]["150000"]["BIN"]
xmtW_150000 = config["DEFAULT"]["150000"]["XMIT_W"]
if _CBTON_:
# Check if NB
if _CBTBB_TransmitPulseType_ == rti_python.ADCP.AdcpCommands.eCBTBB_Mode.NARROWBAND_LONG_RANGE.value:
btRange_150000 = 2.0 * rScale_150000 * (absorption_range_150000 + config["DEFAULT"]["150000"]["BIN"] * dB_150000 + 15.0 * config["DEFAULT"]["150000"]["BIN"])
else:
btRange_150000 = 2.0 * rScale_150000 * (absorption_range_150000 + config["DEFAULT"]["150000"]["BIN"] * dB_150000)
else:
btRange_150000 = 0.0
if _CWPON_:
# Checck if NB
if _CWPBB_TransmitPulseType_ == rti_python.ADCP.AdcpCommands.eCWPBB_TransmitPulseType.NARROWBAND.value:
wpRange_150000 = rScale_150000 * (absorption_range_150000 + config["DEFAULT"]["150000"]["BIN"] * dB_150000 + config["DEFAULT"]["NB_PROFILE_REF"] * config["DEFAULT"]["150000"]["BIN"])
else:
wpRange_150000 = rScale_150000 * (absorption_range_150000 + config["DEFAULT"]["150000"]["BIN"] * dB_150000)
else:
wpRange_150000 = 0.0
else:
# Return 0 if not selected
btRange_150000 = 0.0
wpRange_150000 = 0.0
# 75khz
btRange_75000 = 0.0
wpRange_75000 = 0.0
refBin_75000 = 0.0
xmtW_75000 = 0.0
rScale_75000 = math.cos(_BeamAngle_ / 180.0 * math.pi) / math.cos(config["DEFAULT"]["75000"]["BEAM_ANGLE"] / 180.0 * math.pi)
dI_75000 = 20.0 * math.log10(math.pi * config["DEFAULT"]["75000"]["DIAM"] / waveLength)
dB_75000 = 0.0;
if (config["DEFAULT"]["75000"]["BIN"] == 0) or (_CyclesPerElement_ == 0):
dB_75000 = 0.0
else:
dB_75000 = 10.0 * math.log10(_CWPBS_ / config["DEFAULT"]["75000"]["BIN"]) + dI - dI_75000 - 10.0 * math.log10(config["DEFAULT"]["75000"]["CPE"] / _CyclesPerElement_)
absorption_range_75000 = config["DEFAULT"]["75000"]["RANGE"] + ((config["DEFAULT"]["75000"]["ABSORPTION_SCALE"] - absorption) * config["DEFAULT"]["75000"]["RANGE"])
if (_SystemFrequency_ > config["DEFAULT"]["75000"]["FREQ"]) and (_SystemFrequency_ < config["DEFAULT"]["150000"]["FREQ"]):
# Ref Bin and xmt watt
refBin_75000 = config["DEFAULT"]["75000"]["BIN"]
xmtW_75000 = config["DEFAULT"]["75000"]["XMIT_W"]
if _CBTON_:
# Check if NB
if _CBTBB_TransmitPulseType_ == rti_python.ADCP.AdcpCommands.eCBTBB_Mode.NARROWBAND_LONG_RANGE.value:
btRange_75000 = 2.0 * rScale_75000 * (absorption_range_75000 + config["DEFAULT"]["75000"]["BIN"] * dB_75000 + 15.0 * config["DEFAULT"]["75000"]["BIN"])
else:
btRange_75000 = 2.0 * rScale_75000 * (absorption_range_75000 + config["DEFAULT"]["75000"]["BIN"] * dB_75000)
else:
btRange_75000 = 0.0
if _CWPON_:
# Checck if NB
if _CWPBB_TransmitPulseType_ == rti_python.ADCP.AdcpCommands.eCWPBB_TransmitPulseType.NARROWBAND.value:
wpRange_75000 = rScale_75000 * (absorption_range_75000 + config["DEFAULT"]["75000"]["BIN"] * dB_75000 + config["DEFAULT"]["NB_PROFILE_REF"] * config["DEFAULT"]["75000"]["BIN"])
else:
wpRange_75000 = rScale_75000 * (absorption_range_75000 + config["DEFAULT"]["75000"]["BIN"] * dB_75000)
else:
wpRange_75000 = 0.0;
else:
# Return 0 if not selected
btRange_75000 = 0.0
wpRange_75000 = 0.0
# 38khz
btRange_38000 = 0.0
wpRange_38000 = 0.0
refBin_38000 = 0.0
xmtW_38000 = 0.0
rScale_38000 = math.cos(_BeamAngle_ / 180.0 * math.pi) / math.cos(config["DEFAULT"]["38000"]["BEAM_ANGLE"] / 180.0 * math.pi)
dI_38000 = 20.0 * math.log10(math.pi * config["DEFAULT"]["38000"]["DIAM"] / waveLength)
dB_38000 = 0.0;
if (config["DEFAULT"]["38000"]["BIN"] == 0) or (_CyclesPerElement_ == 0):
dB_38000 = 0.0
else:
dB_38000 = 10.0 * math.log10(_CWPBS_ / config["DEFAULT"]["38000"]["BIN"]) + dI - dI_38000 - 10.0 * math.log10(config["DEFAULT"]["38000"]["CPE"] / _CyclesPerElement_)
absorption_range_38000 = config["DEFAULT"]["38000"]["RANGE"] + ((config["DEFAULT"]["38000"]["ABSORPTION_SCALE"] - absorption) * config["DEFAULT"]["38000"]["RANGE"])
if (_SystemFrequency_ > config["DEFAULT"]["38000"]["FREQ"]) and (_SystemFrequency_ < config["DEFAULT"]["75000"]["FREQ"]):
# Ref Bin and xmt watt
refBin_38000 = config["DEFAULT"]["38000"]["BIN"]
xmtW_38000 = config["DEFAULT"]["38000"]["XMIT_W"]
if _CBTON_:
# Check if NB
if _CBTBB_TransmitPulseType_ == rti_python.ADCP.AdcpCommands.eCBTBB_Mode.NARROWBAND_LONG_RANGE.value:
btRange_38000 = 2.0 * rScale_38000 * (absorption_range_38000 + config["DEFAULT"]["38000"]["BIN"] * dB_38000 + 15.0 * config["DEFAULT"]["38000"]["BIN"]);
else:
btRange_38000 = 2.0 * rScale_38000 * (absorption_range_38000 + config["DEFAULT"]["38000"]["BIN"] * dB_38000)
else:
btRange_38000 = 0.0
if _CWPON_:
# Checck if NB
if _CWPBB_TransmitPulseType_ == rti_python.ADCP.AdcpCommands.eCWPBB_TransmitPulseType.NARROWBAND.value:
wpRange_38000 = rScale_38000 * (absorption_range_38000 + config["DEFAULT"]["38000"]["BIN"] * dB_38000 + config["DEFAULT"]["NB_PROFILE_REF"] * config["DEFAULT"]["38000"]["BIN"])
else:
wpRange_38000 = rScale_38000 * (absorption_range_38000 + config["DEFAULT"]["38000"]["BIN"] * dB_38000)
else:
wpRange_38000 = 0.0
else:
# Return 0 if not selected
btRange_38000 = 0.0
wpRange_38000 = 0.0
# Sample Rate
sumSampling = 0.0;
if _SystemFrequency_ > config["DEFAULT"]["1200000"]["FREQ"]: # 1200 khz
sumSampling += config["DEFAULT"]["1200000"]["SAMPLING"] * config["DEFAULT"]["1200000"]["CPE"] / _CyclesPerElement_
elif (_SystemFrequency_ > config["DEFAULT"]["600000"]["FREQ"]) and (_SystemFrequency_ < config["DEFAULT"]["1200000"]["FREQ"]): # 600 khz
sumSampling += config["DEFAULT"]["600000"]["SAMPLING"] * config["DEFAULT"]["600000"]["CPE"] / _CyclesPerElement_
elif (_SystemFrequency_ > config["DEFAULT"]["300000"]["FREQ"]) and (_SystemFrequency_ < config["DEFAULT"]["600000"]["FREQ"]): # 300 khz
sumSampling += config["DEFAULT"]["300000"]["SAMPLING"] * config["DEFAULT"]["300000"]["CPE"] / _CyclesPerElement_
elif (_SystemFrequency_ > config["DEFAULT"]["150000"]["FREQ"]) and (_SystemFrequency_ < config["DEFAULT"]["300000"]["FREQ"]): # 150 khz
sumSampling += config["DEFAULT"]["150000"]["SAMPLING"] * config["DEFAULT"]["150000"]["CPE"] / _CyclesPerElement_
elif (_SystemFrequency_ > config["DEFAULT"]["75000"]["FREQ"]) and (_SystemFrequency_ < config["DEFAULT"]["150000"]["FREQ"]): # 75 khz
sumSampling += config["DEFAULT"]["75000"]["SAMPLING"] * config["DEFAULT"]["75000"]["CPE"] / _CyclesPerElement_
elif (_SystemFrequency_ > config["DEFAULT"]["38000"]["FREQ"]) and (_SystemFrequency_ < config["DEFAULT"]["75000"]["FREQ"]): #38 khz
sumSampling += config["DEFAULT"]["38000"]["SAMPLING"] * config["DEFAULT"]["38000"]["CPE"] / _CyclesPerElement_
sampleRate = _SystemFrequency_ * (sumSampling)
# Meters Per Sample
metersPerSample = 0
if sampleRate == 0:
metersPerSample = 0.0
else:
metersPerSample = math.cos(_BeamAngle_ / 180.0 * math.pi) * _SpeedOfSound_ / 2.0 / sampleRate
# Lag Samples
lagSamples = 0
if metersPerSample == 0:
lagSamples = 0
else:
lagSamples = 2 * math.trunc((math.trunc(_CWPBB_LagLength_ / metersPerSample) + 1.0) / 2.0)
# Xmt Scale
xmtScale = 1.0;
if _CWPBB_TransmitPulseType_ == rti_python.ADCP.AdcpCommands.eCWPBB_TransmitPulseType.NARROWBAND.value: # Check if NB
xmtScale = 1.0
else:
# Check for bad value
if lagSamples == 0:
xmtScale = 1.0
# Check which Broadband power is used
elif _BroadbandPower_:
xmtScale = (lagSamples - 1.0) / lagSamples
else:
xmtScale = 1.0 / lagSamples
# Range Reduction
rangeReduction = 0.0;
# Get the sum of all the selected WP XmtW and RefBin
sumXmtW = xmtW_1200000 + xmtW_600000 + xmtW_300000 + xmtW_150000 + xmtW_75000 + xmtW_38000
sumRefBin = refBin_1200000 + refBin_600000 + refBin_300000 + refBin_150000 + refBin_75000 + refBin_38000
beamXmtPowerProfile = xmtScale * sumXmtW
# Check for bad values
if sumXmtW == 0:
rangeReduction = 0.0
else:
rangeReduction = 10.0 * math.log10(beamXmtPowerProfile / sumXmtW) * sumRefBin + 1.0
# Bin Samples
binSamples = 0;
if metersPerSample == 0:
binSamples = 0
else:
binSamples = math.trunc(_CWPBS_ / metersPerSample)
# Code Repeats
codeRepeats = 0;
if lagSamples == 0:
codeRepeats = 0
else:
# Cast BinSamples and LagSamples to double because Truncate only takes doubles
# Make the result of Truncate an int
if (math.trunc(binSamples / lagSamples)) + 1.0 < 2.0:
codeRepeats = 2
else:
codeRepeats = (math.trunc(binSamples / lagSamples)) + 1
# First Bin Position
pos = 0.0;
if _CWPBB_TransmitPulseType_ == rti_python.ADCP.AdcpCommands.eCWPBB_TransmitPulseType.NARROWBAND.value:
pos = (2.0 * _CWPBS_ + 0.05) / 2.0
else:
if _CWPBB_TransmitPulseType_ > 1:
pos = _CWPBS_
else:
pos = (lagSamples * (codeRepeats - 1.0) * metersPerSample + _CWPBS_ + _CWPBB_LagLength_) / 2.0
firstBinPosition = _CWPBL_ + pos;
# Profile Range based off Settings
profileRangeSettings = _CWPBL_ + (_CWPBS_ * _CWPBN_);
# Set the predicted ranges PredictedRanges
wp = 0.0;
bt = 0.0;
if _SystemFrequency_ > config["DEFAULT"]["1200000"]["FREQ"]: # 1200 khz
bt = btRange_1200000
wp = wpRange_1200000 + rangeReduction
elif (_SystemFrequency_ > config["DEFAULT"]["600000"]["FREQ"]) and (_SystemFrequency_ < config["DEFAULT"]["1200000"]["FREQ"]): # 600 khz
bt = btRange_600000
wp = wpRange_600000 + rangeReduction
elif (_SystemFrequency_ > config["DEFAULT"]["300000"]["FREQ"]) and (_SystemFrequency_ < config["DEFAULT"]["600000"]["FREQ"]): # 300 khz
bt = btRange_300000
wp = wpRange_300000 + rangeReduction
elif (_SystemFrequency_ > config["DEFAULT"]["150000"]["FREQ"]) and (_SystemFrequency_ < config["DEFAULT"]["300000"]["FREQ"]): # 150 khz
bt = btRange_150000
wp = wpRange_150000 + rangeReduction
elif (_SystemFrequency_ > config["DEFAULT"]["75000"]["FREQ"]) and (_SystemFrequency_ < config["DEFAULT"]["150000"]["FREQ"]): # 75 khz
bt = btRange_75000
wp = wpRange_75000 + rangeReduction
elif (_SystemFrequency_ > config["DEFAULT"]["38000"]["FREQ"]) and (_SystemFrequency_ < config["DEFAULT"]["75000"]["FREQ"]): #38 khz
bt = btRange_38000;
wp = wpRange_38000 + rangeReduction;
return (bt, wp, firstBinPosition, profileRangeSettings)
def calc_absorption(_SystemFrequency_, _SpeedOfSound_, _Salinity_, _Temperature_, _XdcrDepth_):
"""
Calculate the water absorption.
:param _SystemFrequency_: System frequency
:param _SpeedOfSound_: Speed of Sound m/s
:param _Salinity_: Salinity in ppt.
:param _Temperature_: Water Temperature in C
:param _XdcrDepth_: Transducer Depth in m.
:return: Water Absorption.
"""
if _SpeedOfSound_ == 0 or _Salinity_ == 0 or _SystemFrequency_ == 0:
return 0
pH = 8.0
P1 = 1.0
# Frequency
freq = _SystemFrequency_ / 1000.0
# A1
# dB Km^-1 KHz^-1
A1 = 8.68 / _SpeedOfSound_ * 10.0 ** (0.78 * pH - 5.0)
# f1
f1 = 2.8 * ((_Salinity_ / 35.0) ** 0.5) * (10.0 ** (4.0 - 1245.0 / (273.0 + _Temperature_)))
# A2
# dB km^-1 kHz^-1
A2 = 21.44 * _Salinity_ / _SpeedOfSound_ * (1.0 + 0.025 * _Temperature_)
# P2
P2 = 1.0 - 1.37 * (10.0 ** (-4.0)) * _XdcrDepth_ + 6.2 * (10.0 ** (-9.0)) * (_XdcrDepth_ ** 2)
# f2
# kHz
f2 = 8.17 * (10.0 ** (8.0 - 1990.0 / (273.0 + _Temperature_))) / (1.0 + 0.0018 * (_Salinity_ - 35.0))
# A3
A3 = 4.93 * (10.0 ** (-4.0)) - 2.59 * (10.0 ** (-5.0)) * _Temperature_ + 9.11 * (10.0 ** (-7.0)) * (_Temperature_ ** 2.0)
# P3
P3 = 1.0 - 3.83 * (10.0 ** (-5.0)) * _XdcrDepth_ + 4.9 * (10.0 ** (-10.0)) * (_XdcrDepth_ ** 2.0)
# Boric Acid Relaxation
bar = A1 * P1 * f1 * (freq ** 2.0) / ((freq ** 2.0) + (f1 ** 2.0)) / 1000.0
# MgSO3 Magnesium Sulphate Relaxation
msr = A2 * P2 * f2 * (freq ** 2.0) / ((freq ** 2.0) + (f2 ** 2.0)) / 1000.0
# Freshwater Attenuation
fa = A3 * P3 * (freq ** 2.0) / 1000.0
# Absorption
return bar + msr + fa
def test_calc_range():
(bt_range, wp_range, first_bin, cfg_range) = calculate_predicted_range(CWPON=True,
CWPBB=1,
CWPBS=4.0,
CWPBN=30,
CWPBL=1.0,
CBTON=True,
CBTBB=1,
SystemFrequency=288000.0,
BeamDiameter=0.075,
CyclesPerElement=12,
BeamAngle=20,
SpeedOfSound=1490,
CWPBB_LagLength=1.0,
BroadbandPower=True,
Temperature=10.0,
Salinity=35.0,
XdcrDepth=0.0)
user_cfg_range = 1.0 + (4.0 * 30)
assert pytest.approx(wp_range, 0.01) == 100.05
assert pytest.approx(bt_range, 0.01) == 199.14
assert pytest.approx(first_bin, 0.01) == 5.484
assert pytest.approx(cfg_range, 0.01) == user_cfg_range
def test_calc_range_nb():
(bt_range, wp_range, first_bin, cfg_range) = calculate_predicted_range(CWPON=True,
CWPBB=0,
CWPBS=4.0,
CWPBN=30,
CWPBL=1.0,
CBTON=True,
CBTBB=0,
SystemFrequency=288000.0,
BeamDiameter=0.075,
CyclesPerElement=12,
BeamAngle=20,
SpeedOfSound=1490,
CWPBB_LagLength=1.0,
BroadbandPower=True,
Temperature=10.0,
Salinity=35.0,
XdcrDepth=0.0)
user_cfg_range = 1.0 + (4.0 * 30)
assert pytest.approx(wp_range, 0.01) == 152.57
assert pytest.approx(bt_range, 0.01) == 319.14
assert pytest.approx(first_bin, 0.01) == 5.025
assert pytest.approx(cfg_range, 0.01) == user_cfg_range
|
ricorx7/rti_python
|
ADCP/Predictor/Range.py
|
Python
|
bsd-3-clause
| 30,092
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('nomcom', '0004_auto_20151027_0829'),
]
operations = [
migrations.RemoveField(
model_name='position',
name='incumbent',
),
]
|
wpjesus/codematch
|
ietf/nomcom/migrations/0005_remove_position_incumbent.py
|
Python
|
bsd-3-clause
| 348
|
import sys
from ..pakbase import Package
class Mt3dAdv(Package):
"""
MT3DMS Advection Package Class.
Parameters
----------
model : model object
The model object (of type :class:`flopy.mt3d.mt.Mt3dms`) to which
this package will be added.
mixelm : int
MIXELM is an integer flag for the advection solution option.
MIXELM = 0, the standard finite-difference method with upstream or
central-in-space weighting, depending on the value of NADVFD;
= 1, the forward-tracking method of characteristics (MOC);
= 2, the backward-tracking modified method of characteristics (MMOC);
= 3, the hybrid method of characteristics (HMOC) with MOC or MMOC
automatically and dynamically selected;
= -1, the third-order TVD scheme (ULTIMATE).
percel : float
PERCEL is the Courant number (i.e., the number of cells, or a
fraction of a cell) advection will be allowed in any direction in one
transport step.
For implicit finite-difference or particle-tracking-based schemes,
there is no limit on PERCEL, but for accuracy reasons, it is generally
not set much greater than one. Note, however, that the PERCEL limit is
checked over the entire model grid. Thus, even if PERCEL > 1,
advection may not be more than one cell's length at most model
locations.
For the explicit finite-difference or the third-order TVD scheme,
PERCEL is also a stability constraint which must not exceed one and
will be automatically reset to one if a value greater than one is
specified.
mxpart : int
MXPART is the maximum total number of moving particles allowed and is
used only when MIXELM = 1 or 3.
nadvfd : int
NADVFD is an integer flag indicating which weighting scheme should be
used; it is needed only when the advection term is solved using the
implicit finite- difference method.
NADVFD = 0 or 1, upstream weighting (default); = 2,central-in-space
weighting.
itrack : int
ITRACK is a flag indicating which particle-tracking algorithm is
selected for the Eulerian-Lagrangian methods.
ITRACK = 1, the first-order Euler algorithm is used.
= 2, the fourth-order Runge-Kutta algorithm is used; this option is
computationally demanding and may be needed only when PERCEL is set
greater than one.
= 3, the hybrid first- and fourth-order algorithm is used; the
Runge-Kutta algorithm is used in sink/source cells and the cells next
to sinks/sources while the Euler algorithm is used elsewhere.
wd : float
is a concentration weighting factor between 0.5 and 1. It is used for
operator splitting in the particle- tracking-based methods. The value
of 0.5 is generally adequate. The value of WD may be adjusted to
achieve better mass balance. Generally, it can be increased toward
1.0 as advection becomes more dominant.
dceps : float
is a small Relative Cell Concentration Gradient below which advective
transport is considered
nplane : int
NPLANE is a flag indicating whether the random or
fixed pattern is selected for initial placement of moving particles.
If NPLANE = 0, the random pattern is selected for initial placement.
Particles are distributed randomly in both the horizontal and vertical
directions by calling a random number generator (Figure 18b). This
option is usually preferred and leads to smaller mass balance
discrepancy in nonuniform or diverging/converging flow fields.
If NPLANE > 0, the fixed pattern is selected for initial placement.
The value of NPLANE serves as the number of vertical 'planes' on
which initial particles are placed within each cell block (Figure 18a).
The fixed pattern may work better than the random pattern only in
relatively uniform flow fields. For two-dimensional simulations in
plan view, set NPLANE = 1. For cross sectional or three-dimensional
simulations, NPLANE = 2 is normally adequate. Increase NPLANE if more
resolution in the vertical direction is desired.
npl : int
NPL is the number of initial particles per cell to be placed at cells
where the Relative Cell Concentration Gradient is less than or equal
to DCEPS. Generally, NPL can be set to zero since advection is
considered insignificant when the Relative Cell Concentration Gradient
is less than or equal to DCEPS. Setting NPL equal to NPH causes a
uniform number of particles to be placed in every cell over the entire
grid (i.e., the uniform approach).
nph : int
NPH is the number of initial particles per cell to be placed at cells
where the Relative Cell Concentration Gradient is greater than DCEPS.
The selection of NPH depends on the nature of the flow field and also
the computer memory limitation. Generally, a smaller number should be
used in relatively uniform flow fields and a larger number should be
used in relatively nonuniform flow fields. However, values exceeding
16 in two-dimensional simulation or 32 in three- dimensional
simulation are rarely necessary. If the random pattern is chosen, NPH
particles are randomly distributed within the cell block. If the fixed
pattern is chosen, NPH is divided by NPLANE to yield the number of
particles to be placed per vertical plane, which is rounded to one of
the values shown in Figure 30.
npmin : int
is the minimum number of particles allowed per cell. If the number of
particles in a cell at the end of a transport step is fewer than
NPMIN, new particles are inserted into that cell to maintain a
sufficient number of particles. NPMIN can be set to zero in relatively
uniform flow fields and to a number greater than zero in
diverging/converging flow fields. Generally, a value between zero and
four is adequate.
npmax : int
NPMAX is the maximum number of particles allowed per cell. If the
number of particles in a cell exceeds NPMAX, all particles are removed
from that cell and replaced by a new set of particles equal to NPH to
maintain mass balance. Generally, NPMAX can be set to approximately
two times of NPH.
interp : int
is a flag indicating the concentration interpolation method for use in
the MMOC scheme. Currently, only linear interpolation is implemented.
nlsink : int
s a flag indicating whether the random or fixed pattern is selected
for initial placement of particles to approximate sink cells in the
MMOC scheme. The convention is the same as that for NPLANE. It is
generally adequate to set NLSINK equivalent to NPLANE.
npsink : int
is the number of particles used to approximate sink cells in the MMOC
scheme. The convention is the same as that for NPH. It is generally
adequate to set NPSINK equivalent to NPH.
dchmoc : float
DCHMOC is the critical Relative Concentration Gradient for
controlling the selective use of either MOC or MMOC in the HMOC
solution scheme.
The MOC solution is selected at cells where the Relative
Concentration Gradient is greater than DCHMOC.
The MMOC solution is selected at cells where the Relative
Concentration Gradient is less than or equal to DCHMOC.
extension : string
Filename extension (default is 'adv')
unitnumber : int
File unit number (default is None).
filenames : str or list of str
Filenames to use for the package. If filenames=None the package name
will be created using the model name and package extension. If a
single string is passed the package will be set to the string.
Default is None.
Attributes
----------
Methods
-------
See Also
--------
Notes
-----
Examples
--------
>>> import flopy
>>> m = flopy.mt3d.Mt3dms()
>>> adv = flopy.mt3d.Mt3dAdv(m)
"""
def __init__(self, model, mixelm=3, percel=0.75, mxpart=800000, nadvfd=1,
itrack=3, wd=0.5,
dceps=1e-5, nplane=2, npl=10, nph=40, npmin=5, npmax=80,
nlsink=0, npsink=15,
dchmoc=0.0001, extension='adv', unitnumber=None,
filenames=None):
if unitnumber is None:
unitnumber = Mt3dAdv.defaultunit()
elif unitnumber == 0:
unitnumber = Mt3dAdv.reservedunit()
# set filenames
if filenames is None:
filenames = [None]
elif isinstance(filenames, str):
filenames = [filenames]
# Fill namefile items
name = [Mt3dAdv.ftype()]
units = [unitnumber]
extra = ['']
# set package name
fname = [filenames[0]]
# Call ancestor's init to set self.parent, extension, name and unit number
Package.__init__(self, model, extension=extension, name=name,
unit_number=units, extra=extra, filenames=fname)
self.mixelm = mixelm
self.percel = percel
self.mxpart = mxpart
self.nadvfd = nadvfd
self.mixelm = mixelm
self.itrack = itrack
self.wd = wd
self.dceps = dceps
self.nplane = nplane
self.npl = npl
self.nph = nph
self. npmin = npmin
self.npmax = npmax
self.interp = 1 # Command-line 'interp' might once be needed if MT3DMS is updated to include other interpolation method
self.nlsink = nlsink
self.npsink = npsink
self.dchmoc = dchmoc
self.parent.add_package(self)
return
def write_file(self):
"""
Write the package file
Returns
-------
None
"""
f_adv = open(self.fn_path, 'w')
f_adv.write('%10i%10f%10i%10i\n' % (self.mixelm, self.percel,
self.mxpart, self.nadvfd))
if (self.mixelm > 0):
f_adv.write('%10i%10f\n' % (self.itrack, self.wd))
if ((self.mixelm == 1) or (self.mixelm == 3)):
f_adv.write('%10.4e%10i%10i%10i%10i%10i\n' % (self.dceps,
self.nplane, self.npl, self.nph, self. npmin,
self.npmax))
if ((self.mixelm == 2) or (self.mixelm == 3)):
f_adv.write('%10i%10i%10i\n' % (self.interp, self.nlsink,
self.npsink))
if (self.mixelm == 3):
f_adv.write('%10f\n' % (self.dchmoc))
f_adv.close()
return
@staticmethod
def load(f, model, ext_unit_dict=None):
"""
Load an existing package.
Parameters
----------
f : filename or file handle
File to load.
model : model object
The model object (of type :class:`flopy.mt3d.mt.Mt3dms`) to
which this package will be added.
ext_unit_dict : dictionary, optional
If the arrays in the file are specified using EXTERNAL,
or older style array control records, then `f` should be a file
handle. In this case ext_unit_dict is required, which can be
constructed using the function
:class:`flopy.utils.mfreadnam.parsenamefile`.
Returns
-------
adv : Mt3dAdv object
Mt3dAdv object.
Examples
--------
>>> import flopy
>>> mt = flopy.mt3d.Mt3dms()
>>> adv = flopy.mt3d.Mt3dAdv.load('test.adv', m)
"""
if model.verbose:
sys.stdout.write('loading adv package file...\n')
# Open file, if necessary
if not hasattr(f, 'read'):
filename = f
f = open(filename, 'r')
# Dataset 0 -- comment line
while True:
line = f.readline()
if line[0] != '#':
break
# Item B1: MIXELM, PERCEL, MXPART, NADVFD - line already read above
if model.verbose:
print(' loading MIXELM, PERCEL, MXPART, NADVFD...')
mixelm = int(line[0:10])
percel = float(line[10:20])
mxpart = 0
if mixelm == 1 or mixelm == 3:
if len(line[20:30].strip()) > 0:
mxpart = int(line[20:30])
nadvfd = 0
if mixelm == 0:
if len(line[30:40].strip()) > 0:
nadvfd = int(line[30:40])
if model.verbose:
print(' MIXELM {}'.format(mixelm))
print(' PERCEL {}'.format(nadvfd))
print(' MXPART {}'.format(mxpart))
print(' NADVFD {}'.format(nadvfd))
# Item B2: ITRACK WD
itrack = None
wd = None
if mixelm == 1 or mixelm == 2 or mixelm == 3:
if model.verbose:
print(' loading ITRACK, WD...')
line = f.readline()
itrack = int(line[0:10])
wd = float(line[10:20])
if model.verbose:
print(' ITRACK {}'.format(itrack))
print(' WD {}'.format(wd))
# Item B3: DCEPS, NPLANE, NPL, NPH, NPMIN, NPMAX
dceps = None
nplane = None
npl = None
nph = None
npmin = None
npmax = None
if mixelm == 1 or mixelm == 3:
if model.verbose:
print(' loading DCEPS, NPLANE, NPL, NPH, NPMIN, NPMAX...')
line = f.readline()
dceps = float(line[0:10])
nplane = int(line[10:20])
npl = int(line[20:30])
nph = int(line[30:40])
npmin = int(line[40:50])
npmax = int(line[50:60])
if model.verbose:
print(' DCEPS {}'.format(dceps))
print(' NPLANE {}'.format(nplane))
print(' NPL {}'.format(npl))
print(' NPH {}'.format(nph))
print(' NPMIN {}'.format(npmin))
print(' NPMAX {}'.format(npmax))
# Item B4: INTERP, NLSINK, NPSINK
interp = None
nlsink = None
npsink = None
if mixelm == 2 or mixelm == 3:
if model.verbose:
print(' loading INTERP, NLSINK, NPSINK...')
line = f.readline()
interp = int(line[0:10])
nlsink = int(line[10:20])
npsink = int(line[20:30])
if model.verbose:
print(' INTERP {}'.format(interp))
print(' NLSINK {}'.format(nlsink))
print(' NPSINK {}'.format(npsink))
# Item B5: DCHMOC
dchmoc = None
if mixelm == 3:
if model.verbose:
print(' loading DCHMOC...')
line = f.readline()
dchmoc = float(line[0:10])
if model.verbose:
print(' DCHMOC {}'.format(dchmoc))
# set package unit number
unitnumber = None
filenames = [None]
if ext_unit_dict is not None:
unitnumber, filenames[0] = \
model.get_ext_dict_attr(ext_unit_dict,
filetype=Mt3dAdv.ftype())
# Construct and return adv package
adv = Mt3dAdv(model, mixelm=mixelm, percel=percel,
mxpart=mxpart, nadvfd=nadvfd,
itrack=itrack, wd=wd,
dceps=dceps, nplane=nplane, npl=npl, nph=nph,
npmin=npmin, npmax=npmax,
nlsink=nlsink, npsink=npsink,
dchmoc=dchmoc, unitnumber=unitnumber,
filenames=filenames)
return adv
@staticmethod
def ftype():
return 'ADV'
@staticmethod
def defaultunit():
return 32
@staticmethod
def reservedunit():
return 2
|
brclark-usgs/flopy
|
flopy/mt3d/mtadv.py
|
Python
|
bsd-3-clause
| 16,549
|
"""
Testing the Intravoxel incoherent motion module
The values of the various parameters used in the tests are inspired by
the study of the IVIM model applied to MR images of the brain by
Federau, Christian, et al. [1].
References
----------
.. [1] Federau, Christian, et al. "Quantitative measurement
of brain perfusion with intravoxel incoherent motion
MR imaging." Radiology 265.3 (2012): 874-881.
"""
import warnings
import numpy as np
from numpy.testing import (assert_array_equal, assert_array_almost_equal,
assert_raises, assert_array_less, run_module_suite,
assert_, assert_equal)
from dipy.testing import assert_greater_equal
import pytest
from dipy.reconst.ivim import ivim_prediction, IvimModel
from dipy.core.gradients import gradient_table, generate_bvecs
from dipy.sims.voxel import multi_tensor
from dipy.utils.optpkg import optional_package
cvxpy, have_cvxpy, _ = optional_package("cvxpy")
needs_cvxpy = pytest.mark.skipif(not have_cvxpy, reason="REQUIRES CVXPY")
def setup_module():
global gtab, ivim_fit_single, ivim_model_trr, data_single, params_trr, \
data_multi, ivim_params_trr, D_star, D, f, S0, gtab_with_multiple_b0, \
noisy_single, mevals, gtab_no_b0, ivim_fit_multi, ivim_model_VP, \
f_VP, D_star_VP, D_VP, params_VP
# Let us generate some data for testing.
bvals = np.array([0., 10., 20., 30., 40., 60., 80., 100.,
120., 140., 160., 180., 200., 300., 400.,
500., 600., 700., 800., 900., 1000.])
N = len(bvals)
bvecs = generate_bvecs(N)
gtab = gradient_table(bvals, bvecs.T, b0_threshold=0)
S0, f, D_star, D = 1000.0, 0.132, 0.00885, 0.000921
# params for a single voxel
params_trr = np.array([S0, f, D_star, D])
mevals = np.array(([D_star, D_star, D_star], [D, D, D]))
# This gives an isotropic signal.
signal = multi_tensor(gtab, mevals, snr=None, S0=S0,
fractions=[f * 100, 100 * (1 - f)])
# Single voxel data
data_single = signal[0]
data_multi = np.zeros((2, 2, 1, len(gtab.bvals)))
data_multi[0, 0, 0] = data_multi[0, 1, 0] = data_multi[
1, 0, 0] = data_multi[1, 1, 0] = data_single
ivim_params_trr = np.zeros((2, 2, 1, 4))
ivim_params_trr[0, 0, 0] = ivim_params_trr[0, 1, 0] = params_trr
ivim_params_trr[1, 0, 0] = ivim_params_trr[1, 1, 0] = params_trr
ivim_model_trr = IvimModel(gtab, fit_method='trr')
ivim_model_one_stage = IvimModel(gtab, fit_method='trr')
ivim_fit_single = ivim_model_trr.fit(data_single)
ivim_fit_multi = ivim_model_trr.fit(data_multi)
ivim_model_one_stage.fit(data_single)
ivim_model_one_stage.fit(data_multi)
bvals_no_b0 = np.array([5., 10., 20., 30., 40., 60., 80., 100.,
120., 140., 160., 180., 200., 300., 400.,
500., 600., 700., 800., 900., 1000.])
_ = generate_bvecs(N) # bvecs_no_b0
gtab_no_b0 = gradient_table(bvals_no_b0, bvecs.T, b0_threshold=0)
bvals_with_multiple_b0 = np.array([0., 0., 0., 0., 40., 60., 80., 100.,
120., 140., 160., 180., 200., 300.,
400., 500., 600., 700., 800., 900.,
1000.])
bvecs_with_multiple_b0 = generate_bvecs(N)
gtab_with_multiple_b0 = gradient_table(bvals_with_multiple_b0,
bvecs_with_multiple_b0.T,
b0_threshold=0)
noisy_single = np.array([4243.71728516, 4317.81298828, 4244.35693359,
4439.36816406, 4420.06201172, 4152.30078125,
4114.34912109, 4104.59375, 4151.61914062,
4003.58374023, 4013.68408203, 3906.39428711,
3909.06079102, 3495.27197266, 3402.57006836,
3163.10180664, 2896.04003906, 2663.7253418,
2614.87695312, 2316.55371094, 2267.7722168])
noisy_multi = np.zeros((2, 2, 1, len(gtab.bvals)))
noisy_multi[0, 1, 0] = noisy_multi[
1, 0, 0] = noisy_multi[1, 1, 0] = noisy_single
noisy_multi[0, 0, 0] = data_single
ivim_model_VP = IvimModel(gtab, fit_method='VarPro')
f_VP, D_star_VP, D_VP = 0.13, 0.0088, 0.000921
# params for a single voxel
params_VP = np.array([f, D_star, D])
ivim_params_VP = np.zeros((2, 2, 1, 3))
ivim_params_VP[0, 0, 0] = ivim_params_VP[0, 1, 0] = params_VP
ivim_params_VP[1, 0, 0] = ivim_params_VP[1, 1, 0] = params_VP
def single_exponential(S0, D, bvals):
return S0 * np.exp(-bvals * D)
def test_single_voxel_fit():
"""
Test the implementation of the fitting for a single voxel.
Here, we will use the multi_tensor function to generate a
bi-exponential signal. The multi_tensor generates a multi
tensor signal and expects eigenvalues of each tensor in mevals.
Our basic test requires a scalar signal isotropic signal and
hence we set the same eigenvalue in all three directions to
generate the required signal.
The bvals, f, D_star and D are inspired from the paper by
Federau, Christian, et al. We use the function "generate_bvecs"
to simulate bvectors corresponding to the bvalues.
In the two stage fitting routine, initially we fit the signal
values for bvals less than the specified split_b using the
TensorModel and get an intial guess for f and D. Then, using
these parameters we fit the entire data for all bvalues.
"""
est_signal = ivim_prediction(ivim_fit_single.model_params, gtab)
assert_array_equal(est_signal.shape, data_single.shape)
assert_array_almost_equal(ivim_fit_single.model_params, params_trr)
assert_array_almost_equal(est_signal, data_single)
# Test predict function for single voxel
p = ivim_fit_single.predict(gtab)
assert_array_equal(p.shape, data_single.shape)
assert_array_almost_equal(p, data_single)
def test_multivoxel():
"""Test fitting with multivoxel data.
We generate a multivoxel signal to test the fitting for multivoxel data.
This is to ensure that the fitting routine takes care of signals packed as
1D, 2D or 3D arrays.
"""
ivim_fit_multi = ivim_model_trr.fit(data_multi)
est_signal = ivim_fit_multi.predict(gtab, S0=1.)
assert_array_equal(est_signal.shape, data_multi.shape)
assert_array_almost_equal(ivim_fit_multi.model_params, ivim_params_trr)
assert_array_almost_equal(est_signal, data_multi)
def test_ivim_errors():
"""
Test if errors raised in the module are working correctly.
Scipy introduced bounded least squares fitting in the version 0.17
and is not supported by the older versions. Initializing an IvimModel
with bounds for older Scipy versions should raise an error.
"""
ivim_model_trr = IvimModel(gtab, bounds=([0., 0., 0., 0.],
[np.inf, 1., 1., 1.]),
fit_method='trr')
ivim_fit = ivim_model_trr.fit(data_multi)
est_signal = ivim_fit.predict(gtab, S0=1.)
assert_array_equal(est_signal.shape, data_multi.shape)
assert_array_almost_equal(ivim_fit.model_params, ivim_params_trr)
assert_array_almost_equal(est_signal, data_multi)
def test_mask():
"""
Test whether setting incorrect mask raises and error
"""
mask_correct = data_multi[..., 0] > 0.2
mask_not_correct = np.array([[False, True, False], [True, False]],
dtype=np.bool)
ivim_fit = ivim_model_trr.fit(data_multi, mask_correct)
est_signal = ivim_fit.predict(gtab, S0=1.)
assert_array_equal(est_signal.shape, data_multi.shape)
assert_array_almost_equal(est_signal, data_multi)
assert_array_almost_equal(ivim_fit.model_params, ivim_params_trr)
assert_raises(ValueError, ivim_model_trr.fit, data_multi,
mask=mask_not_correct)
def test_with_higher_S0():
"""
Test whether fitting works for S0 > 1.
"""
# params for a single voxel
S0_2 = 1000.
params2 = np.array([S0_2, f, D_star, D])
mevals2 = np.array(([D_star, D_star, D_star], [D, D, D]))
# This gives an isotropic signal.
signal2 = multi_tensor(gtab, mevals2, snr=None, S0=S0_2,
fractions=[f * 100, 100 * (1 - f)])
# Single voxel data
data_single2 = signal2[0]
ivim_fit = ivim_model_trr.fit(data_single2)
est_signal = ivim_fit.predict(gtab)
assert_array_equal(est_signal.shape, data_single2.shape)
assert_array_almost_equal(est_signal, data_single2)
assert_array_almost_equal(ivim_fit.model_params, params2)
def test_b0_threshold_greater_than0():
"""
Added test case for default b0_threshold set to 50.
Checks if error is thrown correctly.
"""
bvals_b0t = np.array([50., 10., 20., 30., 40., 60., 80., 100.,
120., 140., 160., 180., 200., 300., 400.,
500., 600., 700., 800., 900., 1000.])
N = len(bvals_b0t)
bvecs = generate_bvecs(N)
gtab = gradient_table(bvals_b0t, bvecs.T)
with assert_raises(ValueError) as vae:
_ = IvimModel(gtab, fit_method='trr')
b0_s = "The IVIM model requires a measurement at b==0. As of "
assert b0_s in vae.exception
def test_bounds_x0():
"""
Test to check if setting bounds for signal where initial value is
higher than subsequent values works.
These values are from the IVIM dataset which can be obtained by using
the `read_ivim` function from dipy.data.fetcher. These are values from
the voxel [160, 98, 33] which can be obtained by :
.. code-block:: python
from dipy.data.fetcher import read_ivim
img, gtab = read_ivim()
data = load_nifti_data(img)
signal = data[160, 98, 33, :]
"""
x0_test = np.array([1., 0.13, 0.001, 0.0001])
test_signal = ivim_prediction(x0_test, gtab)
ivim_fit = ivim_model_trr.fit(test_signal)
est_signal = ivim_fit.predict(gtab)
assert_array_equal(est_signal.shape, test_signal.shape)
def test_predict():
"""
Test the model prediction API.
The predict method is already used in previous tests for estimation of the
signal. But here, we will test is separately.
"""
assert_array_almost_equal(ivim_fit_single.predict(gtab),
data_single)
assert_array_almost_equal(ivim_model_trr.predict
(ivim_fit_single.model_params, gtab),
data_single)
ivim_fit_multi = ivim_model_trr.fit(data_multi)
assert_array_almost_equal(ivim_fit_multi.predict(gtab),
data_multi)
def test_fit_object():
"""
Test the method of IvimFit class
"""
assert_raises(IndexError, ivim_fit_single.__getitem__, (-.1, 0, 0))
# Check if the S0 called is matching
assert_array_almost_equal(
ivim_fit_single.__getitem__(0).model_params, 1000.)
ivim_fit_multi = ivim_model_trr.fit(data_multi)
# Should raise a TypeError if the arguments are not passed as tuple
assert_raises(TypeError, ivim_fit_multi.__getitem__, -.1, 0)
# Should return IndexError if invalid indices are passed
assert_raises(IndexError, ivim_fit_multi.__getitem__, (100, -0))
assert_raises(IndexError, ivim_fit_multi.__getitem__, (100, -0, 2))
assert_raises(IndexError, ivim_fit_multi.__getitem__, (-100, 0))
assert_raises(IndexError, ivim_fit_multi.__getitem__, [-100, 0])
assert_raises(IndexError, ivim_fit_multi.__getitem__, (1, 0, 0, 3, 4))
# Check if the get item returns the S0 value for voxel (1,0,0)
assert_array_almost_equal(
ivim_fit_multi.__getitem__((1, 0, 0)).model_params[0],
data_multi[1, 0, 0][0])
def test_shape():
"""
Test if `shape` in `IvimFit` class gives the correct output.
"""
assert_array_equal(ivim_fit_single.shape, ())
ivim_fit_multi = ivim_model_trr.fit(data_multi)
assert_array_equal(ivim_fit_multi.shape, (2, 2, 1))
def test_multiple_b0():
# Generate a signal with multiple b0
# This gives an isotropic signal.
signal = multi_tensor(gtab_with_multiple_b0, mevals, snr=None, S0=S0,
fractions=[f * 100, 100 * (1 - f)])
# Single voxel data
data_single = signal[0]
ivim_model_multiple_b0 = IvimModel(gtab_with_multiple_b0, fit_method='trr')
ivim_model_multiple_b0.fit(data_single)
# Test if all signals are positive
def test_no_b0():
assert_raises(ValueError, IvimModel, gtab_no_b0)
def test_noisy_fit():
"""
Test fitting for noisy signals. This tests whether the threshold condition
applies correctly and returns the linear fitting parameters.
For older scipy versions, the returned value of `f` from a linear fit is
around 135 and D and D_star values are equal. Hence doing a test based on
Scipy version.
"""
model_one_stage = IvimModel(gtab, fit_method='trr')
with warnings.catch_warnings(record=True) as w:
fit_one_stage = model_one_stage.fit(noisy_single)
assert_equal(len(w), 3)
for l_w in w:
assert_(issubclass(l_w.category, UserWarning))
assert_("" in str(w[0].message))
assert_("x0 obtained from linear fitting is not feasibile" in
str(w[0].message))
assert_("x0 is unfeasible" in str(w[1].message))
assert_("Bounds are violated for leastsq fitting" in str(w[2].message))
assert_array_less(fit_one_stage.model_params, [10000., 0.3, .01, 0.001])
def test_S0():
"""
Test if the `IvimFit` class returns the correct S0
"""
assert_array_almost_equal(ivim_fit_single.S0_predicted, S0)
assert_array_almost_equal(ivim_fit_multi.S0_predicted,
ivim_params_trr[..., 0])
def test_perfusion_fraction():
"""
Test if the `IvimFit` class returns the correct f
"""
assert_array_almost_equal(ivim_fit_single.perfusion_fraction, f)
assert_array_almost_equal(
ivim_fit_multi.perfusion_fraction, ivim_params_trr[..., 1])
def test_D_star():
"""
Test if the `IvimFit` class returns the correct D_star
"""
assert_array_almost_equal(ivim_fit_single.D_star, D_star)
assert_array_almost_equal(ivim_fit_multi.D_star, ivim_params_trr[..., 2])
def test_D():
"""
Test if the `IvimFit` class returns the correct D
"""
assert_array_almost_equal(ivim_fit_single.D, D)
assert_array_almost_equal(ivim_fit_multi.D, ivim_params_trr[..., 3])
def test_estimate_linear_fit():
"""
Test the linear estimates considering a single exponential fit.
"""
data_single_exponential_D = single_exponential(S0, D, gtab.bvals)
assert_array_almost_equal(ivim_model_trr.estimate_linear_fit(
data_single_exponential_D,
split_b=500.,
less_than=False),
(S0, D))
data_single_exponential_D_star = single_exponential(S0, D_star, gtab.bvals)
assert_array_almost_equal(ivim_model_trr.estimate_linear_fit(
data_single_exponential_D_star,
split_b=100.,
less_than=True),
(S0, D_star))
def test_estimate_f_D_star():
"""
Test if the `estimate_f_D_star` returns the correct parameters after a
non-linear fit.
"""
params_f_D = f + 0.001, D + 0.0001
assert_array_almost_equal(ivim_model_trr.estimate_f_D_star(params_f_D,
data_single, S0,
D),
(f, D_star))
def test_fit_one_stage():
"""
Test to check the results for the one_stage linear fit.
"""
model = IvimModel(gtab, two_stage=False)
fit = model.fit(data_single)
linear_fit_params = [9.88834140e+02, 1.19707191e-01, 7.91176970e-03,
9.30095210e-04]
linear_fit_signal = [988.83414044, 971.77122546, 955.46786293,
939.87125905, 924.93258982, 896.85182201,
870.90346447, 846.81187693, 824.34108781,
803.28900104, 783.48245048, 764.77297789,
747.03322866, 669.54798887, 605.03328304,
549.00852235, 499.21077611, 454.40299244,
413.83192296, 376.98072773, 343.45531017]
assert_array_almost_equal(fit.model_params, linear_fit_params)
assert_array_almost_equal(fit.predict(gtab), linear_fit_signal)
def test_leastsq_failing():
"""
Test for cases where leastsq fitting fails and the results from a linear
fit is returned.
"""
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always", category=UserWarning)
fit_single = ivim_model_trr.fit(noisy_single)
assert_greater_equal(len(w), 3)
u_warn = [l_w for l_w in w if issubclass(l_w.category, UserWarning)]
assert_greater_equal(len(u_warn), 3)
message = ["x0 obtained from linear fitting is not feasibile",
"x0 is unfeasible",
"Bounds are violated for leastsq fitting"]
assert_greater_equal(len([lw for lw in u_warn for m in message
if m in str(lw.message)]), 3)
# Test for the S0 and D values
assert_array_almost_equal(fit_single.S0_predicted, 4356.268901117833)
assert_array_almost_equal(fit_single.D, 6.936684e-04)
def test_leastsq_error():
"""
Test error handling of the `_leastsq` method works when unfeasible x0 is
passed. If an unfeasible x0 value is passed using which leastsq fails, the
x0 value is returned as it is.
"""
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always", category=UserWarning)
fit = ivim_model_trr._leastsq(data_single, [-1, -1, -1, -1])
assert_greater_equal(len(w), 1)
assert_(issubclass(w[-1].category, UserWarning))
assert_("" in str(w[-1].message))
assert_("x0 is unfeasible" in str(w[-1].message))
assert_array_almost_equal(fit, [-1, -1, -1, -1])
@needs_cvxpy
def test_perfusion_fraction_vp():
"""
Test if the `IvimFit` class returns the correct f
"""
ivim_fit_VP = ivim_model_VP.fit(data_single)
assert_array_almost_equal(ivim_fit_VP.perfusion_fraction, f_VP,
decimal=2)
@needs_cvxpy
def test_D_star_vp():
"""
Test if the `IvimFit` class returns the correct D_star
"""
ivim_fit_VP = ivim_model_VP.fit(data_single)
assert_array_almost_equal(ivim_fit_VP.D_star, D_star_VP, decimal=4)
@needs_cvxpy
def test_D_vp():
"""
Test if the `IvimFit` class returns the correct D
"""
ivim_fit_VP = ivim_model_VP.fit(data_single)
assert_array_almost_equal(ivim_fit_VP.D, D_VP, decimal=4)
if __name__ == '__main__':
run_module_suite()
|
FrancoisRheaultUS/dipy
|
dipy/reconst/tests/test_ivim.py
|
Python
|
bsd-3-clause
| 19,029
|
#
# BSD 3-Clause License
#
# Copyright (c) 2017-2018, plures
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Functions for generating test cases.
import sys
from itertools import accumulate, count, product
from collections import namedtuple
from random import randrange
from ndtypes import ndt, ApplySpec
from _testbuffer import get_sizeof_void_p
SIZEOF_PTR = get_sizeof_void_p()
Mem = namedtuple("Mem", "itemsize align")
# ======================================================================
# Check contiguous fixed dimensions
# ======================================================================
def c_datasize(t):
"""Check the datasize of contiguous arrays."""
datasize = t.itemsize
for v in t.shape:
datasize *= v
return datasize
# ======================================================================
# Check fixed dimensions with arbitary strides
# ======================================================================
def verify_datasize(t):
"""Verify the datasize of fixed dimensions with arbitrary strides."""
if t.itemsize == 0:
return t.datasize == 0
if t.datasize % t.itemsize:
return False
if t.ndim <= 0:
return t.ndim == 0 and not t.shape and not t.strides
if any(v < 0 for v in t.shape):
return False
if any(v % t.itemsize for v in t.strides):
return False
if 0 in t.shape:
return t.datasize == 0
imin = sum(t.strides[j]*(t.shape[j]-1) for j in range(t.ndim)
if t.strides[j] <= 0)
imax = sum(t.strides[j]*(t.shape[j]-1) for j in range(t.ndim)
if t.strides[j] > 0)
return t.datasize == (abs(imin) + imax + t.itemsize)
# ======================================================================
# Typed values
# ======================================================================
DTYPE_TEST_CASES = [
# Tuples
("()", Mem(itemsize=0, align=1)),
("(complex128)", Mem(itemsize=16, align=8)),
("(int8, int64)", Mem(itemsize=16, align=8)),
("(int8, int64, pack=1)", Mem(itemsize=9, align=1)),
("(int8, int64, pack=2)", Mem(itemsize=10, align=2)),
("(int8, int64, pack=4)", Mem(itemsize=12, align=4)),
("(int8, int64, pack=8)", Mem(itemsize=16, align=8)),
("(int8, int64, pack=16)", Mem(itemsize=32, align=16)),
("(int8, int64, align=1)", Mem(itemsize=16, align=8)),
("(int8, int64, align=2)", Mem(itemsize=16, align=8)),
("(int8, int64, align=4)", Mem(itemsize=16, align=8)),
("(int8, int64, align=8)", Mem(itemsize=16, align=8)),
("(int8, int64, align=16)", Mem(itemsize=16, align=16)),
("(int8 |align=1|, int64)", Mem(itemsize=16, align=8)),
("(int8 |align=2|, int64)", Mem(itemsize=16, align=8)),
("(int8 |align=4|, int64)", Mem(itemsize=16, align=8)),
("(int8 |align=8|, int64)", Mem(itemsize=16, align=8)),
("(int8 |align=16|, int64)", Mem(itemsize=16, align=16)),
("(uint16, (complex64))", Mem(itemsize=12, align=4)),
("(uint16, (complex64), pack=1)", Mem(itemsize=10, align=1)),
("(uint16, (complex64), pack=2)", Mem(itemsize=10, align=2)),
("(uint16, (complex64), pack=4)", Mem(itemsize=12, align=4)),
("(uint16, (complex64), pack=8)", Mem(itemsize=16, align=8)),
("(uint16, (complex64), align=1)", Mem(itemsize=12, align=4)),
("(uint16, (complex64), align=2)", Mem(itemsize=12, align=4)),
("(uint16, (complex64), align=4)", Mem(itemsize=12, align=4)),
("(uint16, (complex64), align=8)", Mem(itemsize=16, align=8)),
# References to tuples
("&(uint16, (complex64), align=1)", Mem(itemsize=SIZEOF_PTR, align=SIZEOF_PTR)),
("(uint16, &(complex64), pack=1)", Mem(itemsize=2+SIZEOF_PTR, align=1)),
# Constructor containing references to tuples
("Some(&(uint16, (complex64), align=1))", Mem(itemsize=SIZEOF_PTR, align=SIZEOF_PTR)),
("Some((uint16, &(complex64), pack=1))", Mem(itemsize=2+SIZEOF_PTR, align=1)),
# Optional tuples
("?(uint16, (complex64), align=1)", Mem(itemsize=12, align=4)),
("(uint16, ?(complex64), align=1)", Mem(itemsize=12, align=4)),
("?(uint16, ?(complex64), align=1)", Mem(itemsize=12, align=4)),
("?(uint16, (complex64), align=2)", Mem(itemsize=12, align=4)),
("(uint16, ?(complex64), align=4)", Mem(itemsize=12, align=4)),
("?(uint16, ?(complex64), align=8)", Mem(itemsize=16, align=8)),
# References to optional tuples or tuples with optional subtrees
("&?(uint16, (complex64), align=1)", Mem(itemsize=SIZEOF_PTR, align=SIZEOF_PTR)),
("&(uint16, ?(complex64), align=1)", Mem(itemsize=SIZEOF_PTR, align=SIZEOF_PTR)),
# Constructor containing optional tuples or tuples with optional subtrees
("Some(?(uint16, (complex64), align=1))", Mem(itemsize=12, align=4)),
("Some((uint16, ?(complex64), align=1))", Mem(itemsize=12, align=4)),
# Records
("{}", Mem(itemsize=0, align=1)),
("{x: complex128}", Mem(itemsize=16, align=8)),
("{x: int8, y: int64}", Mem(itemsize=16, align=8)),
("{x: int8, y: int64, pack=1}", Mem(itemsize=9, align=1)),
("{x: int8, y: int64, pack=2}", Mem(itemsize=10, align=2)),
("{x: int8, y: int64, pack=4}", Mem(itemsize=12, align=4)),
("{x: int8, y: int64, pack=8}", Mem(itemsize=16, align=8)),
("{x: int8, y: int64, pack=16}", Mem(itemsize=32, align=16)),
("{x: uint16, y: {z: complex128}}", Mem(itemsize=24, align=8)),
("{x: uint16, y: {z: complex128, align=16}}", Mem(itemsize=32, align=16)),
("{x: uint16, y: {z: complex128}, align=16}", Mem(itemsize=32, align=16)),
# Primitive types
("bool", Mem(itemsize=1, align=1)),
("int8", Mem(itemsize=1, align=1)),
("int16", Mem(itemsize=2, align=2)),
("int32", Mem(itemsize=4, align=4)),
("int64", Mem(itemsize=8, align=8)),
("uint8", Mem(itemsize=1, align=1)),
("uint16", Mem(itemsize=2, align=2)),
("uint32", Mem(itemsize=4, align=4)),
("uint64", Mem(itemsize=8, align=8)),
("float32", Mem(itemsize=4, align=4)),
("float64", Mem(itemsize=8, align=8)),
("complex64", Mem(itemsize=8, align=4)),
("complex128", Mem(itemsize=16, align=8)),
# Primitive optional types
("?bool", Mem(itemsize=1, align=1)),
("?int8", Mem(itemsize=1, align=1)),
("?int16", Mem(itemsize=2, align=2)),
("?int32", Mem(itemsize=4, align=4)),
("?int64", Mem(itemsize=8, align=8)),
("?uint8", Mem(itemsize=1, align=1)),
("?uint16", Mem(itemsize=2, align=2)),
("?uint32", Mem(itemsize=4, align=4)),
("?uint64", Mem(itemsize=8, align=8)),
("?float32", Mem(itemsize=4, align=4)),
("?float64", Mem(itemsize=8, align=8)),
("?complex64", Mem(itemsize=8, align=4)),
("?complex128", Mem(itemsize=16, align=8)),
# References
("&bool", Mem(itemsize=SIZEOF_PTR, align=SIZEOF_PTR)),
("&int8", Mem(itemsize=SIZEOF_PTR, align=SIZEOF_PTR)),
("&int16", Mem(itemsize=SIZEOF_PTR, align=SIZEOF_PTR)),
("&int32", Mem(itemsize=SIZEOF_PTR, align=SIZEOF_PTR)),
("&int64", Mem(itemsize=SIZEOF_PTR, align=SIZEOF_PTR)),
("ref(uint8)", Mem(itemsize=SIZEOF_PTR, align=SIZEOF_PTR)),
("ref(uint16)", Mem(itemsize=SIZEOF_PTR, align=SIZEOF_PTR)),
("ref(uint32)", Mem(itemsize=SIZEOF_PTR, align=SIZEOF_PTR)),
("ref(uint64)", Mem(itemsize=SIZEOF_PTR, align=SIZEOF_PTR)),
("ref(float32)", Mem(itemsize=SIZEOF_PTR, align=SIZEOF_PTR)),
("ref(float64)", Mem(itemsize=SIZEOF_PTR, align=SIZEOF_PTR)),
("ref(complex64)", Mem(itemsize=SIZEOF_PTR, align=SIZEOF_PTR)),
("ref(complex128)", Mem(itemsize=SIZEOF_PTR, align=SIZEOF_PTR)),
# Optional references
("?&bool", Mem(itemsize=SIZEOF_PTR, align=SIZEOF_PTR)),
("?&int8", Mem(itemsize=SIZEOF_PTR, align=SIZEOF_PTR)),
("?&int16", Mem(itemsize=SIZEOF_PTR, align=SIZEOF_PTR)),
("?&int32", Mem(itemsize=SIZEOF_PTR, align=SIZEOF_PTR)),
("?&int64", Mem(itemsize=SIZEOF_PTR, align=SIZEOF_PTR)),
("?ref(uint8)", Mem(itemsize=SIZEOF_PTR, align=SIZEOF_PTR)),
("?ref(uint16)", Mem(itemsize=SIZEOF_PTR, align=SIZEOF_PTR)),
("?ref(uint32)", Mem(itemsize=SIZEOF_PTR, align=SIZEOF_PTR)),
("?ref(uint64)", Mem(itemsize=SIZEOF_PTR, align=SIZEOF_PTR)),
("?ref(float32)", Mem(itemsize=SIZEOF_PTR, align=SIZEOF_PTR)),
("?ref(float64)", Mem(itemsize=SIZEOF_PTR, align=SIZEOF_PTR)),
("?ref(complex64)", Mem(itemsize=SIZEOF_PTR, align=SIZEOF_PTR)),
("?ref(complex128)", Mem(itemsize=SIZEOF_PTR, align=SIZEOF_PTR)),
# References to optional types
("&?bool", Mem(itemsize=SIZEOF_PTR, align=SIZEOF_PTR)),
("&?int8", Mem(itemsize=SIZEOF_PTR, align=SIZEOF_PTR)),
("&?int16", Mem(itemsize=SIZEOF_PTR, align=SIZEOF_PTR)),
("&?int32", Mem(itemsize=SIZEOF_PTR, align=SIZEOF_PTR)),
("&?int64", Mem(itemsize=SIZEOF_PTR, align=SIZEOF_PTR)),
("ref(?uint8)", Mem(itemsize=SIZEOF_PTR, align=SIZEOF_PTR)),
("ref(?uint16)", Mem(itemsize=SIZEOF_PTR, align=SIZEOF_PTR)),
("ref(?uint32)", Mem(itemsize=SIZEOF_PTR, align=SIZEOF_PTR)),
("ref(?uint64)", Mem(itemsize=SIZEOF_PTR, align=SIZEOF_PTR)),
("ref(?float32)", Mem(itemsize=SIZEOF_PTR, align=SIZEOF_PTR)),
("ref(?float64)", Mem(itemsize=SIZEOF_PTR, align=SIZEOF_PTR)),
("ref(?complex64)", Mem(itemsize=SIZEOF_PTR, align=SIZEOF_PTR)),
("ref(?complex128)", Mem(itemsize=SIZEOF_PTR, align=SIZEOF_PTR)),
# Constructors
("Some(bool)", Mem(itemsize=1, align=1)),
("Some(int8)", Mem(itemsize=1, align=1)),
("Some(int16)", Mem(itemsize=2, align=2)),
("Some(int32)", Mem(itemsize=4, align=4)),
("Some(int64)", Mem(itemsize=8, align=8)),
("Some(uint8)", Mem(itemsize=1, align=1)),
("Some(uint16)", Mem(itemsize=2, align=2)),
("Some(uint32)", Mem(itemsize=4, align=4)),
("Some(uint64)", Mem(itemsize=8, align=8)),
("Some(float32)", Mem(itemsize=4, align=4)),
("Some(float64)", Mem(itemsize=8, align=8)),
("Some(complex64)", Mem(itemsize=8, align=4)),
("Some(complex128)", Mem(itemsize=16, align=8)),
# Optional constructors
("?Some(bool)", Mem(itemsize=1, align=1)),
("?Some(int8)", Mem(itemsize=1, align=1)),
("?Some(int16)", Mem(itemsize=2, align=2)),
("?Some(int32)", Mem(itemsize=4, align=4)),
("?Some(int64)", Mem(itemsize=8, align=8)),
("?Some(uint8)", Mem(itemsize=1, align=1)),
("?Some(uint16)", Mem(itemsize=2, align=2)),
("?Some(uint32)", Mem(itemsize=4, align=4)),
("?Some(uint64)", Mem(itemsize=8, align=8)),
("?Some(float32)", Mem(itemsize=4, align=4)),
("?Some(float64)", Mem(itemsize=8, align=8)),
("?Some(complex64)", Mem(itemsize=8, align=4)),
("?Some(complex128)", Mem(itemsize=16, align=8)),
# Constructors containing optional types
("Some(?bool)", Mem(itemsize=1, align=1)),
("Some(?int8)", Mem(itemsize=1, align=1)),
("Some(?int16)", Mem(itemsize=2, align=2)),
("Some(?int32)", Mem(itemsize=4, align=4)),
("Some(?int64)", Mem(itemsize=8, align=8)),
("Some(?uint8)", Mem(itemsize=1, align=1)),
("Some(?uint16)", Mem(itemsize=2, align=2)),
("Some(?uint32)", Mem(itemsize=4, align=4)),
("Some(?uint64)", Mem(itemsize=8, align=8)),
("Some(?float32)", Mem(itemsize=4, align=4)),
("Some(?float64)", Mem(itemsize=8, align=8)),
("Some(?complex64)", Mem(itemsize=8, align=4)),
("Some(?complex128)", Mem(itemsize=16, align=8)),
]
# ======================================================================
# Broadcasting
# ======================================================================
def genindices(factor):
for i in range(4):
yield ()
for i in range(4):
yield (factor * i,)
for i in range(4):
for j in range(4):
yield (factor * i, factor * j)
for i in range(4):
for j in range(4):
for k in range(4):
yield (factor * i, factor * j, factor * k)
BROADCAST_TEST_CASES = [
dict(sig=ndt("uint8 -> float64"),
args=[ndt("uint8")],
out=None,
spec= ApplySpec(
flags = 'C|Fortran|Strided|Xnd',
outer_dims = 0,
nin = 1,
nout = 1,
nargs = 2,
types = [ndt("uint8"), ndt("float64")])),
dict(sig=ndt("... * uint8 -> ... * float64"),
args=[ndt("2 * uint8")],
out=None,
spec=ApplySpec(
flags = 'OptZ|OptC|OptS|C|Fortran|Strided|Xnd',
outer_dims = 1,
nin = 1,
nout = 1,
nargs = 2,
types = [ndt("2 * uint8"), ndt("2 * float64")])),
dict(sig=ndt("F[... * uint8] -> F[... * float64]"),
args=[ndt("!2 * 3 * uint8")],
out=None,
spec=ApplySpec(
flags = 'OptS|C|Fortran|Strided|Xnd',
outer_dims = 2,
nin = 1,
nout = 1,
nargs = 2,
types = [ndt("!2 * 3 * uint8"), ndt("!2 * 3 * float64")])),
dict(sig=ndt("... * uint8 -> ... * float64"),
args=[ndt("fixed(shape=2, step=10) * uint8")],
out=None,
spec=ApplySpec(
flags = 'OptS|C|Fortran|Strided|Xnd',
outer_dims = 1,
nin = 1,
nout = 1,
nargs = 2,
types = [ndt("fixed(shape=2, step=10) * uint8"), ndt("2 * float64")])),
dict(sig=ndt("... * N * uint8 -> ... * N * float64"),
args=[ndt("fixed(shape=2, step=10) * uint8")],
out=None,
spec=ApplySpec(
flags = 'Strided|Xnd',
outer_dims = 0,
nin = 1,
nout = 1,
nargs = 2,
types = [ndt("fixed(shape=2, step=10) * uint8"), ndt("2 * float64")])),
dict(sig=ndt("... * N * uint8 -> ... * N * float64"),
args=[ndt("2 * 3 * uint8")],
out=None,
spec=ApplySpec(
flags = 'OptZ|OptC|OptS|C|Fortran|Strided|Xnd' ,
outer_dims = 1,
nin = 1,
nout = 1,
nargs = 2,
types = [ndt("2 * 3 * uint8"), ndt("2 * 3 * float64")])),
dict(sig=ndt("... * N * M * uint8 -> ... * N * M * float64"),
args=[ndt("2 * 3 * uint8")],
out=None,
spec=ApplySpec(
flags = 'C|Strided|Xnd',
outer_dims = 0,
nin = 1,
nout = 1,
nargs = 2,
types = [ndt("2 * 3 * uint8"), ndt("2 * 3 * float64")])),
dict(sig=ndt("var... * float64 -> var... * float64"),
args=[ndt("var(offsets=[0,2]) * var(offsets=[0,4,11]) * float64")],
out=None,
spec=ApplySpec(
flags = 'Xnd',
outer_dims = 2,
nin = 1,
nout = 1,
nargs = 2,
types = [ndt("var(offsets=[0,2]) * var(offsets=[0,4,11]) * float64"),
ndt("var(offsets=[0,2]) * var(offsets=[0,4,11]) * float64")])),
]
|
skrah/ndtypes
|
python/ndt_randtype.py
|
Python
|
bsd-3-clause
| 16,372
|
SECRET_KEY = 'spam'
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ROOT_URLCONF = 'tests.urls'
INSTALLED_APPS = ['tests']
DATABASES = {'default': {'NAME': 'db.sqlite',
'ENGINE': 'django.db.backends.sqlite3'}}
# Django < 1.8
TEMPLATE_CONTEXT_PROCESSORS = [
'django_settings_export.settings_export'
]
# Django 1.8+
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django_settings_export.settings_export',
],
},
},
]
FOO = 'foo'
BAR = 'bar'
SETTINGS_EXPORT = [
'FOO',
'BAR',
]
|
jkbrzt/django-settings-export
|
tests/settings.py
|
Python
|
bsd-3-clause
| 688
|
# ----------------------------------------------------------------------------
# Copyright (c) 2016-2017, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import collections
import os
import tempfile
import unittest
import uuid
import qiime2.core.type
from qiime2.sdk import Artifact
from qiime2.sdk.result import ResultMetadata
import qiime2.core.archive as archive
from qiime2.core.testing.type import IntSequence1, FourInts, Mapping
from qiime2.core.testing.util import get_dummy_plugin, ArchiveTestingMixin
class TestArtifact(unittest.TestCase, ArchiveTestingMixin):
def setUp(self):
# Ignore the returned dummy plugin object, just run this to verify the
# plugin exists as the tests rely on it being loaded.
get_dummy_plugin()
# TODO standardize temporary directories created by QIIME 2
self.test_dir = tempfile.TemporaryDirectory(prefix='qiime2-test-temp-')
self.provenance_capture = archive.ImportProvenanceCapture()
def tearDown(self):
self.test_dir.cleanup()
def test_private_constructor(self):
with self.assertRaisesRegex(
NotImplementedError,
'Artifact constructor.*private.*Artifact.load'):
Artifact()
# Note on testing strategy below: many of the tests for `_from_view` and
# `load` are similar, with the exception that when `load`ing, the
# artifact's UUID is known so more specific assertions can be performed.
# While these tests appear somewhat redundant, they are important because
# they exercise the same operations on Artifact objects constructed from
# different sources, whose codepaths have very different internal behavior.
# This internal behavior could be tested explicitly but it is safer to test
# the public API behavior (e.g. as a user would interact with the object)
# in case the internals change.
def test_from_view(self):
artifact = Artifact._from_view(FourInts, [-1, 42, 0, 43], list,
self.provenance_capture)
self.assertEqual(artifact.type, FourInts)
# We don't know what the UUID is because it's generated within
# Artifact._from_view.
self.assertIsInstance(artifact.uuid, uuid.UUID)
self.assertEqual(artifact.view(list), [-1, 42, 0, 43])
# Can produce same view if called again.
self.assertEqual(artifact.view(list), [-1, 42, 0, 43])
def test_from_view_different_type_with_multiple_view_types(self):
artifact = Artifact._from_view(IntSequence1, [42, 42, 43, -999, 42],
list, self.provenance_capture)
self.assertEqual(artifact.type, IntSequence1)
self.assertIsInstance(artifact.uuid, uuid.UUID)
self.assertEqual(artifact.view(list),
[42, 42, 43, -999, 42])
self.assertEqual(artifact.view(list),
[42, 42, 43, -999, 42])
self.assertEqual(artifact.view(collections.Counter),
collections.Counter({42: 3, 43: 1, -999: 1}))
self.assertEqual(artifact.view(collections.Counter),
collections.Counter({42: 3, 43: 1, -999: 1}))
def test_from_view_and_save(self):
fp = os.path.join(self.test_dir.name, 'artifact.qza')
# Using four-ints data layout because it has multiple files, some of
# which are in a nested directory.
artifact = Artifact._from_view(FourInts, [-1, 42, 0, 43], list,
self.provenance_capture)
artifact.save(fp)
root_dir = str(artifact.uuid)
expected = {
'VERSION',
'metadata.yaml',
'data/file1.txt',
'data/file2.txt',
'data/nested/file3.txt',
'data/nested/file4.txt',
'provenance/metadata.yaml',
'provenance/VERSION',
'provenance/action/action.yaml'
}
self.assertArchiveMembers(fp, root_dir, expected)
def test_load(self):
saved_artifact = Artifact.import_data(FourInts, [-1, 42, 0, 43])
fp = os.path.join(self.test_dir.name, 'artifact.qza')
saved_artifact.save(fp)
artifact = Artifact.load(fp)
self.assertEqual(artifact.type, FourInts)
self.assertEqual(artifact.uuid, saved_artifact.uuid)
self.assertEqual(artifact.view(list), [-1, 42, 0, 43])
self.assertEqual(artifact.view(list), [-1, 42, 0, 43])
def test_load_different_type_with_multiple_view_types(self):
saved_artifact = Artifact.import_data(IntSequence1,
[42, 42, 43, -999, 42])
fp = os.path.join(self.test_dir.name, 'artifact.qza')
saved_artifact.save(fp)
artifact = Artifact.load(fp)
self.assertEqual(artifact.type, IntSequence1)
self.assertEqual(artifact.uuid, saved_artifact.uuid)
self.assertEqual(artifact.view(list),
[42, 42, 43, -999, 42])
self.assertEqual(artifact.view(list),
[42, 42, 43, -999, 42])
self.assertEqual(artifact.view(collections.Counter),
collections.Counter({42: 3, 43: 1, -999: 1}))
self.assertEqual(artifact.view(collections.Counter),
collections.Counter({42: 3, 43: 1, -999: 1}))
def test_load_and_save(self):
fp1 = os.path.join(self.test_dir.name, 'artifact1.qza')
fp2 = os.path.join(self.test_dir.name, 'artifact2.qza')
artifact = Artifact.import_data(FourInts, [-1, 42, 0, 43])
artifact.save(fp1)
artifact = Artifact.load(fp1)
# Overwriting its source file works.
artifact.save(fp1)
# Saving to a new file works.
artifact.save(fp2)
root_dir = str(artifact.uuid)
expected = {
'VERSION',
'metadata.yaml',
'data/file1.txt',
'data/file2.txt',
'data/nested/file3.txt',
'data/nested/file4.txt',
'provenance/metadata.yaml',
'provenance/VERSION',
'provenance/action/action.yaml'
}
self.assertArchiveMembers(fp1, root_dir, expected)
root_dir = str(artifact.uuid)
expected = {
'VERSION',
'metadata.yaml',
'data/file1.txt',
'data/file2.txt',
'data/nested/file3.txt',
'data/nested/file4.txt',
'provenance/metadata.yaml',
'provenance/VERSION',
'provenance/action/action.yaml'
}
self.assertArchiveMembers(fp2, root_dir, expected)
def test_roundtrip(self):
fp1 = os.path.join(self.test_dir.name, 'artifact1.qza')
fp2 = os.path.join(self.test_dir.name, 'artifact2.qza')
artifact = Artifact.import_data(FourInts, [-1, 42, 0, 43])
artifact.save(fp1)
artifact1 = Artifact.load(fp1)
artifact1.save(fp2)
artifact2 = Artifact.load(fp2)
self.assertEqual(artifact1.type, artifact2.type)
self.assertEqual(artifact1.format, artifact2.format)
self.assertEqual(artifact1.uuid, artifact2.uuid)
self.assertEqual(artifact1.view(list),
artifact2.view(list))
# double view to make sure multiple views can be taken
self.assertEqual(artifact1.view(list),
artifact2.view(list))
def test_load_with_archive_filepath_modified(self):
# Save an artifact for use in the following test case.
fp = os.path.join(self.test_dir.name, 'artifact.qza')
Artifact.import_data(FourInts, [-1, 42, 0, 43]).save(fp)
# Load the artifact from a filepath then save a different artifact to
# the same filepath. Assert that both artifacts produce the correct
# views of their data.
#
# `load` used to be lazy, only extracting data when it needed to (e.g.
# when `save` or `view` was called). This was buggy as the filepath
# could have been deleted, or worse, modified to contain a different
# .qza file. Thus, the wrong archive could be extracted on demand, or
# the archive could be missing altogether. There isn't an easy
# cross-platform compatible way to solve this problem, so Artifact.load
# is no longer lazy and always extracts its data immediately. The real
# motivation for lazy loading was for quick inspection of archives
# without extracting/copying data, so that API is now provided through
# Artifact.peek.
artifact1 = Artifact.load(fp)
Artifact.import_data(FourInts, [10, 11, 12, 13]).save(fp)
artifact2 = Artifact.load(fp)
self.assertEqual(artifact1.view(list), [-1, 42, 0, 43])
self.assertEqual(artifact2.view(list), [10, 11, 12, 13])
def test_extract(self):
fp = os.path.join(self.test_dir.name, 'artifact.qza')
artifact = Artifact.import_data(FourInts, [-1, 42, 0, 43])
artifact.save(fp)
root_dir = str(artifact.uuid)
output_dir = os.path.join(self.test_dir.name, 'artifact-extract-test')
result_dir = Artifact.extract(fp, output_dir=output_dir)
self.assertEqual(result_dir, os.path.join(output_dir, root_dir))
expected = {
'VERSION',
'metadata.yaml',
'data/file1.txt',
'data/file2.txt',
'data/nested/file3.txt',
'data/nested/file4.txt',
'provenance/metadata.yaml',
'provenance/VERSION',
'provenance/action/action.yaml'
}
self.assertExtractedArchiveMembers(output_dir, root_dir, expected)
def test_peek(self):
artifact = Artifact.import_data(FourInts, [0, 0, 42, 1000])
fp = os.path.join(self.test_dir.name, 'artifact.qza')
artifact.save(fp)
metadata = Artifact.peek(fp)
self.assertIsInstance(metadata, ResultMetadata)
self.assertEqual(metadata.type, 'FourInts')
self.assertEqual(metadata.uuid, str(artifact.uuid))
self.assertEqual(metadata.format, 'FourIntsDirectoryFormat')
def test_import_data_invalid_type(self):
with self.assertRaisesRegex(TypeError,
'concrete semantic type.*Visualization'):
Artifact.import_data(qiime2.core.type.Visualization, self.test_dir)
with self.assertRaisesRegex(TypeError,
'concrete semantic type.*Visualization'):
Artifact.import_data('Visualization', self.test_dir)
def test_import_data_with_filepath_multi_file_data_layout(self):
fp = os.path.join(self.test_dir.name, 'test.txt')
with open(fp, 'w') as fh:
fh.write('42\n')
with self.assertRaisesRegex(ValueError,
"FourIntsDirectoryFormat.*directory"):
Artifact.import_data(FourInts, fp)
def test_import_data_with_wrong_number_of_files(self):
data_dir = os.path.join(self.test_dir.name, 'test')
os.mkdir(data_dir)
error_regex = ("Missing.*MappingDirectoryFormat.*mapping.tsv")
with self.assertRaisesRegex(ValueError, error_regex):
Artifact.import_data(Mapping, data_dir)
def test_import_data_with_unrecognized_files(self):
data_dir = os.path.join(self.test_dir.name, 'test')
os.mkdir(data_dir)
with open(os.path.join(data_dir, 'file1.txt'), 'w') as fh:
fh.write('42\n')
with open(os.path.join(data_dir, 'file2.txt'), 'w') as fh:
fh.write('43\n')
nested = os.path.join(data_dir, 'nested')
os.mkdir(nested)
with open(os.path.join(nested, 'file3.txt'), 'w') as fh:
fh.write('44\n')
with open(os.path.join(nested, 'foo.txt'), 'w') as fh:
fh.write('45\n')
error_regex = ("Unrecognized.*foo.txt.*FourIntsDirectoryFormat")
with self.assertRaisesRegex(ValueError, error_regex):
Artifact.import_data(FourInts, data_dir)
def test_import_data_with_unreachable_path(self):
with self.assertRaisesRegex(ValueError, "does not exist"):
Artifact.import_data(IntSequence1,
os.path.join(self.test_dir.name, 'foo.txt'))
with self.assertRaisesRegex(ValueError, "does not exist"):
Artifact.import_data(FourInts,
os.path.join(self.test_dir.name, 'bar', ''))
def test_import_data_with_invalid_format_single_file(self):
fp = os.path.join(self.test_dir.name, 'foo.txt')
with open(fp, 'w') as fh:
fh.write('42\n')
fh.write('43\n')
fh.write('abc\n')
fh.write('123\n')
error_regex = "foo.txt.*IntSequenceFormat"
with self.assertRaisesRegex(ValueError, error_regex):
Artifact.import_data(IntSequence1, fp)
def test_import_data_with_invalid_format_multi_file(self):
data_dir = os.path.join(self.test_dir.name, 'test')
os.mkdir(data_dir)
with open(os.path.join(data_dir, 'file1.txt'), 'w') as fh:
fh.write('42\n')
with open(os.path.join(data_dir, 'file2.txt'), 'w') as fh:
fh.write('43\n')
nested = os.path.join(data_dir, 'nested')
os.mkdir(nested)
with open(os.path.join(nested, 'file3.txt'), 'w') as fh:
fh.write('44\n')
with open(os.path.join(nested, 'file4.txt'), 'w') as fh:
fh.write('foo\n')
error_regex = "file4.txt.*SingleIntFormat"
with self.assertRaisesRegex(ValueError, error_regex):
Artifact.import_data(FourInts, data_dir)
def test_import_data_with_filepath(self):
data_dir = os.path.join(self.test_dir.name, 'test')
os.mkdir(data_dir)
# Filename shouldn't matter for single-file case.
fp = os.path.join(data_dir, 'foo.txt')
with open(fp, 'w') as fh:
fh.write('42\n')
fh.write('43\n')
fh.write('42\n')
fh.write('0\n')
artifact = Artifact.import_data(IntSequence1, fp)
self.assertEqual(artifact.type, IntSequence1)
self.assertIsInstance(artifact.uuid, uuid.UUID)
self.assertEqual(artifact.view(list), [42, 43, 42, 0])
def test_import_data_with_directory_single_file(self):
data_dir = os.path.join(self.test_dir.name, 'test')
os.mkdir(data_dir)
fp = os.path.join(data_dir, 'ints.txt')
with open(fp, 'w') as fh:
fh.write('-1\n')
fh.write('-2\n')
fh.write('10\n')
fh.write('100\n')
artifact = Artifact.import_data(IntSequence1, data_dir)
self.assertEqual(artifact.type, IntSequence1)
self.assertIsInstance(artifact.uuid, uuid.UUID)
self.assertEqual(artifact.view(list), [-1, -2, 10, 100])
def test_import_data_with_directory_multi_file(self):
data_dir = os.path.join(self.test_dir.name, 'test')
os.mkdir(data_dir)
with open(os.path.join(data_dir, 'file1.txt'), 'w') as fh:
fh.write('42\n')
with open(os.path.join(data_dir, 'file2.txt'), 'w') as fh:
fh.write('41\n')
nested = os.path.join(data_dir, 'nested')
os.mkdir(nested)
with open(os.path.join(nested, 'file3.txt'), 'w') as fh:
fh.write('43\n')
with open(os.path.join(nested, 'file4.txt'), 'w') as fh:
fh.write('40\n')
artifact = Artifact.import_data(FourInts, data_dir)
self.assertEqual(artifact.type, FourInts)
self.assertIsInstance(artifact.uuid, uuid.UUID)
self.assertEqual(artifact.view(list), [42, 41, 43, 40])
def test_eq_identity(self):
artifact = Artifact.import_data(FourInts, [-1, 42, 0, 43])
self.assertEqual(artifact, artifact)
def test_eq_same_uuid(self):
fp = os.path.join(self.test_dir.name, 'artifact.qza')
artifact1 = Artifact.import_data(FourInts, [-1, 42, 0, 43])
artifact1.save(fp)
artifact2 = Artifact.load(fp)
self.assertEqual(artifact1, artifact2)
def test_ne_same_data_different_uuid(self):
artifact1 = Artifact.import_data(FourInts, [-1, 42, 0, 43])
artifact2 = Artifact.import_data(FourInts, [-1, 42, 0, 43])
self.assertNotEqual(artifact1, artifact2)
def test_ne_different_data_different_uuid(self):
artifact1 = Artifact.import_data(FourInts, [-1, 42, 0, 43])
artifact2 = Artifact.import_data(FourInts, [1, 2, 3, 4])
self.assertNotEqual(artifact1, artifact2)
def test_ne_subclass_same_uuid(self):
class ArtifactSubclass(Artifact):
pass
fp = os.path.join(self.test_dir.name, 'artifact.qza')
artifact1 = ArtifactSubclass.import_data(FourInts, [-1, 42, 0, 43])
artifact1.save(fp)
artifact2 = Artifact.load(fp)
self.assertNotEqual(artifact1, artifact2)
self.assertNotEqual(artifact2, artifact1)
def test_ne_different_type_same_uuid(self):
artifact = Artifact.import_data(FourInts, [-1, 42, 0, 43])
class Faker:
@property
def uuid(self):
return artifact.uuid
faker = Faker()
self.assertNotEqual(artifact, faker)
if __name__ == '__main__':
unittest.main()
|
jairideout/qiime2
|
qiime2/sdk/tests/test_artifact.py
|
Python
|
bsd-3-clause
| 17,709
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
astroquery.solarsystem.jpl
--------------------------
a collection of data services provided by JPL
"""
from .sbdb import *
from .horizons import *
from . import *
|
imbasimba/astroquery
|
astroquery/solarsystem/jpl/__init__.py
|
Python
|
bsd-3-clause
| 235
|
# -*- coding: utf-8 -*-
import os
from cookiecutter import repository, exceptions
import pytest
def test_finds_local_repo(tmpdir):
"""A valid local repository should be returned."""
project_dir = repository.determine_repo_dir(
'tests/fake-repo',
abbreviations={},
clone_to_dir=str(tmpdir),
checkout=None,
no_input=True
)
assert 'tests/fake-repo' == project_dir
def test_local_repo_with_no_context_raises(tmpdir):
"""A local repository without a cookiecutter.json should raise a
`RepositoryNotFound` exception.
"""
template_path = os.path.join('tests', 'fake-repo-bad')
with pytest.raises(exceptions.RepositoryNotFound) as err:
repository.determine_repo_dir(
template_path,
abbreviations={},
clone_to_dir=str(tmpdir),
checkout=None,
no_input=True
)
assert str(err.value) == (
'A valid repository for "{}" could not be found in the following '
'locations:\n{}'.format(
template_path,
'\n'.join([
template_path,
str(tmpdir / 'tests/fake-repo-bad')
]),
)
)
def test_local_repo_typo(tmpdir):
"""An unknown local repository should raise a `RepositoryNotFound`
exception.
"""
template_path = os.path.join('tests', 'unknown-repo')
with pytest.raises(exceptions.RepositoryNotFound) as err:
repository.determine_repo_dir(
template_path,
abbreviations={},
clone_to_dir=str(tmpdir),
checkout=None,
no_input=True
)
assert str(err.value) == (
'A valid repository for "{}" could not be found in the following '
'locations:\n{}'.format(
template_path,
'\n'.join([
template_path,
str(tmpdir / 'tests/unknown-repo')
]),
)
)
|
stevepiercy/cookiecutter
|
tests/repository/test_determine_repository_should_use_local_repo.py
|
Python
|
bsd-3-clause
| 1,964
|
# -*- coding: utf-8 -*-
import logging
from unittest import mock
import olympia.core.logger
from olympia.amo.tests import TestCase
from olympia.users.models import UserProfile
class LoggerTests(TestCase):
@mock.patch('olympia.core.get_remote_addr', lambda: '127.0.0.1')
@mock.patch('olympia.core.get_user', lambda: UserProfile(username=u'fôo'))
def test_get_logger_adapter(self):
log = olympia.core.logger.getLogger('test')
expected_kwargs = {
'extra': {
'REMOTE_ADDR': '127.0.0.1',
'USERNAME': u'fôo',
}
}
assert log.process('test msg', {}) == ('test msg', expected_kwargs)
@mock.patch('olympia.core.get_remote_addr', lambda: '127.0.0.1')
@mock.patch('olympia.core.get_user', lambda: None)
def test_logger_adapter_user_is_none(self):
log = olympia.core.logger.getLogger('test')
expected_kwargs = {
'extra': {
'REMOTE_ADDR': '127.0.0.1',
'USERNAME': '<anon>',
}
}
assert log.process('test msg', {}) == ('test msg', expected_kwargs)
@mock.patch('olympia.core.get_remote_addr', lambda: None)
@mock.patch('olympia.core.get_user', lambda: UserProfile(username='bar'))
def test_logger_adapter_addr_is_none(self):
log = olympia.core.logger.getLogger('test')
expected_kwargs = {
'extra': {
'REMOTE_ADDR': '',
'USERNAME': 'bar',
}
}
assert log.process('test msg', {}) == ('test msg', expected_kwargs)
def test_formatter(self):
formatter = olympia.core.logger.Formatter()
record = logging.makeLogRecord({})
formatter.format(record)
assert 'USERNAME' in record.__dict__
assert 'REMOTE_ADDR' in record.__dict__
def test_json_formatter(self):
formatter = olympia.core.logger.JsonFormatter()
record = logging.makeLogRecord({})
# These would be set by the adapter.
record.__dict__['USERNAME'] = 'foo'
record.__dict__['REMOTE_ADDR'] = '127.0.0.1'
formatter.format(record)
assert record.__dict__['uid'] == 'foo'
assert record.__dict__['remoteAddressChain'] == '127.0.0.1'
|
kumar303/addons-server
|
src/olympia/core/tests/test_logger.py
|
Python
|
bsd-3-clause
| 2,279
|
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Daniel Strohmeier <daniel.strohmeier@tu-ilmenau.de>
#
# License: Simplified BSD
import os.path as op
import numpy as np
from numpy.testing import (assert_array_almost_equal, assert_allclose,
assert_array_less, assert_array_equal)
import pytest
import mne
from mne.datasets import testing
from mne.label import read_label
from mne import (read_cov, read_forward_solution, read_evokeds,
convert_forward_solution)
from mne.inverse_sparse import mixed_norm, tf_mixed_norm
from mne.inverse_sparse.mxne_inverse import make_stc_from_dipoles, _split_gof
from mne.inverse_sparse.mxne_inverse import _compute_mxne_sure
from mne.inverse_sparse.mxne_optim import norm_l2inf
from mne.minimum_norm import apply_inverse, make_inverse_operator
from mne.minimum_norm.tests.test_inverse import \
assert_var_exp_log, assert_stc_res
from mne.utils import assert_stcs_equal, catch_logging, _record_warnings
from mne.dipole import Dipole
from mne.source_estimate import VolSourceEstimate
from mne.simulation import simulate_sparse_stc, simulate_evoked
data_path = testing.data_path(download=False)
# NOTE: These use the ave and cov from sample dataset (no _trunc)
fname_data = op.join(data_path, 'MEG', 'sample', 'sample_audvis-ave.fif')
fname_cov = op.join(data_path, 'MEG', 'sample', 'sample_audvis-cov.fif')
fname_raw = op.join(data_path, 'MEG', 'sample', 'sample_audvis_trunc_raw.fif')
fname_fwd = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc-meg-eeg-oct-6-fwd.fif')
label = 'Aud-rh'
fname_label = op.join(data_path, 'MEG', 'sample', 'labels', '%s.label' % label)
@pytest.fixture(scope='module', params=[testing._pytest_param])
def forward():
"""Get a forward solution."""
# module scope it for speed (but don't overwrite in use!)
return read_forward_solution(fname_fwd)
@testing.requires_testing_data
@pytest.mark.timeout(150) # ~30 sec on Travis Linux
@pytest.mark.slowtest
def test_mxne_inverse_standard(forward):
"""Test (TF-)MxNE inverse computation."""
# Read noise covariance matrix
cov = read_cov(fname_cov)
# Handling average file
loose = 0.0
depth = 0.9
evoked = read_evokeds(fname_data, condition=0, baseline=(None, 0))
evoked.crop(tmin=-0.05, tmax=0.2)
evoked_l21 = evoked.copy()
evoked_l21.crop(tmin=0.081, tmax=0.1)
label = read_label(fname_label)
assert label.hemi == 'rh'
forward = convert_forward_solution(forward, surf_ori=True)
# Reduce source space to make test computation faster
inverse_operator = make_inverse_operator(evoked_l21.info, forward, cov,
loose=loose, depth=depth,
fixed=True, use_cps=True)
stc_dspm = apply_inverse(evoked_l21, inverse_operator, lambda2=1. / 9.,
method='dSPM')
stc_dspm.data[np.abs(stc_dspm.data) < 12] = 0.0
stc_dspm.data[np.abs(stc_dspm.data) >= 12] = 1.
weights_min = 0.5
# MxNE tests
alpha = 70 # spatial regularization parameter
with _record_warnings(): # CD
stc_cd = mixed_norm(evoked_l21, forward, cov, alpha, loose=loose,
depth=depth, maxit=300, tol=1e-8,
active_set_size=10, weights=stc_dspm,
weights_min=weights_min, solver='cd')
stc_bcd = mixed_norm(evoked_l21, forward, cov, alpha, loose=loose,
depth=depth, maxit=300, tol=1e-8, active_set_size=10,
weights=stc_dspm, weights_min=weights_min,
solver='bcd')
assert_array_almost_equal(stc_cd.times, evoked_l21.times, 5)
assert_array_almost_equal(stc_bcd.times, evoked_l21.times, 5)
assert_allclose(stc_cd.data, stc_bcd.data, rtol=1e-3, atol=0.0)
assert stc_cd.vertices[1][0] in label.vertices
assert stc_bcd.vertices[1][0] in label.vertices
# vector
with _record_warnings(): # no convergence
stc = mixed_norm(evoked_l21, forward, cov, alpha, loose=1, maxit=2)
with _record_warnings(): # no convergence
stc_vec = mixed_norm(evoked_l21, forward, cov, alpha, loose=1, maxit=2,
pick_ori='vector')
assert_stcs_equal(stc_vec.magnitude(), stc)
with _record_warnings(), \
pytest.raises(ValueError, match='pick_ori='):
mixed_norm(evoked_l21, forward, cov, alpha, loose=0, maxit=2,
pick_ori='vector')
with _record_warnings(), catch_logging() as log: # CD
dips = mixed_norm(evoked_l21, forward, cov, alpha, loose=loose,
depth=depth, maxit=300, tol=1e-8, active_set_size=10,
weights=stc_dspm, weights_min=weights_min,
solver='cd', return_as_dipoles=True, verbose=True)
stc_dip = make_stc_from_dipoles(dips, forward['src'])
assert isinstance(dips[0], Dipole)
assert stc_dip.subject == "sample"
assert_stcs_equal(stc_cd, stc_dip)
assert_var_exp_log(log.getvalue(), 51, 53) # 51.8
# Single time point things should match
with _record_warnings(), catch_logging() as log:
dips = mixed_norm(evoked_l21.copy().crop(0.081, 0.081),
forward, cov, alpha, loose=loose,
depth=depth, maxit=300, tol=1e-8, active_set_size=10,
weights=stc_dspm, weights_min=weights_min,
solver='cd', return_as_dipoles=True, verbose=True)
assert_var_exp_log(log.getvalue(), 37.8, 38.0) # 37.9
gof = sum(dip.gof[0] for dip in dips) # these are now partial exp vars
assert_allclose(gof, 37.9, atol=0.1)
with _record_warnings(), catch_logging() as log:
stc, res = mixed_norm(evoked_l21, forward, cov, alpha, loose=loose,
depth=depth, maxit=300, tol=1e-8,
weights=stc_dspm, # gh-6382
active_set_size=10, return_residual=True,
solver='cd', verbose=True)
assert_array_almost_equal(stc.times, evoked_l21.times, 5)
assert stc.vertices[1][0] in label.vertices
assert_var_exp_log(log.getvalue(), 51, 53) # 51.8
assert stc.data.min() < -1e-9 # signed
assert_stc_res(evoked_l21, stc, forward, res)
# irMxNE tests
with _record_warnings(), catch_logging() as log: # CD
stc, residual = mixed_norm(
evoked_l21, forward, cov, alpha, n_mxne_iter=5, loose=0.0001,
depth=depth, maxit=300, tol=1e-8, active_set_size=10,
solver='cd', return_residual=True, pick_ori='vector', verbose=True)
assert_array_almost_equal(stc.times, evoked_l21.times, 5)
assert stc.vertices[1][0] in label.vertices
assert stc.vertices == [[63152], [79017]]
assert_var_exp_log(log.getvalue(), 51, 53) # 51.8
assert_stc_res(evoked_l21, stc, forward, residual)
# Do with TF-MxNE for test memory savings
alpha = 60. # overall regularization parameter
l1_ratio = 0.01 # temporal regularization proportion
stc, _ = tf_mixed_norm(evoked, forward, cov,
loose=loose, depth=depth, maxit=100, tol=1e-4,
tstep=4, wsize=16, window=0.1, weights=stc_dspm,
weights_min=weights_min, return_residual=True,
alpha=alpha, l1_ratio=l1_ratio)
assert_array_almost_equal(stc.times, evoked.times, 5)
assert stc.vertices[1][0] in label.vertices
# vector
stc_nrm = tf_mixed_norm(
evoked, forward, cov, loose=1, depth=depth, maxit=2, tol=1e-4,
tstep=4, wsize=16, window=0.1, weights=stc_dspm,
weights_min=weights_min, alpha=alpha, l1_ratio=l1_ratio)
stc_vec, residual = tf_mixed_norm(
evoked, forward, cov, loose=1, depth=depth, maxit=2, tol=1e-4,
tstep=4, wsize=16, window=0.1, weights=stc_dspm,
weights_min=weights_min, alpha=alpha, l1_ratio=l1_ratio,
pick_ori='vector', return_residual=True)
assert_stcs_equal(stc_vec.magnitude(), stc_nrm)
pytest.raises(ValueError, tf_mixed_norm, evoked, forward, cov,
alpha=101, l1_ratio=0.03)
pytest.raises(ValueError, tf_mixed_norm, evoked, forward, cov,
alpha=50., l1_ratio=1.01)
@pytest.mark.slowtest
@testing.requires_testing_data
def test_mxne_vol_sphere():
"""Test (TF-)MxNE with a sphere forward and volumic source space."""
evoked = read_evokeds(fname_data, condition=0, baseline=(None, 0))
evoked.crop(tmin=-0.05, tmax=0.2)
cov = read_cov(fname_cov)
evoked_l21 = evoked.copy()
evoked_l21.crop(tmin=0.081, tmax=0.1)
info = evoked.info
sphere = mne.make_sphere_model(r0=(0., 0., 0.), head_radius=0.080)
src = mne.setup_volume_source_space(subject=None, pos=15., mri=None,
sphere=(0.0, 0.0, 0.0, 0.08),
bem=None, mindist=5.0,
exclude=2.0, sphere_units='m')
fwd = mne.make_forward_solution(info, trans=None, src=src,
bem=sphere, eeg=False, meg=True)
alpha = 80.
pytest.raises(ValueError, mixed_norm, evoked, fwd, cov, alpha,
loose=0.0, return_residual=False,
maxit=3, tol=1e-8, active_set_size=10)
pytest.raises(ValueError, mixed_norm, evoked, fwd, cov, alpha,
loose=0.2, return_residual=False,
maxit=3, tol=1e-8, active_set_size=10)
# irMxNE tests
with catch_logging() as log:
stc = mixed_norm(evoked_l21, fwd, cov, alpha,
n_mxne_iter=1, maxit=30, tol=1e-8,
active_set_size=10, verbose=True)
assert isinstance(stc, VolSourceEstimate)
assert_array_almost_equal(stc.times, evoked_l21.times, 5)
assert_var_exp_log(log.getvalue(), 9, 11) # 10.2
# Compare orientation obtained using fit_dipole and gamma_map
# for a simulated evoked containing a single dipole
stc = mne.VolSourceEstimate(50e-9 * np.random.RandomState(42).randn(1, 4),
vertices=[stc.vertices[0][:1]],
tmin=stc.tmin,
tstep=stc.tstep)
evoked_dip = mne.simulation.simulate_evoked(fwd, stc, info, cov, nave=1e9,
use_cps=True)
dip_mxne = mixed_norm(evoked_dip, fwd, cov, alpha=80,
n_mxne_iter=1, maxit=30, tol=1e-8,
active_set_size=10, return_as_dipoles=True)
amp_max = [np.max(d.amplitude) for d in dip_mxne]
dip_mxne = dip_mxne[np.argmax(amp_max)]
assert dip_mxne.pos[0] in src[0]['rr'][stc.vertices[0]]
dip_fit = mne.fit_dipole(evoked_dip, cov, sphere)[0]
assert np.abs(np.dot(dip_fit.ori[0], dip_mxne.ori[0])) > 0.99
dist = 1000 * np.linalg.norm(dip_fit.pos[0] - dip_mxne.pos[0])
assert dist < 4. # within 4 mm
# Do with TF-MxNE for test memory savings
alpha = 60. # overall regularization parameter
l1_ratio = 0.01 # temporal regularization proportion
stc, _ = tf_mixed_norm(evoked, fwd, cov, maxit=3, tol=1e-4,
tstep=16, wsize=32, window=0.1, alpha=alpha,
l1_ratio=l1_ratio, return_residual=True)
assert isinstance(stc, VolSourceEstimate)
assert_array_almost_equal(stc.times, evoked.times, 5)
@pytest.mark.parametrize('mod', (
None, 'mult', 'augment', 'sign', 'zero', 'less'))
def test_split_gof_basic(mod):
"""Test splitting the goodness of fit."""
# first a trivial case
gain = np.array([[0., 1., 1.], [1., 1., 0.]]).T
M = np.ones((3, 1))
X = np.ones((2, 1))
M_est = gain @ X
assert_allclose(M_est, np.array([[1., 2., 1.]]).T) # a reasonable estimate
if mod == 'mult':
gain *= [1., -0.5]
X[1] *= -2
elif mod == 'augment':
gain = np.concatenate((gain, np.zeros((3, 1))), axis=1)
X = np.concatenate((X, [[1.]]))
elif mod == 'sign':
gain[1] *= -1
M[1] *= -1
M_est[1] *= -1
elif mod in ('zero', 'less'):
gain = np.array([[1, 1., 1.], [1., 1., 1.]]).T
if mod == 'zero':
X[:, 0] = [1., 0.]
else:
X[:, 0] = [1., 0.5]
M_est = gain @ X
else:
assert mod is None
res = M - M_est
gof = 100 * (1. - (res * res).sum() / (M * M).sum())
gof_split = _split_gof(M, X, gain)
assert_allclose(gof_split.sum(), gof)
want = gof_split[[0, 0]]
if mod == 'augment':
want = np.concatenate((want, [[0]]))
if mod in ('mult', 'less'):
assert_array_less(gof_split[1], gof_split[0])
elif mod == 'zero':
assert_allclose(gof_split[0], gof_split.sum(0))
assert_allclose(gof_split[1], 0., atol=1e-6)
else:
assert_allclose(gof_split, want, atol=1e-12)
@testing.requires_testing_data
@pytest.mark.parametrize('idx, weights', [
# empirically determined approximately orthogonal columns: 0, 15157, 19448
([0], [1]),
([0, 15157], [1, 1]),
([0, 15157], [1, 3]),
([0, 15157], [5, -1]),
([0, 15157, 19448], [1, 1, 1]),
([0, 15157, 19448], [1e-2, 1, 5]),
])
def test_split_gof_meg(forward, idx, weights):
"""Test GOF splitting on MEG data."""
gain = forward['sol']['data'][:, idx]
# close to orthogonal
norms = np.linalg.norm(gain, axis=0)
triu = np.triu_indices(len(idx), 1)
prods = np.abs(np.dot(gain.T, gain) / np.outer(norms, norms))[triu]
assert_array_less(prods, 5e-3) # approximately orthogonal
# first, split across time (one dipole per time point)
M = gain * weights
gof_split = _split_gof(M, np.diag(weights), gain)
assert_allclose(gof_split.sum(0), 100., atol=1e-5) # all sum to 100
assert_allclose(gof_split, 100 * np.eye(len(weights)), atol=1) # loc
# next, summed to a single time point (all dipoles active at one time pt)
weights = np.array(weights)[:, np.newaxis]
x = gain @ weights
assert x.shape == (gain.shape[0], 1)
gof_split = _split_gof(x, weights, gain)
want = (norms * weights.T).T ** 2
want = 100 * want / want.sum()
assert_allclose(gof_split, want, atol=1e-3, rtol=1e-2)
assert_allclose(gof_split.sum(), 100, rtol=1e-5)
@pytest.mark.parametrize('n_sensors, n_dipoles, n_times', [
(10, 15, 7),
(20, 60, 20),
])
@pytest.mark.parametrize('nnz', [2, 4])
@pytest.mark.parametrize('corr', [0.75])
@pytest.mark.parametrize('n_orient', [1, 3])
def test_mxne_inverse_sure_synthetic(n_sensors, n_dipoles, n_times, nnz, corr,
n_orient, snr=4):
"""Tests SURE criterion for automatic alpha selection on synthetic data."""
rng = np.random.RandomState(0)
sigma = np.sqrt(1 - corr ** 2)
U = rng.randn(n_sensors)
# generate gain matrix
G = np.empty([n_sensors, n_dipoles], order='F')
G[:, :n_orient] = np.expand_dims(U, axis=-1)
n_dip_per_pos = n_dipoles // n_orient
for j in range(1, n_dip_per_pos):
U *= corr
U += sigma * rng.randn(n_sensors)
G[:, j * n_orient:(j + 1) * n_orient] = np.expand_dims(U, axis=-1)
# generate coefficient matrix
support = rng.choice(n_dip_per_pos, nnz, replace=False)
X = np.zeros((n_dipoles, n_times))
for k in support:
X[k * n_orient:(k + 1) * n_orient, :] = rng.normal(
size=(n_orient, n_times))
# generate measurement matrix
M = G @ X
noise = rng.randn(n_sensors, n_times)
sigma = 1 / np.linalg.norm(noise) * np.linalg.norm(M) / snr
M += sigma * noise
# inverse modeling with sure
alpha_max = norm_l2inf(np.dot(G.T, M), n_orient, copy=False)
alpha_grid = np.geomspace(alpha_max, alpha_max / 10, num=15)
_, active_set, _ = _compute_mxne_sure(M, G, alpha_grid, sigma=sigma,
n_mxne_iter=5, maxit=3000, tol=1e-4,
n_orient=n_orient,
active_set_size=10, debias=True,
solver="auto", dgap_freq=10,
random_state=0, verbose=False)
assert np.count_nonzero(active_set, axis=-1) == n_orient * nnz
@pytest.mark.slowtest # slow on Azure
@testing.requires_testing_data
def test_mxne_inverse_sure():
"""Tests SURE criterion for automatic alpha selection on MEG data."""
def data_fun(times):
data = np.zeros(times.shape)
data[times >= 0] = 50e-9
return data
n_dipoles = 2
raw = mne.io.read_raw_fif(fname_raw)
info = mne.io.read_info(fname_data)
with info._unlock():
info['projs'] = []
noise_cov = mne.make_ad_hoc_cov(info)
label_names = ['Aud-lh', 'Aud-rh']
labels = [
mne.read_label(data_path / 'MEG' / 'sample' / 'labels' / f'{ln}.label')
for ln in label_names]
fname_fwd = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc-meg-eeg-oct-4-fwd.fif')
forward = mne.read_forward_solution(fname_fwd)
forward = mne.pick_types_forward(forward, meg="grad", eeg=False,
exclude=raw.info['bads'])
times = np.arange(100, dtype=np.float64) / raw.info['sfreq'] - 0.1
stc = simulate_sparse_stc(forward['src'], n_dipoles=n_dipoles, times=times,
random_state=1, labels=labels, data_fun=data_fun)
nave = 30
evoked = simulate_evoked(forward, stc, info, noise_cov, nave=nave,
use_cps=False, iir_filter=None)
evoked = evoked.crop(tmin=0, tmax=10e-3)
stc_ = mixed_norm(evoked, forward, noise_cov, loose=0.9, n_mxne_iter=5,
depth=0.9)
assert_array_equal(stc_.vertices, stc.vertices)
@pytest.mark.slowtest # slow on Azure
@testing.requires_testing_data
def test_mxne_inverse_empty():
"""Tests solver with too high alpha."""
evoked = read_evokeds(fname_data, condition=0, baseline=(None, 0))
evoked.pick("grad", exclude="bads")
fname_fwd = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc-meg-eeg-oct-4-fwd.fif')
forward = mne.read_forward_solution(fname_fwd)
forward = mne.pick_types_forward(forward, meg="grad", eeg=False,
exclude=evoked.info['bads'])
cov = read_cov(fname_cov)
with pytest.warns(RuntimeWarning, match='too big'):
stc, residual = mixed_norm(
evoked, forward, cov, n_mxne_iter=3, alpha=99,
return_residual=True)
assert stc.data.size == 0
assert stc.vertices[0].size == 0
assert stc.vertices[1].size == 0
assert_allclose(evoked.data, residual.data)
|
mne-tools/mne-python
|
mne/inverse_sparse/tests/test_mxne_inverse.py
|
Python
|
bsd-3-clause
| 18,922
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
from django.contrib.auth.hashers import make_password
class Migration(DataMigration):
def forwards(self, orm):
"""Adds a user to be used for migrations."""
# ``make_password(None)`` makes an unusable password.
orm['auth.User'].objects.create(
username='migrations',
password=make_password(None))
def backwards(self, orm):
"""Removes the user to be used for migrations."""
orm['auth.User'].objects.get(username='migrations').delete()
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'users.deactivation': {
'Meta': {'object_name': 'Deactivation'},
'date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'moderator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'deactivations'", 'to': u"orm['auth.User']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': u"orm['auth.User']"})
},
u'users.emailchange': {
'Meta': {'object_name': 'EmailChange'},
'activation_key': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'db_index': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'unique': 'True'})
},
u'users.profile': {
'Meta': {'object_name': 'Profile'},
'avatar': ('django.db.models.fields.files.ImageField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}),
'bio': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'country': ('django.db.models.fields.CharField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'}),
'facebook': ('django.db.models.fields.URLField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'irc_handle': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'locale': ('kitsune.sumo.models.LocaleField', [], {'default': "'en-US'", 'max_length': '7'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'public_email': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'timezone': ('timezones.fields.TimeZoneField', [], {'null': 'True', 'blank': 'True'}),
'twitter': ('django.db.models.fields.URLField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.User']", 'unique': 'True', 'primary_key': 'True'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
u'users.registrationprofile': {
'Meta': {'object_name': 'RegistrationProfile'},
'activation_key': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'unique': 'True'})
},
u'users.setting': {
'Meta': {'unique_together': "(('user', 'name'),)", 'object_name': 'Setting'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'settings'", 'to': u"orm['auth.User']"}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '60', 'blank': 'True'})
}
}
complete_apps = ['users']
symmetrical = True
|
safwanrahman/linuxdesh
|
kitsune/users/migrations/0006_add_migration_user.py
|
Python
|
bsd-3-clause
| 7,501
|
"""
Tests for miscellaneous models
Author: Chad Fulton
License: Simplified-BSD
"""
from __future__ import division, absolute_import, print_function
import numpy as np
import pandas as pd
import os
import re
import warnings
from statsmodels.tsa.statespace import mlemodel
from statsmodels import datasets
from numpy.testing import assert_almost_equal, assert_equal, assert_allclose, assert_raises
from nose.exc import SkipTest
from .results import results_sarimax
current_path = os.path.dirname(os.path.abspath(__file__))
class Intercepts(mlemodel.MLEModel):
"""
Test class for observation and state intercepts (which usually don't
get tested in other models).
"""
def __init__(self, endog, **kwargs):
k_states = 3
k_posdef = 3
super(Intercepts, self).__init__(
endog, k_states=k_states, k_posdef=k_posdef, **kwargs)
self['design'] = np.eye(3)
self['obs_cov'] = np.eye(3)
self['transition'] = np.eye(3)
self['selection'] = np.eye(3)
self['state_cov'] = np.eye(3)
self.initialize_approximate_diffuse()
@property
def param_names(self):
return ['d.1', 'd.2', 'd.3', 'c.1', 'c.2', 'c.3']
@property
def start_params(self):
return np.arange(6)
def update(self, params, **kwargs):
params = super(Intercepts, self).update(params, **kwargs)
self['obs_intercept'] = params[:3]
self['state_intercept'] = params[3:]
class TestIntercepts(object):
@classmethod
def setup_class(cls, which='mixed', **kwargs):
# Results
path = current_path + os.sep + 'results/results_intercepts_R.csv'
cls.desired = pd.read_csv(path)
# Data
dta = datasets.macrodata.load_pandas().data
dta.index = pd.date_range(start='1959-01-01', end='2009-7-01',
freq='QS')
obs = dta[['realgdp', 'realcons', 'realinv']].copy()
obs = obs / obs.std()
if which == 'all':
obs.ix[:50, :] = np.nan
obs.ix[119:130, :] = np.nan
elif which == 'partial':
obs.ix[0:50, 0] = np.nan
obs.ix[119:130, 0] = np.nan
elif which == 'mixed':
obs.ix[0:50, 0] = np.nan
obs.ix[19:70, 1] = np.nan
obs.ix[39:90, 2] = np.nan
obs.ix[119:130, 0] = np.nan
obs.ix[119:130, 2] = np.nan
mod = Intercepts(obs, **kwargs)
cls.params = np.arange(6) + 1
cls.model = mod
cls.results = mod.smooth(cls.params, return_ssm=True)
# Calculate the determinant of the covariance matrices (for easy
# comparison to other languages without having to store 2-dim arrays)
cls.results.det_scaled_smoothed_estimator_cov = (
np.zeros((1, cls.model.nobs)))
cls.results.det_predicted_state_cov = np.zeros((1, cls.model.nobs))
cls.results.det_smoothed_state_cov = np.zeros((1, cls.model.nobs))
cls.results.det_smoothed_state_disturbance_cov = (
np.zeros((1, cls.model.nobs)))
for i in range(cls.model.nobs):
cls.results.det_scaled_smoothed_estimator_cov[0, i] = (
np.linalg.det(
cls.results.scaled_smoothed_estimator_cov[:, :, i]))
cls.results.det_predicted_state_cov[0, i] = np.linalg.det(
cls.results.predicted_state_cov[:, :, i+1])
cls.results.det_smoothed_state_cov[0, i] = np.linalg.det(
cls.results.smoothed_state_cov[:, :, i])
cls.results.det_smoothed_state_disturbance_cov[0, i] = (
np.linalg.det(
cls.results.smoothed_state_disturbance_cov[:, :, i]))
def test_loglike(self):
assert_allclose(np.sum(self.results.llf_obs), -7924.03893566)
def test_scaled_smoothed_estimator(self):
assert_allclose(
self.results.scaled_smoothed_estimator.T,
self.desired[['r1', 'r2', 'r3']]
)
def test_scaled_smoothed_estimator_cov(self):
assert_allclose(
self.results.det_scaled_smoothed_estimator_cov.T,
self.desired[['detN']]
)
def test_forecasts(self):
assert_allclose(
self.results.forecasts.T,
self.desired[['m1', 'm2', 'm3']]
)
def test_forecasts_error(self):
assert_allclose(
self.results.forecasts_error.T,
self.desired[['v1', 'v2', 'v3']]
)
def test_forecasts_error_cov(self):
assert_allclose(
self.results.forecasts_error_cov.diagonal(),
self.desired[['F1', 'F2', 'F3']]
)
def test_predicted_states(self):
assert_allclose(
self.results.predicted_state[:, 1:].T,
self.desired[['a1', 'a2', 'a3']]
)
def test_predicted_states_cov(self):
assert_allclose(
self.results.det_predicted_state_cov.T,
self.desired[['detP']]
)
def test_smoothed_states(self):
assert_allclose(
self.results.smoothed_state.T,
self.desired[['alphahat1', 'alphahat2', 'alphahat3']]
)
def test_smoothed_states_cov(self):
assert_allclose(
self.results.det_smoothed_state_cov.T,
self.desired[['detV']]
)
def test_smoothed_forecasts(self):
assert_allclose(
self.results.smoothed_forecasts.T,
self.desired[['muhat1', 'muhat2', 'muhat3']]
)
def test_smoothed_state_disturbance(self):
assert_allclose(
self.results.smoothed_state_disturbance.T,
self.desired[['etahat1', 'etahat2', 'etahat3']]
)
def test_smoothed_state_disturbance_cov(self):
assert_allclose(
self.results.det_smoothed_state_disturbance_cov.T,
self.desired[['detVeta']]
)
def test_smoothed_measurement_disturbance(self):
assert_allclose(
self.results.smoothed_measurement_disturbance.T,
self.desired[['epshat1', 'epshat2', 'epshat3']], atol=1e-9
)
def test_smoothed_measurement_disturbance_cov(self):
assert_allclose(
self.results.smoothed_measurement_disturbance_cov.diagonal(),
self.desired[['Veps1', 'Veps2', 'Veps3']]
)
|
yl565/statsmodels
|
statsmodels/tsa/statespace/tests/test_models.py
|
Python
|
bsd-3-clause
| 6,374
|
"""
OptimizableComp finds whether a comprehension can be optimized.
"""
from pythran.analyses.identifiers import Identifiers
from pythran.passmanager import NodeAnalysis
class OptimizableComprehension(NodeAnalysis):
"""Find whether a comprehension can be optimized."""
def __init__(self):
self.result = set()
super(OptimizableComprehension, self).__init__(Identifiers)
def check_comprehension(self, iters):
targets = {gen.target.id for gen in iters}
optimizable = True
for it in iters:
ids = self.gather(Identifiers, it)
optimizable &= all(((ident == it.target.id) |
(ident not in targets)) for ident in ids)
return optimizable
def visit_ListComp(self, node):
if (self.check_comprehension(node.generators)):
self.result.add(node)
def visit_GeneratorExp(self, node):
if (self.check_comprehension(node.generators)):
self.result.add(node)
|
serge-sans-paille/pythran
|
pythran/analyses/optimizable_comprehension.py
|
Python
|
bsd-3-clause
| 1,008
|
import os
from pymco.test import ctxt
from . import base
class RabbitMQTestCase(base.IntegrationTestCase):
'''RabbitMQ integration test case.'''
CTXT = {
'connector': 'rabbitmq',
'plugin.rabbitmq.vhost': '/mcollective',
'plugin.rabbitmq.pool.size': '1',
'plugin.rabbitmq.pool.1.host': 'localhost',
'plugin.rabbitmq.pool.1.port': '61613',
'plugin.rabbitmq.pool.1.user': 'mcollective',
'plugin.rabbitmq.pool.1.password': 'marionette',
}
class TestWithRabbitMQMCo22x(base.MCollective22x, RabbitMQTestCase):
'''MCollective integration test case.'''
class TestWithRabbitMQMCo23x(base.MCollective23x, RabbitMQTestCase):
'''MCollective integration test case.'''
class TestWithRabbitMQMCo24x(base.MCollective24x, RabbitMQTestCase):
'''MCollective integration test case.'''
class TestWithRabbitMQSSLMCo23x(base.MCollective23x, RabbitMQTestCase):
"""MCollective integration test case."""
CTXT = {
'connector': 'rabbitmq',
'plugin.rabbitmq.vhost': '/mcollective',
'plugin.rabbitmq.pool.size': '1',
'plugin.rabbitmq.pool.1.host': 'localhost',
'plugin.rabbitmq.pool.1.port': 61612,
'plugin.rabbitmq.pool.1.user': 'mcollective',
'plugin.rabbitmq.pool.1.password': 'marionette',
'plugin.rabbitmq.pool.1.ssl': 'true',
'plugin.rabbitmq.pool.1.ssl.ca': os.path.join(ctxt.ROOT,
'fixtures/ca.pem'),
'plugin.rabbitmq.pool.1.ssl.key': os.path.join(
ctxt.ROOT,
'fixtures/activemq_private.pem'),
'plugin.rabbitmq.pool.1.ssl.cert': os.path.join(
ctxt.ROOT,
'fixtures/activemq_cert.pem',
),
}
|
rafaduran/python-mcollective
|
tests/integration/test_with_rabbitmq.py
|
Python
|
bsd-3-clause
| 1,771
|
from datetime import timedelta
import operator
from sys import getsizeof
import warnings
import numpy as np
from pandas._libs import index as libindex, lib
import pandas.compat as compat
from pandas.compat import get_range_parameters, lrange, range
from pandas.compat.numpy import function as nv
from pandas.util._decorators import Appender, cache_readonly
from pandas.core.dtypes import concat as _concat
from pandas.core.dtypes.common import (
is_int64_dtype, is_integer, is_scalar, is_timedelta64_dtype)
from pandas.core.dtypes.generic import (
ABCDataFrame, ABCSeries, ABCTimedeltaIndex)
from pandas.core import ops
import pandas.core.common as com
import pandas.core.indexes.base as ibase
from pandas.core.indexes.base import Index, _index_shared_docs
from pandas.core.indexes.numeric import Int64Index
class RangeIndex(Int64Index):
"""
Immutable Index implementing a monotonic integer range.
RangeIndex is a memory-saving special case of Int64Index limited to
representing monotonic ranges. Using RangeIndex may in some instances
improve computing speed.
This is the default index type used
by DataFrame and Series when no explicit index is provided by the user.
Parameters
----------
start : int (default: 0), or other RangeIndex instance
If int and "stop" is not given, interpreted as "stop" instead.
stop : int (default: 0)
step : int (default: 1)
name : object, optional
Name to be stored in the index
copy : bool, default False
Unused, accepted for homogeneity with other index types.
Attributes
----------
None
Methods
-------
from_range
See Also
--------
Index : The base pandas Index type.
Int64Index : Index of int64 data.
"""
_typ = 'rangeindex'
_engine_type = libindex.Int64Engine
# --------------------------------------------------------------------
# Constructors
def __new__(cls, start=None, stop=None, step=None,
dtype=None, copy=False, name=None, fastpath=None):
if fastpath is not None:
warnings.warn("The 'fastpath' keyword is deprecated, and will be "
"removed in a future version.",
FutureWarning, stacklevel=2)
if fastpath:
return cls._simple_new(start, stop, step, name=name)
cls._validate_dtype(dtype)
# RangeIndex
if isinstance(start, RangeIndex):
if name is None:
name = start.name
return cls._simple_new(name=name,
**dict(start._get_data_as_items()))
# validate the arguments
def ensure_int(value, field):
msg = ("RangeIndex(...) must be called with integers,"
" {value} was passed for {field}")
if not is_scalar(value):
raise TypeError(msg.format(value=type(value).__name__,
field=field))
try:
new_value = int(value)
assert(new_value == value)
except (TypeError, ValueError, AssertionError):
raise TypeError(msg.format(value=type(value).__name__,
field=field))
return new_value
if com._all_none(start, stop, step):
msg = "RangeIndex(...) must be called with integers"
raise TypeError(msg)
elif start is None:
start = 0
else:
start = ensure_int(start, 'start')
if stop is None:
stop = start
start = 0
else:
stop = ensure_int(stop, 'stop')
if step is None:
step = 1
elif step == 0:
raise ValueError("Step must not be zero")
else:
step = ensure_int(step, 'step')
return cls._simple_new(start, stop, step, name)
@classmethod
def from_range(cls, data, name=None, dtype=None, **kwargs):
""" Create RangeIndex from a range (py3), or xrange (py2) object. """
if not isinstance(data, range):
raise TypeError(
'{0}(...) must be called with object coercible to a '
'range, {1} was passed'.format(cls.__name__, repr(data)))
start, stop, step = get_range_parameters(data)
return RangeIndex(start, stop, step, dtype=dtype, name=name, **kwargs)
@classmethod
def _simple_new(cls, start, stop=None, step=None, name=None,
dtype=None, **kwargs):
result = object.__new__(cls)
# handle passed None, non-integers
if start is None and stop is None:
# empty
start, stop, step = 0, 0, 1
if start is None or not is_integer(start):
try:
return RangeIndex(start, stop, step, name=name, **kwargs)
except TypeError:
return Index(start, stop, step, name=name, **kwargs)
result._start = start
result._stop = stop or 0
result._step = step or 1
result.name = name
for k, v in compat.iteritems(kwargs):
setattr(result, k, v)
result._reset_identity()
return result
# --------------------------------------------------------------------
@staticmethod
def _validate_dtype(dtype):
""" require dtype to be None or int64 """
if not (dtype is None or is_int64_dtype(dtype)):
raise TypeError('Invalid to pass a non-int64 dtype to RangeIndex')
@cache_readonly
def _constructor(self):
""" return the class to use for construction """
return Int64Index
@cache_readonly
def _data(self):
return np.arange(self._start, self._stop, self._step, dtype=np.int64)
@cache_readonly
def _int64index(self):
return Int64Index._simple_new(self._data, name=self.name)
def _get_data_as_items(self):
""" return a list of tuples of start, stop, step """
return [('start', self._start),
('stop', self._stop),
('step', self._step)]
def __reduce__(self):
d = self._get_attributes_dict()
d.update(dict(self._get_data_as_items()))
return ibase._new_Index, (self.__class__, d), None
# --------------------------------------------------------------------
# Rendering Methods
def _format_attrs(self):
"""
Return a list of tuples of the (attr, formatted_value)
"""
attrs = self._get_data_as_items()
if self.name is not None:
attrs.append(('name', ibase.default_pprint(self.name)))
return attrs
def _format_data(self, name=None):
# we are formatting thru the attributes
return None
# --------------------------------------------------------------------
@cache_readonly
def nbytes(self):
"""
Return the number of bytes in the underlying data
On implementations where this is undetermined (PyPy)
assume 24 bytes for each value
"""
return sum(getsizeof(getattr(self, v), 24) for v in
['_start', '_stop', '_step'])
def memory_usage(self, deep=False):
"""
Memory usage of my values
Parameters
----------
deep : bool
Introspect the data deeply, interrogate
`object` dtypes for system-level memory consumption
Returns
-------
bytes used
Notes
-----
Memory usage does not include memory consumed by elements that
are not components of the array if deep=False
See Also
--------
numpy.ndarray.nbytes
"""
return self.nbytes
@property
def dtype(self):
return np.dtype(np.int64)
@property
def is_unique(self):
""" return if the index has unique values """
return True
@cache_readonly
def is_monotonic_increasing(self):
return self._step > 0 or len(self) <= 1
@cache_readonly
def is_monotonic_decreasing(self):
return self._step < 0 or len(self) <= 1
@property
def has_duplicates(self):
return False
def tolist(self):
return lrange(self._start, self._stop, self._step)
@Appender(_index_shared_docs['_shallow_copy'])
def _shallow_copy(self, values=None, **kwargs):
if values is None:
name = kwargs.get("name", self.name)
return RangeIndex._simple_new(
name=name, **dict(self._get_data_as_items()))
else:
kwargs.setdefault('name', self.name)
return self._int64index._shallow_copy(values, **kwargs)
@Appender(ibase._index_shared_docs['copy'])
def copy(self, name=None, deep=False, dtype=None, **kwargs):
self._validate_dtype(dtype)
if name is None:
name = self.name
return RangeIndex._simple_new(
name=name, **dict(self._get_data_as_items()))
def _minmax(self, meth):
no_steps = len(self) - 1
if no_steps == -1:
return np.nan
elif ((meth == 'min' and self._step > 0) or
(meth == 'max' and self._step < 0)):
return self._start
return self._start + self._step * no_steps
def min(self, axis=None, skipna=True):
"""The minimum value of the RangeIndex"""
nv.validate_minmax_axis(axis)
return self._minmax('min')
def max(self, axis=None, skipna=True):
"""The maximum value of the RangeIndex"""
nv.validate_minmax_axis(axis)
return self._minmax('max')
def argsort(self, *args, **kwargs):
"""
Returns the indices that would sort the index and its
underlying data.
Returns
-------
argsorted : numpy array
See Also
--------
numpy.ndarray.argsort
"""
nv.validate_argsort(args, kwargs)
if self._step > 0:
return np.arange(len(self))
else:
return np.arange(len(self) - 1, -1, -1)
def equals(self, other):
"""
Determines if two Index objects contain the same elements.
"""
if isinstance(other, RangeIndex):
ls = len(self)
lo = len(other)
return (ls == lo == 0 or
ls == lo == 1 and
self._start == other._start or
ls == lo and
self._start == other._start and
self._step == other._step)
return super(RangeIndex, self).equals(other)
def intersection(self, other, sort=False):
"""
Form the intersection of two Index objects.
Parameters
----------
other : Index or array-like
sort : False or None, default False
Sort the resulting index if possible
.. versionadded:: 0.24.0
.. versionchanged:: 0.24.1
Changed the default to ``False`` to match the behaviour
from before 0.24.0.
Returns
-------
intersection : Index
"""
self._validate_sort_keyword(sort)
if self.equals(other):
return self._get_reconciled_name_object(other)
if not isinstance(other, RangeIndex):
return super(RangeIndex, self).intersection(other, sort=sort)
if not len(self) or not len(other):
return RangeIndex._simple_new(None)
first = self[::-1] if self._step < 0 else self
second = other[::-1] if other._step < 0 else other
# check whether intervals intersect
# deals with in- and decreasing ranges
int_low = max(first._start, second._start)
int_high = min(first._stop, second._stop)
if int_high <= int_low:
return RangeIndex._simple_new(None)
# Method hint: linear Diophantine equation
# solve intersection problem
# performance hint: for identical step sizes, could use
# cheaper alternative
gcd, s, t = first._extended_gcd(first._step, second._step)
# check whether element sets intersect
if (first._start - second._start) % gcd:
return RangeIndex._simple_new(None)
# calculate parameters for the RangeIndex describing the
# intersection disregarding the lower bounds
tmp_start = first._start + (second._start - first._start) * \
first._step // gcd * s
new_step = first._step * second._step // gcd
new_index = RangeIndex._simple_new(tmp_start, int_high, new_step)
# adjust index to limiting interval
new_index._start = new_index._min_fitting_element(int_low)
if (self._step < 0 and other._step < 0) is not (new_index._step < 0):
new_index = new_index[::-1]
if sort is None:
new_index = new_index.sort_values()
return new_index
def _min_fitting_element(self, lower_limit):
"""Returns the smallest element greater than or equal to the limit"""
no_steps = -(-(lower_limit - self._start) // abs(self._step))
return self._start + abs(self._step) * no_steps
def _max_fitting_element(self, upper_limit):
"""Returns the largest element smaller than or equal to the limit"""
no_steps = (upper_limit - self._start) // abs(self._step)
return self._start + abs(self._step) * no_steps
def _extended_gcd(self, a, b):
"""
Extended Euclidean algorithms to solve Bezout's identity:
a*x + b*y = gcd(x, y)
Finds one particular solution for x, y: s, t
Returns: gcd, s, t
"""
s, old_s = 0, 1
t, old_t = 1, 0
r, old_r = b, a
while r:
quotient = old_r // r
old_r, r = r, old_r - quotient * r
old_s, s = s, old_s - quotient * s
old_t, t = t, old_t - quotient * t
return old_r, old_s, old_t
def union(self, other):
"""
Form the union of two Index objects and sorts if possible
Parameters
----------
other : Index or array-like
Returns
-------
union : Index
"""
self._assert_can_do_setop(other)
if len(other) == 0 or self.equals(other) or len(self) == 0:
return super(RangeIndex, self).union(other)
if isinstance(other, RangeIndex):
start_s, step_s = self._start, self._step
end_s = self._start + self._step * (len(self) - 1)
start_o, step_o = other._start, other._step
end_o = other._start + other._step * (len(other) - 1)
if self._step < 0:
start_s, step_s, end_s = end_s, -step_s, start_s
if other._step < 0:
start_o, step_o, end_o = end_o, -step_o, start_o
if len(self) == 1 and len(other) == 1:
step_s = step_o = abs(self._start - other._start)
elif len(self) == 1:
step_s = step_o
elif len(other) == 1:
step_o = step_s
start_r = min(start_s, start_o)
end_r = max(end_s, end_o)
if step_o == step_s:
if ((start_s - start_o) % step_s == 0 and
(start_s - end_o) <= step_s and
(start_o - end_s) <= step_s):
return RangeIndex(start_r, end_r + step_s, step_s)
if ((step_s % 2 == 0) and
(abs(start_s - start_o) <= step_s / 2) and
(abs(end_s - end_o) <= step_s / 2)):
return RangeIndex(start_r, end_r + step_s / 2, step_s / 2)
elif step_o % step_s == 0:
if ((start_o - start_s) % step_s == 0 and
(start_o + step_s >= start_s) and
(end_o - step_s <= end_s)):
return RangeIndex(start_r, end_r + step_s, step_s)
elif step_s % step_o == 0:
if ((start_s - start_o) % step_o == 0 and
(start_s + step_o >= start_o) and
(end_s - step_o <= end_o)):
return RangeIndex(start_r, end_r + step_o, step_o)
return self._int64index.union(other)
@Appender(_index_shared_docs['join'])
def join(self, other, how='left', level=None, return_indexers=False,
sort=False):
if how == 'outer' and self is not other:
# note: could return RangeIndex in more circumstances
return self._int64index.join(other, how, level, return_indexers,
sort)
return super(RangeIndex, self).join(other, how, level, return_indexers,
sort)
def _concat_same_dtype(self, indexes, name):
return _concat._concat_rangeindex_same_dtype(indexes).rename(name)
def __len__(self):
"""
return the length of the RangeIndex
"""
return max(0, -(-(self._stop - self._start) // self._step))
@property
def size(self):
return len(self)
def __getitem__(self, key):
"""
Conserve RangeIndex type for scalar and slice keys.
"""
super_getitem = super(RangeIndex, self).__getitem__
if is_scalar(key):
if not lib.is_integer(key):
raise IndexError("only integers, slices (`:`), "
"ellipsis (`...`), numpy.newaxis (`None`) "
"and integer or boolean "
"arrays are valid indices")
n = com.cast_scalar_indexer(key)
if n != key:
return super_getitem(key)
if n < 0:
n = len(self) + key
if n < 0 or n > len(self) - 1:
raise IndexError("index {key} is out of bounds for axis 0 "
"with size {size}".format(key=key,
size=len(self)))
return self._start + n * self._step
if isinstance(key, slice):
# This is basically PySlice_GetIndicesEx, but delegation to our
# super routines if we don't have integers
length = len(self)
# complete missing slice information
step = 1 if key.step is None else key.step
if key.start is None:
start = length - 1 if step < 0 else 0
else:
start = key.start
if start < 0:
start += length
if start < 0:
start = -1 if step < 0 else 0
if start >= length:
start = length - 1 if step < 0 else length
if key.stop is None:
stop = -1 if step < 0 else length
else:
stop = key.stop
if stop < 0:
stop += length
if stop < 0:
stop = -1
if stop > length:
stop = length
# delegate non-integer slices
if (start != int(start) or
stop != int(stop) or
step != int(step)):
return super_getitem(key)
# convert indexes to values
start = self._start + self._step * start
stop = self._start + self._step * stop
step = self._step * step
return RangeIndex._simple_new(start, stop, step, name=self.name)
# fall back to Int64Index
return super_getitem(key)
def __floordiv__(self, other):
if isinstance(other, (ABCSeries, ABCDataFrame)):
return NotImplemented
if is_integer(other) and other != 0:
if (len(self) == 0 or
self._start % other == 0 and
self._step % other == 0):
start = self._start // other
step = self._step // other
stop = start + len(self) * step
return RangeIndex._simple_new(
start, stop, step, name=self.name)
if len(self) == 1:
start = self._start // other
return RangeIndex._simple_new(
start, start + 1, 1, name=self.name)
return self._int64index // other
@classmethod
def _add_numeric_methods_binary(cls):
""" add in numeric methods, specialized to RangeIndex """
def _make_evaluate_binop(op, step=False):
"""
Parameters
----------
op : callable that accepts 2 parms
perform the binary op
step : callable, optional, default to False
op to apply to the step parm if not None
if False, use the existing step
"""
def _evaluate_numeric_binop(self, other):
if isinstance(other, (ABCSeries, ABCDataFrame)):
return NotImplemented
elif isinstance(other, ABCTimedeltaIndex):
# Defer to TimedeltaIndex implementation
return NotImplemented
elif isinstance(other, (timedelta, np.timedelta64)):
# GH#19333 is_integer evaluated True on timedelta64,
# so we need to catch these explicitly
return op(self._int64index, other)
elif is_timedelta64_dtype(other):
# Must be an np.ndarray; GH#22390
return op(self._int64index, other)
other = self._validate_for_numeric_binop(other, op)
attrs = self._get_attributes_dict()
attrs = self._maybe_update_attributes(attrs)
left, right = self, other
try:
# apply if we have an override
if step:
with np.errstate(all='ignore'):
rstep = step(left._step, right)
# we don't have a representable op
# so return a base index
if not is_integer(rstep) or not rstep:
raise ValueError
else:
rstep = left._step
with np.errstate(all='ignore'):
rstart = op(left._start, right)
rstop = op(left._stop, right)
result = RangeIndex(rstart,
rstop,
rstep,
**attrs)
# for compat with numpy / Int64Index
# even if we can represent as a RangeIndex, return
# as a Float64Index if we have float-like descriptors
if not all(is_integer(x) for x in
[rstart, rstop, rstep]):
result = result.astype('float64')
return result
except (ValueError, TypeError, ZeroDivisionError):
# Defer to Int64Index implementation
return op(self._int64index, other)
# TODO: Do attrs get handled reliably?
name = '__{name}__'.format(name=op.__name__)
return compat.set_function_name(_evaluate_numeric_binop, name, cls)
cls.__add__ = _make_evaluate_binop(operator.add)
cls.__radd__ = _make_evaluate_binop(ops.radd)
cls.__sub__ = _make_evaluate_binop(operator.sub)
cls.__rsub__ = _make_evaluate_binop(ops.rsub)
cls.__mul__ = _make_evaluate_binop(operator.mul, step=operator.mul)
cls.__rmul__ = _make_evaluate_binop(ops.rmul, step=ops.rmul)
cls.__truediv__ = _make_evaluate_binop(operator.truediv,
step=operator.truediv)
cls.__rtruediv__ = _make_evaluate_binop(ops.rtruediv,
step=ops.rtruediv)
if not compat.PY3:
cls.__div__ = _make_evaluate_binop(operator.div, step=operator.div)
cls.__rdiv__ = _make_evaluate_binop(ops.rdiv, step=ops.rdiv)
RangeIndex._add_numeric_methods()
RangeIndex._add_logical_methods()
|
GuessWhoSamFoo/pandas
|
pandas/core/indexes/range.py
|
Python
|
bsd-3-clause
| 24,595
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('msgs', '0002_auto_20150204_1116'),
]
operations = [
migrations.RemoveField(
model_name='message',
name='issue',
),
]
|
xkmato/tracpro
|
tracpro/msgs/migrations/0003_remove_message_issue.py
|
Python
|
bsd-3-clause
| 349
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'AuthIdentity.last_verified'
db.add_column(
'sentry_authidentity',
'last_verified',
self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now),
keep_default=False
)
# Adding field 'OrganizationMember.flags'
db.add_column(
'sentry_organizationmember',
'flags',
self.gf('django.db.models.fields.BigIntegerField')(default=0),
keep_default=False
)
def backwards(self, orm):
# Deleting field 'AuthIdentity.last_verified'
db.delete_column('sentry_authidentity', 'last_verified')
# Deleting field 'OrganizationMember.flags'
db.delete_column('sentry_organizationmember', 'flags')
models = {
'sentry.accessgroup': {
'Meta': {
'unique_together': "(('team', 'name'),)",
'object_name': 'AccessGroup'
},
'data': (
'sentry.db.models.fields.gzippeddict.GzippedDictField', [], {
'null': 'True',
'blank': 'True'
}
),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'managed': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'members': (
'django.db.models.fields.related.ManyToManyField', [], {
'to': "orm['sentry.User']",
'symmetrical': 'False'
}
),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'projects': (
'django.db.models.fields.related.ManyToManyField', [], {
'to': "orm['sentry.Project']",
'symmetrical': 'False'
}
),
'team': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Team']"
}
),
'type': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {
'default': '50'
})
},
'sentry.activity': {
'Meta': {
'object_name': 'Activity'
},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {
'null': 'True'
}),
'datetime':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'event': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Event']",
'null': 'True'
}
),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']",
'null': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ident':
('django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']",
'null': 'True'
}
)
},
'sentry.alert': {
'Meta': {
'object_name': 'Alert'
},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {
'null': 'True'
}),
'datetime':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']",
'null': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'message': ('django.db.models.fields.TextField', [], {}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'related_groups': (
'django.db.models.fields.related.ManyToManyField', [], {
'related_name': "'related_alerts'",
'symmetrical': 'False',
'through': "orm['sentry.AlertRelatedGroup']",
'to': "orm['sentry.Group']"
}
),
'status': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
)
},
'sentry.alertrelatedgroup': {
'Meta': {
'unique_together': "(('group', 'alert'),)",
'object_name': 'AlertRelatedGroup'
},
'alert': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Alert']"
}
),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {
'null': 'True'
}),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
})
},
'sentry.apikey': {
'Meta': {
'object_name': 'ApiKey'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key':
('django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '32'
}),
'label': (
'django.db.models.fields.CharField', [], {
'default': "'Default'",
'max_length': '64',
'blank': 'True'
}
),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'key_set'",
'to': "orm['sentry.Organization']"
}
),
'scopes': ('django.db.models.fields.BigIntegerField', [], {
'default': 'None'
}),
'status': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
)
},
'sentry.auditlogentry': {
'Meta': {
'object_name': 'AuditLogEntry'
},
'actor': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'audit_actors'",
'to': "orm['sentry.User']"
}
),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'datetime':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'event': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ip_address': (
'django.db.models.fields.GenericIPAddressField', [], {
'max_length': '39',
'null': 'True'
}
),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']"
}
),
'target_object':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True'
}),
'target_user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'blank': 'True',
'related_name': "'audit_targets'",
'null': 'True',
'to': "orm['sentry.User']"
}
)
},
'sentry.authidentity': {
'Meta': {
'unique_together': "(('auth_provider', 'ident'), ('auth_provider', 'user'))",
'object_name': 'AuthIdentity'
},
'auth_provider': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.AuthProvider']"
}
),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ident': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'last_verified':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
)
},
'sentry.authprovider': {
'Meta': {
'object_name': 'AuthProvider'
},
'config': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'default_role':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '50'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'last_sync': ('django.db.models.fields.DateTimeField', [], {
'null': 'True'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']",
'unique': 'True'
}
),
'provider': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'sync_time':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True'
})
},
'sentry.broadcast': {
'Meta': {
'object_name': 'Broadcast'
},
'badge': (
'django.db.models.fields.CharField', [], {
'max_length': '32',
'null': 'True',
'blank': 'True'
}
),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'is_active':
('django.db.models.fields.BooleanField', [], {
'default': 'True',
'db_index': 'True'
}),
'link': (
'django.db.models.fields.URLField', [], {
'max_length': '200',
'null': 'True',
'blank': 'True'
}
),
'message': ('django.db.models.fields.CharField', [], {
'max_length': '256'
})
},
'sentry.event': {
'Meta': {
'unique_together': "(('project', 'event_id'),)",
'object_name': 'Event',
'db_table': "'sentry_message'",
'index_together': "(('group', 'datetime'),)"
},
'checksum':
('django.db.models.fields.CharField', [], {
'max_length': '32',
'db_index': 'True'
}),
'data':
('sentry.db.models.fields.node.NodeField', [], {
'null': 'True',
'blank': 'True'
}),
'datetime': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'event_id': (
'django.db.models.fields.CharField', [], {
'max_length': '32',
'null': 'True',
'db_column': "'message_id'"
}
),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'blank': 'True',
'related_name': "'event_set'",
'null': 'True',
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'null': 'True'
}
),
'platform':
('django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'time_spent':
('sentry.db.models.fields.bounded.BoundedIntegerField', [], {
'null': 'True'
})
},
'sentry.eventmapping': {
'Meta': {
'unique_together': "(('project', 'event_id'),)",
'object_name': 'EventMapping'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'event_id': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
)
},
'sentry.file': {
'Meta': {
'unique_together': "(('name', 'checksum'),)",
'object_name': 'File'
},
'checksum':
('django.db.models.fields.CharField', [], {
'max_length': '32',
'null': 'True'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'path': ('django.db.models.fields.TextField', [], {
'null': 'True'
}),
'size':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True'
}),
'storage':
('django.db.models.fields.CharField', [], {
'max_length': '128',
'null': 'True'
}),
'storage_options': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'timestamp': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'type': ('django.db.models.fields.CharField', [], {
'max_length': '64'
})
},
'sentry.group': {
'Meta': {
'unique_together': "(('project', 'checksum'),)",
'object_name': 'Group',
'db_table': "'sentry_groupedmessage'"
},
'active_at':
('django.db.models.fields.DateTimeField', [], {
'null': 'True',
'db_index': 'True'
}),
'checksum':
('django.db.models.fields.CharField', [], {
'max_length': '32',
'db_index': 'True'
}),
'culprit': (
'django.db.models.fields.CharField', [], {
'max_length': '200',
'null': 'True',
'db_column': "'view'",
'blank': 'True'
}
),
'data': (
'sentry.db.models.fields.gzippeddict.GzippedDictField', [], {
'null': 'True',
'blank': 'True'
}
),
'first_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'is_public': (
'django.db.models.fields.NullBooleanField', [], {
'default': 'False',
'null': 'True',
'blank': 'True'
}
),
'last_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'level': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '40',
'db_index': 'True',
'blank': 'True'
}
),
'logger': (
'django.db.models.fields.CharField', [], {
'default': "'root'",
'max_length': '64',
'db_index': 'True',
'blank': 'True'
}
),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'null': 'True'
}
),
'platform':
('django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'resolved_at':
('django.db.models.fields.DateTimeField', [], {
'null': 'True',
'db_index': 'True'
}),
'score': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {
'default': '0'
}),
'status': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
),
'time_spent_count':
('sentry.db.models.fields.bounded.BoundedIntegerField', [], {
'default': '0'
}),
'time_spent_total':
('sentry.db.models.fields.bounded.BoundedIntegerField', [], {
'default': '0'
}),
'times_seen': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '1',
'db_index': 'True'
}
)
},
'sentry.groupassignee': {
'Meta': {
'object_name': 'GroupAssignee',
'db_table': "'sentry_groupasignee'"
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'assignee_set'",
'unique': 'True',
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'assignee_set'",
'to': "orm['sentry.Project']"
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'sentry_assignee_set'",
'to': "orm['sentry.User']"
}
)
},
'sentry.groupbookmark': {
'Meta': {
'unique_together': "(('project', 'user', 'group'),)",
'object_name': 'GroupBookmark'
},
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'bookmark_set'",
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'bookmark_set'",
'to': "orm['sentry.Project']"
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'sentry_bookmark_set'",
'to': "orm['sentry.User']"
}
)
},
'sentry.grouphash': {
'Meta': {
'unique_together': "(('project', 'hash'),)",
'object_name': 'GroupHash'
},
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']",
'null': 'True'
}
),
'hash':
('django.db.models.fields.CharField', [], {
'max_length': '32',
'db_index': 'True'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
)
},
'sentry.groupmeta': {
'Meta': {
'unique_together': "(('group', 'key'),)",
'object_name': 'GroupMeta'
},
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'value': ('django.db.models.fields.TextField', [], {})
},
'sentry.grouprulestatus': {
'Meta': {
'unique_together': "(('rule', 'group'),)",
'object_name': 'GroupRuleStatus'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'last_active': ('django.db.models.fields.DateTimeField', [], {
'null': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'rule': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Rule']"
}
),
'status': ('django.db.models.fields.PositiveSmallIntegerField', [], {
'default': '0'
})
},
'sentry.groupseen': {
'Meta': {
'unique_together': "(('user', 'group'),)",
'object_name': 'GroupSeen'
},
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'last_seen':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']",
'db_index': 'False'
}
)
},
'sentry.grouptagkey': {
'Meta': {
'unique_together': "(('project', 'group', 'key'),)",
'object_name': 'GroupTagKey'
},
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'values_seen':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
})
},
'sentry.grouptagvalue': {
'Meta': {
'unique_together': "(('project', 'key', 'value', 'group'),)",
'object_name': 'GroupTagValue',
'db_table': "'sentry_messagefiltervalue'"
},
'first_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True',
'db_index': 'True'
}
),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'grouptag'",
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'last_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True',
'db_index': 'True'
}
),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'grouptag'",
'null': 'True',
'to': "orm['sentry.Project']"
}
),
'times_seen':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
}),
'value': ('django.db.models.fields.CharField', [], {
'max_length': '200'
})
},
'sentry.helppage': {
'Meta': {
'object_name': 'HelpPage'
},
'content': ('django.db.models.fields.TextField', [], {}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'is_visible': ('django.db.models.fields.BooleanField', [], {
'default': 'True'
}),
'key': (
'django.db.models.fields.CharField', [], {
'max_length': '64',
'unique': 'True',
'null': 'True'
}
),
'priority':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '50'
}),
'title': ('django.db.models.fields.CharField', [], {
'max_length': '64'
})
},
'sentry.lostpasswordhash': {
'Meta': {
'object_name': 'LostPasswordHash'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'hash': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']",
'unique': 'True'
}
)
},
'sentry.option': {
'Meta': {
'object_name': 'Option'
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key':
('django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '64'
}),
'last_updated':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.organization': {
'Meta': {
'object_name': 'Organization'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'members': (
'django.db.models.fields.related.ManyToManyField', [], {
'related_name': "'org_memberships'",
'symmetrical': 'False',
'through': "orm['sentry.OrganizationMember']",
'to': "orm['sentry.User']"
}
),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'owner': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
),
'slug':
('django.db.models.fields.SlugField', [], {
'unique': 'True',
'max_length': '50'
}),
'status':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
})
},
'sentry.organizationmember': {
'Meta': {
'unique_together': "(('organization', 'user'), ('organization', 'email'))",
'object_name': 'OrganizationMember'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'email': (
'django.db.models.fields.EmailField', [], {
'max_length': '75',
'null': 'True',
'blank': 'True'
}
),
'flags': ('django.db.models.fields.BigIntegerField', [], {
'default': '0'
}),
'has_global_access': ('django.db.models.fields.BooleanField', [], {
'default': 'True'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'member_set'",
'to': "orm['sentry.Organization']"
}
),
'teams': (
'django.db.models.fields.related.ManyToManyField', [], {
'to': "orm['sentry.Team']",
'symmetrical': 'False',
'blank': 'True'
}
),
'type':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '50'
}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'blank': 'True',
'related_name': "'sentry_orgmember_set'",
'null': 'True',
'to': "orm['sentry.User']"
}
)
},
'sentry.pendingteammember': {
'Meta': {
'unique_together': "(('team', 'email'),)",
'object_name': 'PendingTeamMember'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'email': ('django.db.models.fields.EmailField', [], {
'max_length': '75'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'team': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'pending_member_set'",
'to': "orm['sentry.Team']"
}
),
'type': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {
'default': '50'
})
},
'sentry.project': {
'Meta': {
'unique_together': "(('team', 'slug'), ('organization', 'slug'))",
'object_name': 'Project'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '200'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']"
}
),
'platform':
('django.db.models.fields.CharField', [], {
'max_length': '32',
'null': 'True'
}),
'public': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'slug': ('django.db.models.fields.SlugField', [], {
'max_length': '50',
'null': 'True'
}),
'status': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
),
'team': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Team']"
}
)
},
'sentry.projectkey': {
'Meta': {
'object_name': 'ProjectKey'
},
'date_added': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'label': (
'django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True',
'blank': 'True'
}
),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'key_set'",
'to': "orm['sentry.Project']"
}
),
'public_key': (
'django.db.models.fields.CharField', [], {
'max_length': '32',
'unique': 'True',
'null': 'True'
}
),
'roles': ('django.db.models.fields.BigIntegerField', [], {
'default': '1'
}),
'secret_key': (
'django.db.models.fields.CharField', [], {
'max_length': '32',
'unique': 'True',
'null': 'True'
}
),
'status': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']",
'null': 'True'
}
),
'user_added': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'keys_added_set'",
'null': 'True',
'to': "orm['sentry.User']"
}
)
},
'sentry.projectoption': {
'Meta': {
'unique_together': "(('project', 'key'),)",
'object_name': 'ProjectOption',
'db_table': "'sentry_projectoptions'"
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.release': {
'Meta': {
'unique_together': "(('project', 'version'),)",
'object_name': 'Release'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'version': ('django.db.models.fields.CharField', [], {
'max_length': '64'
})
},
'sentry.rule': {
'Meta': {
'object_name': 'Rule'
},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'label': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
)
},
'sentry.tagkey': {
'Meta': {
'unique_together': "(('project', 'key'),)",
'object_name': 'TagKey',
'db_table': "'sentry_filterkey'"
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'label':
('django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'status':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
}),
'values_seen':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
})
},
'sentry.tagvalue': {
'Meta': {
'unique_together': "(('project', 'key', 'value'),)",
'object_name': 'TagValue',
'db_table': "'sentry_filtervalue'"
},
'data': (
'sentry.db.models.fields.gzippeddict.GzippedDictField', [], {
'null': 'True',
'blank': 'True'
}
),
'first_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True',
'db_index': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'last_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True',
'db_index': 'True'
}
),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'times_seen':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
}),
'value': ('django.db.models.fields.CharField', [], {
'max_length': '200'
})
},
'sentry.team': {
'Meta': {
'unique_together': "(('organization', 'slug'),)",
'object_name': 'Team'
},
'date_added': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']"
}
),
'owner': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
),
'slug': ('django.db.models.fields.SlugField', [], {
'max_length': '50'
}),
'status':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
})
},
'sentry.teammember': {
'Meta': {
'unique_together': "(('team', 'user'),)",
'object_name': 'TeamMember'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'team': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Team']"
}
),
'type': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {
'default': '50'
}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
)
},
'sentry.user': {
'Meta': {
'object_name': 'User',
'db_table': "'auth_user'"
},
'date_joined':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'email':
('django.db.models.fields.EmailField', [], {
'max_length': '75',
'blank': 'True'
}),
'first_name':
('django.db.models.fields.CharField', [], {
'max_length': '30',
'blank': 'True'
}),
'id': ('django.db.models.fields.AutoField', [], {
'primary_key': 'True'
}),
'is_active': ('django.db.models.fields.BooleanField', [], {
'default': 'True'
}),
'is_managed': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'is_staff': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'last_login':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'last_name':
('django.db.models.fields.CharField', [], {
'max_length': '30',
'blank': 'True'
}),
'password': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'username':
('django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '128'
})
},
'sentry.useroption': {
'Meta': {
'unique_together': "(('user', 'project', 'key'),)",
'object_name': 'UserOption'
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
}
}
complete_apps = ['sentry']
|
jean/sentry
|
src/sentry/south_migrations/0159_auto__add_field_authidentity_last_verified__add_field_organizationmemb.py
|
Python
|
bsd-3-clause
| 52,679
|
import numpy as np
import pandas as pd
from bokeh.plotting import *
# Generate some synthetic time series for six different categories
cats = list("abcdef")
y = np.random.randn(2000)
g = np.random.choice(cats, 2000)
for i, l in enumerate(cats):
y[g == l] += i // 2
df = pd.DataFrame(dict(score=y, group=g))
# Find the quartiles, IQR, and outliers for each category
groups = df.groupby('group')
q1 = groups.quantile(q=0.25)
q2 = groups.quantile(q=0.5)
q3 = groups.quantile(q=0.75)
iqr = q3 - q1
upper = q2 + 1.5*iqr
lower = q2 - 1.5*iqr
def outliers(group):
cat = group.name
return group[(group.score > upper.loc[cat][0]) | (group.score < lower.loc[cat][0])]['score']
out = groups.apply(outliers).dropna()
# Prepare outlier data for plotting, we need and x (categorical) and y (numeric)
# coordinate for every outlier.
outx = []
outy = []
for cat in cats:
for value in out[cat]:
outx.append(cat)
outy.append(value)
# EXERCISE: output static HTML file
# EXERCISE: turn on plot hold
# Draw the upper segment extending from the box plot using `segment` which
# takes x0, x1 and y0, y1 as data
segment(cats, upper.score, cats, q3.score, x_range=cats, line_width=2,
tools="", background_fill="#EFE8E2", line_color="black", title="")
# EXERCISE: draw the lower segment
# Draw the upper box of the box plot using `rect`
rect(cats, (q3.score+q2.score)/2, 0.7, q3.score-q2.score,
fill_color="#E08E79", line_width=2, line_color="black")
# EXERCISE: use `rect` to draw the bottom box with a different color
# OK here we use `rect` to draw the whiskers. It's slightly cheating, but it's
# easier than using segments or lines, since we can specify widths simply with
# categorical percentage units
rect(cats, lower.score, 0.2, 0, line_color="black")
rect(cats, upper.score, 0.2, 0, line_color="black")
# EXERCISE: use `circle` to draw the outliers
# EXERCISE: use grid(), axis(), etc. to style the plot. Some suggestions:
# - remove the X grid lines, change the Y grid line color
# - make the tick labels bigger
xgrid().grid_line_color = None
ygrid().grid_line_color = "white"
ygrid().grid_line_width = 2
xaxis().major_label_text_font_size="12pt"
show()
|
sahat/bokeh
|
sphinx/source/tutorial/exercises/boxplot.py
|
Python
|
bsd-3-clause
| 2,199
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# vi:ts=4 sw=4 et
import re
import os.path
import subprocess
from mainwindow import MainWindow
def default_c10t_executable():
return "./c10t"
def default_minecraft_world():
return "~/.minecraft/saves/World1"
def default_output_image():
return os.path.abspath("out.png")
def quote_arg_if_needed(arg):
"""Add quotes if the argument has 'weird' characters.
This function is extremely simple, and it is not fool-proof.
Improvements are quite welcome!
WARNING: single-quotes inside the argument will break this!"""
if re.search(r'''[^-a-zA-Z0-9_.,/+=]''', arg):
return "'%s'" % (arg,)
else:
return arg
def args_to_string(args):
"""Converts a list of arguments to one string that can be copy-pasted
into a terminal and will work (hopefully)."""
return " ".join(quote_arg_if_needed(arg) for arg in args)
class Program(object):
def __init__(self):
self.win = MainWindow()
self.args = []
# Files
self.win.ui.exepath = default_c10t_executable()
self.win.ui.world = default_minecraft_world()
self.win.ui.output = default_output_image()
self.update_ui_commandline()
self.win.update_button_callback = self.update_ui_commandline
self.win.run_button_callback = self.run_command
self.win.load_button_callback = self.load_image
def run_command(self):
self.update_ui_commandline()
proc = subprocess.Popen(self.args, shell=False)
# TODO: Add a progress window/progress bar
# Meanwhile... let's just block this program until c10t finishes...
# Ugly, but better than nothing.
proc.communicate() # TODO: Check process returncode
self.load_image()
def load_image(self):
self.win.load_image_from_file(os.path.expanduser(self.win.ui.output))
def update_ui_commandline(self):
self.build_commandline()
self.win.ui.command = args_to_string(self.args)
def build_commandline(self):
ui = self.win.ui
args = [os.path.expanduser(ui.exepath)]
# Filtering
if ui.topcheck : args.extend(["--top" , str(ui.top )])
if ui.bottomcheck: args.extend(["--bottom", str(ui.bottom)])
if ui.limitscheck:
args.extend([
"--limits",
",".join(str(x) for x in (
ui.limitsnorth,
ui.limitssouth,
ui.limitseast,
ui.limitswest,
))
])
if ui.cavemodecheck: args.append("--cave-mode")
if ui.excludecheck:
for block in re.split("[ \t,;/]+", ui.exclude):
args.extend(["-e", str(block)])
if ui.includecheck:
args.append("--hide-all")
for block in re.split("[ \t,;/]+", ui.include):
args.extend(["-i", str(block)])
# Rendering
if ui.obliquecheck : args.append("--oblique")
if ui.obliqueanglecheck: args.append("--oblique-angle")
if ui.isometriccheck : args.append("--isometric")
if ui.nightcheck : args.append("--night")
if ui.heightmapcheck : args.append("--heightmap")
if ui.rotate : args.extend(["-r", str(ui.rotate)])
if int(ui.threads) != 0: args.extend(["--threads", str(ui.threads)])
# Text and fonts
args.extend(["--ttf-size" , str(ui.ttfsize)])
args.extend(["--ttf-color", str(ui.ttfcolor)])
if ui.showplayerscheck: args.append("--show-players")
if ui.showsignscheck : args.append("--show-signs")
if ui.showcoordscheck : args.append("--show-coordinates")
if ui.playercolorcheck: args.extend(["--player-color", str(ui.playercolor)])
if ui.signcolorcheck : args.extend(["--sign-color", str(ui.signcolor)])
if ui.coordcolorcheck : args.extend(["--coordinate-color", str(ui.coordcolor)])
# Adding the "Files" section to the end for readability reasons
args.extend([
"-w", os.path.expanduser(ui.world),
"-o", os.path.expanduser(ui.output),
])
self.args = args
def main(self):
self.win.mainloop()
if __name__ == "__main__":
p = Program()
p.main()
|
TomT0m/Boolean-c10t
|
gui/c10t-tk/c10t-tk.py
|
Python
|
bsd-3-clause
| 4,346
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import os
import sys
from telemetry.core import browser_finder
from telemetry.core import browser_options
from telemetry.page import page_test
from telemetry.page import page_runner
from telemetry.page import page_set
from telemetry.test import discover
def Main(test_dir, page_set_filenames):
"""Turns a PageTest into a command-line program.
Args:
test_dir: Path to directory containing PageTests.
"""
tests = discover.DiscoverClasses(test_dir,
os.path.join(test_dir, '..'),
page_test.PageTest)
# Naively find the test. If we use the browser options parser, we run
# the risk of failing to parse if we use a test-specific parameter.
test_name = None
for arg in sys.argv:
if arg in tests:
test_name = arg
options = browser_options.BrowserOptions()
parser = options.CreateParser('%prog [options] <test> <page_set>')
page_runner.PageRunner.AddCommandLineOptions(parser)
test = None
if test_name is not None:
if test_name not in tests:
sys.stderr.write('No test name %s found' % test_name)
sys.exit(1)
test = tests[test_name]()
test.AddCommandLineOptions(parser)
_, args = parser.parse_args()
if test is None or len(args) != 2:
parser.print_usage()
print >> sys.stderr, 'Available tests:\n%s\n' % ',\n'.join(
sorted(tests.keys()))
print >> sys.stderr, 'Available page_sets:\n%s\n' % ',\n'.join(
sorted([os.path.relpath(f)
for f in page_set_filenames]))
sys.exit(1)
ps = page_set.PageSet.FromFile(args[1])
results = page_test.PageTestResults()
return RunTestOnPageSet(options, ps, test, results)
def RunTestOnPageSet(options, ps, test, results):
test.CustomizeBrowserOptions(options)
possible_browser = browser_finder.FindBrowser(options)
if not possible_browser:
print >> sys.stderr, """No browser found.\n
Use --browser=list to figure out which are available.\n"""
sys.exit(1)
with page_runner.PageRunner(ps) as runner:
runner.Run(options, possible_browser, test, results)
print '%i pages succeed\n' % len(results.page_successes)
if len(results.page_failures):
logging.warning('Failed pages: %s', '\n'.join(
[failure['page'].url for failure in results.page_failures]))
if len(results.skipped_pages):
logging.warning('Skipped pages: %s', '\n'.join(
[skipped['page'].url for skipped in results.skipped_pages]))
return min(255, len(results.page_failures))
|
codenote/chromium-test
|
tools/telemetry/telemetry/page/page_test_runner.py
|
Python
|
bsd-3-clause
| 2,691
|
if __name__ == '__main__':
x = int(raw_input())
y = int(raw_input())
z = int(raw_input())
n = int(raw_input())
print ( [ [i,j,k] for i in range(x+1) for j in range(y+1) for k in range(z+1) if i+j+k != n] )
|
jaswal72/hacker-rank
|
Python/Basic Data Types/List Comprehensions.py
|
Python
|
mit
| 226
|
# -*- coding: utf-8 -*-
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
import pytest
from tableschema import types
from tableschema.config import ERROR
# Tests
@pytest.mark.parametrize('format, value, result', [
('default', [2000, 10], (2000, 10)),
('default', (2000, 10), (2000, 10)),
('default', '2000-10', (2000, 10)),
('default', (2000, 10, 20), ERROR),
('default', '2000-13-20', ERROR),
('default', '2000-13', ERROR),
('default', '2000-0', ERROR),
('default', '13', ERROR),
('default', -10, ERROR),
('default', 20, ERROR),
('default', '3.14', ERROR),
('default', '', ERROR),
])
def test_cast_yearmonth(format, value, result):
assert types.cast_yearmonth(format, value) == result
|
okfn/jsontableschema-py
|
tests/types/test_yearmonth.py
|
Python
|
mit
| 842
|
from collections import defaultdict
from mongoengine.python_support import txt_type
__all__ = ('NotRegistered', 'InvalidDocumentError', 'LookUpError',
'DoesNotExist', 'MultipleObjectsReturned', 'InvalidQueryError',
'OperationError', 'NotUniqueError', 'FieldDoesNotExist',
'ValidationError')
class NotRegistered(Exception):
pass
class InvalidDocumentError(Exception):
pass
class LookUpError(AttributeError):
pass
class DoesNotExist(Exception):
pass
class MultipleObjectsReturned(Exception):
pass
class InvalidQueryError(Exception):
pass
class OperationError(Exception):
pass
class NotUniqueError(OperationError):
pass
class FieldDoesNotExist(Exception):
"""Raised when trying to set a field
not declared in a :class:`~mongoengine.Document`
or an :class:`~mongoengine.EmbeddedDocument`.
To avoid this behavior on data loading,
you should the :attr:`strict` to ``False``
in the :attr:`meta` dictionnary.
"""
class ValidationError(AssertionError):
"""Validation exception.
May represent an error validating a field or a
document containing fields with validation errors.
:ivar errors: A dictionary of errors for fields within this
document or list, or None if the error is for an
individual field.
"""
errors = {}
field_name = None
_message = None
def __init__(self, message="", **kwargs):
self.errors = kwargs.get('errors', {})
self.field_name = kwargs.get('field_name')
self.message = message
def __str__(self):
return txt_type(self.message)
def __repr__(self):
return '%s(%s,)' % (self.__class__.__name__, self.message)
def __getattribute__(self, name):
message = super(ValidationError, self).__getattribute__(name)
if name == 'message':
if self.field_name:
message = '%s' % message
if self.errors:
message = '%s(%s)' % (message, self._format_errors())
return message
def _get_message(self):
return self._message
def _set_message(self, message):
self._message = message
message = property(_get_message, _set_message)
def to_dict(self):
"""Returns a dictionary of all errors within a document
Keys are field names or list indices and values are the
validation error messages, or a nested dictionary of
errors for an embedded document or list.
"""
def build_dict(source):
errors_dict = {}
if not source:
return errors_dict
if isinstance(source, dict):
for field_name, error in source.iteritems():
errors_dict[field_name] = build_dict(error)
elif isinstance(source, ValidationError) and source.errors:
return build_dict(source.errors)
else:
return unicode(source)
return errors_dict
if not self.errors:
return {}
return build_dict(self.errors)
def _format_errors(self):
"""Returns a string listing all errors within a document"""
def generate_key(value, prefix=''):
if isinstance(value, list):
value = ' '.join([generate_key(k) for k in value])
if isinstance(value, dict):
value = ' '.join(
[generate_key(v, k) for k, v in value.iteritems()])
results = "%s.%s" % (prefix, value) if prefix else value
return results
error_dict = defaultdict(list)
for k, v in self.to_dict().iteritems():
error_dict[generate_key(v)].append(k)
return ' '.join(["%s: %s" % (k, v) for k, v in error_dict.iteritems()])
|
starsirius/mongoengine
|
mongoengine/errors.py
|
Python
|
mit
| 3,834
|
# The contents of this file are subject to the BitTorrent Open Source License
# Version 1.1 (the License). You may not copy or use this file, in either
# source code or executable form, except in compliance with the License. You
# may obtain a copy of the License at http://www.bittorrent.com/license/.
#
# Software distributed under the License is distributed on an AS IS basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
# for the specific language governing rights and limitations under the
# License.
# written by Matt Chisholm
import wx
from BTL.defer import ThreadedDeferred
from BTL.language import languages, language_names
from BTL.platform import app_name
from BitTorrent.platform import read_language_file, write_language_file
from BitTorrent.GUI_wx import SPACING, VSizer, gui_wrap, text_wrappable
error_color = wx.Colour(192,0,0)
class LanguageSettings(wx.Panel):
def __init__(self, parent, *a, **k):
wx.Panel.__init__(self, parent, *a, **k)
self.sizer = VSizer()
self.SetSizer(self.sizer)
if 'errback' in k:
self.errback = k.pop('errback')
else:
self.errback = self.set_language_failed
# widgets
self.box = wx.StaticBox(self, label="Translate %s into:" % app_name)
self.language_names = ["System default",] + [language_names[l] for l in languages]
languages.insert(0, '')
self.languages = languages
self.choice = wx.Choice(self, choices=self.language_names)
self.Bind(wx.EVT_CHOICE, self.set_language, self.choice)
restart = wx.StaticText(self, -1,
"You must restart %s for the\nlanguage "
"setting to take effect." % app_name)
self.bottom_error = wx.StaticText(self, -1, '')
self.bottom_error.SetForegroundColour(error_color)
# sizers
self.box_sizer = wx.StaticBoxSizer(self.box, wx.VERTICAL)
# set menu selection and warning item if necessary
self.valid = True
lang = read_language_file()
if lang is not None:
try:
i = self.languages.index(lang)
self.choice.SetSelection(i)
except ValueError, e:
self.top_error = wx.StaticText(self, -1,
"This version of %s does not \nsupport the language '%s'."%(app_name,lang),)
self.top_error.SetForegroundColour(error_color)
self.box_sizer.Add(self.top_error, flag=wx.TOP|wx.LEFT|wx.RIGHT, border=SPACING)
# BUG add menu separator
# BUG change color of extra menu item
self.choice.Append(lang)
self.choice.SetSelection(len(self.languages))
self.valid = False
else:
self.choice.SetSelection(0)
# other sizers
self.box_sizer.Add(self.choice, flag=wx.GROW|wx.ALL, border=SPACING)
self.box_sizer.Add(restart, flag=wx.BOTTOM|wx.LEFT|wx.RIGHT, border=SPACING)
self.box_sizer.Add(self.bottom_error, flag=wx.BOTTOM|wx.LEFT|wx.RIGHT, border=SPACING)
# clear out bottom error
self.clear_error()
self.sizer.AddFirst(self.box_sizer, flag=wx.GROW)
self.sizer.Fit(self)
def set_language(self, *a):
index = self.choice.GetSelection()
if index >= len(self.languages):
return
l = self.languages[index]
if not self.valid:
self.choice.Delete(len(self.languages))
self.choice.SetSelection(index)
self.valid = True
self.box_sizer.Detach(0)
self.top_error.Destroy()
self.box_sizer.Layout()
self.sizer.Layout()
d = ThreadedDeferred(gui_wrap, write_language_file, l)
d.addErrback(lambda e: self.set_language_failed(e, l))
d.addCallback(lambda r: self.language_was_set())
def language_was_set(self, *a):
self.clear_error()
wx.MessageBox("You must restart %s for the language "
"setting to take effect." % app_name,
"%s translation" % app_name,
style=wx.ICON_INFORMATION)
def clear_error(self):
index = self.box_sizer.GetItem(self.bottom_error)
if index:
self.box_sizer.Detach(self.bottom_error)
self.bottom_error.SetLabel('')
self.refit()
def set_error(self, errstr):
index = self.box_sizer.GetItem(self.bottom_error)
if not index:
self.box_sizer.Add(self.bottom_error, flag=wx.BOTTOM|wx.LEFT|wx.RIGHT, border=SPACING)
self.bottom_error.SetLabel(errstr)
if text_wrappable: self.bottom_error.Wrap(250)
self.refit()
def set_language_failed(self, e, l):
errstr = 'Could not find translation for language "%s"' % l
wx.the_app.logger.error(errstr, exc_info=e)
errstr = errstr + '\n%s: %s' % (str(e[0]), unicode(e[1].args[0]))
self.set_error(errstr)
def refit(self):
self.box_sizer.Layout()
self.sizer.Layout()
#self.sizer.Fit(self)
self.GetParent().Fit()
|
rabimba/p2pScrapper
|
BitTorrent-5.2.2/BitTorrent/GUI_wx/LanguageSettings.py
|
Python
|
mit
| 5,226
|
"""
Microformats2 is a general way to mark up any HTML document with
classes and propeties. This library parses structured data from
a microformatted HTML document and returns a well-formed JSON
dictionary.
"""
from .version import __version__
from .parser import Parser, parse
from .mf_helpers import get_url
__all__ = ['Parser', 'parse', 'get_url', '__version__']
|
tommorris/mf2py
|
mf2py/__init__.py
|
Python
|
mit
| 369
|
#!/usr/bin/env python
import string, copy
import sys
def read_fasta(afile, query_id=''):
"""Parses any fasta, a2m, a3m file, sequence or alignment file.
@param afile input file
@param query_id ID of query sequence (default='')
Ensures: key of a given query ID only contains its ID, not the full header
@return {header: [sequence_1, sequence_2, ...]}
"""
seq_dict = {}
header = ''
seq = ''
for aline in afile:
aline = aline.strip()
# check for header
if aline.startswith('>'):
if header != '' and seq != '':
if seq_dict.has_key(header):
seq_dict[header].append(seq)
else:
seq_dict[header] = [seq]
seq = ''
if aline.startswith('>%s' % query_id) and query_id !='':
header = query_id
else:
header = aline[1:]
# otherwise concatenate sequence
else:
#aline_seq = aline.translate(None, '.-').upper()
seq += aline
# add last entry
if header != '':
if seq_dict.has_key(header):
seq_dict[header].append(seq)
else:
seq_dict[header] = [seq]
else:
sys.stderr.write('ERROR: file empty or wrong file format')
return seq_dict
def read_fasta_pdb(afile, query_id=''):
"""Parses any fasta, a2m, a3m file, sequence or alignment file.
@param afile input file
@param query_id ID of query sequence (default='')
Ensures: key = PDB accession
@return {PDB-acc: [sequence_1, sequence_2, ...]}
"""
seq_dict = {}
header = ''
seq = ''
for aline in afile:
aline = aline.strip()
# check for header
if aline.startswith('>'):
if header != '' and seq != '':
if seq_dict.has_key(header):
seq_dict[header].append(seq)
else:
seq_dict[header] = [seq]
seq = ''
if aline.startswith('>%s' % query_id) and query_id !='':
header = query_id
else:
header = aline[1:].split()[0]
# otherwise concatenate sequence
else:
#aline_seq = aline.translate(None, '.-').upper()
seq += aline
# add last entry
if header != '':
if seq_dict.has_key(header):
seq_dict[header].append(seq)
else:
seq_dict[header] = [seq]
else:
sys.stderr.write('ERROR: file empty or wrong file format')
return seq_dict
if __name__ == "__main__":
afile = open(sys.argv[1], 'r')
if len(sys.argv) == 3:
query_id = sys.argv[2]
else:
query_id = ''
seq_dict = read_fasta(afile, query_id)
afile.close()
print 'There are %d entries with unique headers in your file.' % len(seq_dict)
|
ElofssonLab/pcons-fold
|
pconsc/plotting/parse_fasta.py
|
Python
|
mit
| 3,026
|
"""
x86 definitions.
Commonly used definitions.
"""
from __future__ import absolute_import
from cdsl.isa import TargetISA, CPUMode
import base.instructions
from . import instructions as x86
from base.immediates import floatcc
ISA = TargetISA('x86', [base.instructions.GROUP, x86.GROUP]) # type: TargetISA
# CPU modes for 32-bit and 64-bit operation.
X86_64 = CPUMode('I64', ISA)
X86_32 = CPUMode('I32', ISA)
# The set of floating point condition codes that are directly supported.
# Other condition codes need to be reversed or expressed as two tests.
supported_floatccs = [
floatcc.ord,
floatcc.uno,
floatcc.one,
floatcc.ueq,
floatcc.gt,
floatcc.ge,
floatcc.ult,
floatcc.ule]
|
nrc/rustc-perf
|
collector/benchmarks/cranelift-codegen/cranelift-codegen/meta-python/isa/x86/defs.py
|
Python
|
mit
| 746
|
from __future__ import unicode_literals, division, absolute_import
import os
import re
import logging
from path import path
from flexget import plugin
from flexget.event import event
from flexget.config_schema import one_or_more
from flexget.plugin import get_plugin_by_name
from flexget.utils.tools import TimedDict
log = logging.getLogger('exists_movie')
class FilterExistsMovie(object):
"""
Reject existing movies.
Syntax:
exists_movie:
path: /path/to/movies
[type: {dirs|files}]
[allow_different_qualities: {better|yes|no}]
[lookup: {imdb|no}]
"""
schema = {
'anyOf': [
one_or_more({'type': 'string', 'format': 'path'}),
{
'type': 'object',
'properties': {
'path': one_or_more({'type': 'string', 'format': 'path'}),
'allow_different_qualities': {'enum': ['better', True, False], 'default': False},
'type': {'enum': ['files', 'dirs'], 'default': 'dirs'},
'lookup': {'enum': ['imdb', False], 'default': False}
},
'required': ['path'],
'additionalProperties': False
}
]
}
dir_pattern = re.compile('\b(cd.\d|subs?|samples?)\b',re.IGNORECASE)
file_pattern = re.compile('\.(avi|mkv|mp4|mpg|webm)$',re.IGNORECASE)
def __init__(self):
self.cache = TimedDict(cache_time='1 hour')
def prepare_config(self, config):
# if config is not a dict, assign value to 'path' key
if not isinstance(config, dict):
config = { 'path': config }
if not config.get('type'):
config['type'] = 'dirs'
# if only a single path is passed turn it into a 1 element list
if isinstance(config['path'], basestring):
config['path'] = [config['path']]
return config
@plugin.priority(-1)
def on_task_filter(self, task, config):
if not task.accepted:
log.debug('nothing accepted, aborting')
return
config = self.prepare_config(config)
imdb_lookup = plugin.get_plugin_by_name('imdb_lookup').instance
incompatible_files = 0
incompatible_entries = 0
count_entries = 0
count_files = 0
# list of imdb ids gathered from paths / cache
qualities = {}
for folder in config['path']:
folder = path(folder).expanduser()
# see if this path has already been scanned
if folder in self.cache:
log.verbose('Using cached scan for %s ...' % folder)
qualities.update(self.cache[folder])
continue
path_ids = {}
if not folder.isdir():
log.critical('Path %s does not exist' % folder)
continue
log.verbose('Scanning path %s ...' % folder)
# Help debugging by removing a lot of noise
#logging.getLogger('movieparser').setLevel(logging.WARNING)
#logging.getLogger('imdb_lookup').setLevel(logging.WARNING)
# scan through
items = []
if config.get('type') == 'dirs':
for d in folder.walkdirs(errors='ignore'):
if self.dir_pattern.search(d.name):
continue
items.append(d.name)
elif config.get('type') == 'files':
for f in folder.walkfiles(errors='ignore'):
if not self.file_pattern.search(f.name):
continue
items.append(f.name)
for item in items:
count_files += 1
movie = get_plugin_by_name('parsing').instance.parse_movie(item)
if config.get('lookup') == 'imdb':
try:
imdb_id = imdb_lookup.imdb_id_lookup(movie_title=movie.name,
raw_title=item,
session=task.session)
if imdb_id in path_ids:
log.trace('duplicate %s' % item)
continue
if imdb_id is not None:
log.trace('adding: %s' % imdb_id)
path_ids[imdb_id] = movie.quality
except plugin.PluginError as e:
log.trace('%s lookup failed (%s)' % (item, e.value))
incompatible_files += 1
else:
path_ids[movie.name] = movie.quality
log.trace('adding: %s' % movie.name)
# store to cache and extend to found list
self.cache[folder] = path_ids
qualities.update(path_ids)
log.debug('-- Start filtering entries ----------------------------------')
# do actual filtering
for entry in task.accepted:
count_entries += 1
if config.get('lookup') == 'imdb':
key = 'imdb_id'
if not entry.get('imdb_id', eval_lazy=False):
try:
imdb_lookup.lookup(entry)
except plugin.PluginError as e:
log.trace('entry %s imdb failed (%s)' % (entry['title'], e.value))
incompatible_entries += 1
continue
else:
key = 'movie_name'
if not entry.get('movie_name', eval_lazy=False):
movie = get_plugin_by_name('parsing').instance.parse_movie(entry['title'])
entry['movie_name'] = movie.name
# actual filtering
if entry[key] in qualities:
if config.get('allow_different_qualities') == 'better':
if entry['quality'] > qualities[entry[key]]:
log.trace('better quality')
continue
elif config.get('allow_different_qualities'):
if entry['quality'] != qualities[entry[key]]:
log.trace('wrong quality')
continue
entry.reject('movie exists')
if incompatible_files or incompatible_entries:
log.verbose('There were some incompatible items. %s of %s entries '
'and %s of %s directories could not be verified.' %
(incompatible_entries, count_entries, incompatible_files, count_files))
log.debug('-- Finished filtering entries -------------------------------')
@event('plugin.register')
def register_plugin():
plugin.register(FilterExistsMovie, 'exists_movie', groups=['exists'], api_ver=2)
|
vfrc2/Flexget
|
flexget/plugins/filter/exists_movie.py
|
Python
|
mit
| 6,869
|
"""
Type Inference
"""
from .typevar import TypeVar
from .ast import Def, Var
from copy import copy
from itertools import product
try:
from typing import Dict, TYPE_CHECKING, Union, Tuple, Optional, Set # noqa
from typing import Iterable, List, Any, TypeVar as MTypeVar # noqa
from typing import cast
from .xform import Rtl, XForm # noqa
from .ast import Expr # noqa
from .typevar import TypeSet # noqa
if TYPE_CHECKING:
T = MTypeVar('T')
TypeMap = Dict[TypeVar, TypeVar]
VarTyping = Dict[Var, TypeVar]
except ImportError:
TYPE_CHECKING = False
pass
class TypeConstraint(object):
"""
Base class for all runtime-emittable type constraints.
"""
def __init__(self, tv, tc):
# type: (TypeVar, Union[TypeVar, TypeSet]) -> None
"""
Abstract "constructor" for linters
"""
assert False, "Abstract"
def translate(self, m):
# type: (Union[TypeEnv, TypeMap]) -> TypeConstraint
"""
Translate any TypeVars in the constraint according to the map or
TypeEnv m
"""
def translate_one(a):
# type: (Any) -> Any
if (isinstance(a, TypeVar)):
return m[a] if isinstance(m, TypeEnv) else subst(a, m)
return a
res = None # type: TypeConstraint
res = self.__class__(*tuple(map(translate_one, self._args())))
return res
def __eq__(self, other):
# type: (object) -> bool
if (not isinstance(other, self.__class__)):
return False
assert isinstance(other, TypeConstraint) # help MyPy figure out other
return self._args() == other._args()
def is_concrete(self):
# type: () -> bool
"""
Return true iff all typevars in the constraint are singletons.
"""
return [] == list(filter(lambda x: x.singleton_type() is None,
self.tvs()))
def __hash__(self):
# type: () -> int
return hash(self._args())
def _args(self):
# type: () -> Tuple[Any,...]
"""
Return a tuple with the exact arguments passed to __init__ to create
this object.
"""
assert False, "Abstract"
def tvs(self):
# type: () -> Iterable[TypeVar]
"""
Return the typevars contained in this constraint.
"""
return list(filter(lambda x: isinstance(x, TypeVar), self._args()))
def is_trivial(self):
# type: () -> bool
"""
Return true if this constrain is statically decidable.
"""
assert False, "Abstract"
def eval(self):
# type: () -> bool
"""
Evaluate this constraint. Should only be called when the constraint has
been translated to concrete types.
"""
assert False, "Abstract"
def __repr__(self):
# type: () -> str
return (self.__class__.__name__ + '(' +
', '.join(map(str, self._args())) + ')')
class TypesEqual(TypeConstraint):
"""
Constraint specifying that two derived type vars must have the same runtime
type.
"""
def __init__(self, tv1, tv2):
# type: (TypeVar, TypeVar) -> None
(self.tv1, self.tv2) = sorted([tv1, tv2], key=repr)
def _args(self):
# type: () -> Tuple[Any,...]
""" See TypeConstraint._args() """
return (self.tv1, self.tv2)
def is_trivial(self):
# type: () -> bool
""" See TypeConstraint.is_trivial() """
return self.tv1 == self.tv2 or self.is_concrete()
def eval(self):
# type: () -> bool
""" See TypeConstraint.eval() """
assert self.is_concrete()
return self.tv1.singleton_type() == self.tv2.singleton_type()
class InTypeset(TypeConstraint):
"""
Constraint specifying that a type var must belong to some typeset.
"""
def __init__(self, tv, ts):
# type: (TypeVar, TypeSet) -> None
assert not tv.is_derived and tv.name.startswith("typeof_")
self.tv = tv
self.ts = ts
def _args(self):
# type: () -> Tuple[Any,...]
""" See TypeConstraint._args() """
return (self.tv, self.ts)
def is_trivial(self):
# type: () -> bool
""" See TypeConstraint.is_trivial() """
tv_ts = self.tv.get_typeset().copy()
# Trivially True
if (tv_ts.issubset(self.ts)):
return True
# Trivially false
tv_ts &= self.ts
if (tv_ts.size() == 0):
return True
return self.is_concrete()
def eval(self):
# type: () -> bool
""" See TypeConstraint.eval() """
assert self.is_concrete()
return self.tv.get_typeset().issubset(self.ts)
class WiderOrEq(TypeConstraint):
"""
Constraint specifying that a type var tv1 must be wider than or equal to
type var tv2 at runtime. This requires that:
1) They have the same number of lanes
2) In a lane tv1 has at least as many bits as tv2.
"""
def __init__(self, tv1, tv2):
# type: (TypeVar, TypeVar) -> None
self.tv1 = tv1
self.tv2 = tv2
def _args(self):
# type: () -> Tuple[Any,...]
""" See TypeConstraint._args() """
return (self.tv1, self.tv2)
def is_trivial(self):
# type: () -> bool
""" See TypeConstraint.is_trivial() """
# Trivially true
if (self.tv1 == self.tv2):
return True
ts1 = self.tv1.get_typeset()
ts2 = self.tv2.get_typeset()
def set_wider_or_equal(s1, s2):
# type: (Set[int], Set[int]) -> bool
return len(s1) > 0 and len(s2) > 0 and min(s1) >= max(s2)
# Trivially True
if set_wider_or_equal(ts1.ints, ts2.ints) and\
set_wider_or_equal(ts1.floats, ts2.floats) and\
set_wider_or_equal(ts1.bools, ts2.bools):
return True
def set_narrower(s1, s2):
# type: (Set[int], Set[int]) -> bool
return len(s1) > 0 and len(s2) > 0 and min(s1) < max(s2)
# Trivially False
if set_narrower(ts1.ints, ts2.ints) and\
set_narrower(ts1.floats, ts2.floats) and\
set_narrower(ts1.bools, ts2.bools):
return True
# Trivially False
if len(ts1.lanes.intersection(ts2.lanes)) == 0:
return True
return self.is_concrete()
def eval(self):
# type: () -> bool
""" See TypeConstraint.eval() """
assert self.is_concrete()
typ1 = self.tv1.singleton_type()
typ2 = self.tv2.singleton_type()
return typ1.wider_or_equal(typ2)
class SameWidth(TypeConstraint):
"""
Constraint specifying that two types have the same width. E.g. i32x2 has
the same width as i64x1, i16x4, f32x2, f64, b1x64 etc.
"""
def __init__(self, tv1, tv2):
# type: (TypeVar, TypeVar) -> None
self.tv1 = tv1
self.tv2 = tv2
def _args(self):
# type: () -> Tuple[Any,...]
""" See TypeConstraint._args() """
return (self.tv1, self.tv2)
def is_trivial(self):
# type: () -> bool
""" See TypeConstraint.is_trivial() """
# Trivially true
if (self.tv1 == self.tv2):
return True
ts1 = self.tv1.get_typeset()
ts2 = self.tv2.get_typeset()
# Trivially False
if len(ts1.widths().intersection(ts2.widths())) == 0:
return True
return self.is_concrete()
def eval(self):
# type: () -> bool
""" See TypeConstraint.eval() """
assert self.is_concrete()
typ1 = self.tv1.singleton_type()
typ2 = self.tv2.singleton_type()
return (typ1.width() == typ2.width())
class TypeEnv(object):
"""
Class encapsulating the necessary book keeping for type inference.
:attribute type_map: dict holding the equivalence relations between tvs
:attribute constraints: a list of accumulated constraints - tuples
(tv1, tv2)) where tv1 and tv2 are equal
:attribute ranks: dictionary recording the (optional) ranks for tvs.
'rank' is a partial ordering on TVs based on their
origin. See comments in rank() and register().
:attribute vars: a set containing all known Vars
:attribute idx: counter used to get fresh ids
"""
RANK_SINGLETON = 5
RANK_INPUT = 4
RANK_INTERMEDIATE = 3
RANK_OUTPUT = 2
RANK_TEMP = 1
RANK_INTERNAL = 0
def __init__(self, arg=None):
# type: (Optional[Tuple[TypeMap, List[TypeConstraint]]]) -> None
self.ranks = {} # type: Dict[TypeVar, int]
self.vars = set() # type: Set[Var]
if arg is None:
self.type_map = {} # type: TypeMap
self.constraints = [] # type: List[TypeConstraint]
else:
self.type_map, self.constraints = arg
self.idx = 0
def __getitem__(self, arg):
# type: (Union[TypeVar, Var]) -> TypeVar
"""
Lookup the canonical representative for a Var/TypeVar.
"""
if (isinstance(arg, Var)):
assert arg in self.vars
tv = arg.get_typevar()
else:
assert (isinstance(arg, TypeVar))
tv = arg
while tv in self.type_map:
tv = self.type_map[tv]
if tv.is_derived:
tv = TypeVar.derived(self[tv.base], tv.derived_func)
return tv
def equivalent(self, tv1, tv2):
# type: (TypeVar, TypeVar) -> None
"""
Record a that the free tv1 is part of the same equivalence class as
tv2. The canonical representative of the merged class is tv2's
canonical representative.
"""
assert not tv1.is_derived
assert self[tv1] == tv1
# Make sure we don't create cycles
if tv2.is_derived:
assert self[tv2.base] != tv1
self.type_map[tv1] = tv2
def add_constraint(self, constr):
# type: (TypeConstraint) -> None
"""
Add a new constraint
"""
if (constr in self.constraints):
return
# InTypeset constraints can be expressed by constraining the typeset of
# a variable. No need to add them to self.constraints
if (isinstance(constr, InTypeset)):
self[constr.tv].constrain_types_by_ts(constr.ts)
return
self.constraints.append(constr)
def get_uid(self):
# type: () -> str
r = str(self.idx)
self.idx += 1
return r
def __repr__(self):
# type: () -> str
return self.dot()
def rank(self, tv):
# type: (TypeVar) -> int
"""
Get the rank of tv in the partial order. TVs directly associated with a
Var get their rank from the Var (see register()). Internally generated
non-derived TVs implicitly get the lowest rank (0). Derived variables
get their rank from their free typevar. Singletons have the highest
rank. TVs associated with vars in a source pattern have a higher rank
than TVs associated with temporary vars.
"""
default_rank = TypeEnv.RANK_INTERNAL if tv.singleton_type() is None \
else TypeEnv.RANK_SINGLETON
if tv.is_derived:
tv = tv.free_typevar()
return self.ranks.get(tv, default_rank)
def register(self, v):
# type: (Var) -> None
"""
Register a new Var v. This computes a rank for the associated TypeVar
for v, which is used to impose a partial order on type variables.
"""
self.vars.add(v)
if v.is_input():
r = TypeEnv.RANK_INPUT
elif v.is_intermediate():
r = TypeEnv.RANK_INTERMEDIATE
elif v.is_output():
r = TypeEnv.RANK_OUTPUT
else:
assert(v.is_temp())
r = TypeEnv.RANK_TEMP
self.ranks[v.get_typevar()] = r
def free_typevars(self):
# type: () -> List[TypeVar]
"""
Get the free typevars in the current type env.
"""
tvs = set([self[tv].free_typevar() for tv in self.type_map.keys()])
tvs = tvs.union(set([self[v].free_typevar() for v in self.vars]))
# Filter out None here due to singleton type vars
return sorted(filter(lambda x: x is not None, tvs),
key=lambda x: x.name)
def normalize(self):
# type: () -> None
"""
Normalize by:
- collapsing any roots that don't correspond to a concrete TV AND
have a single TV derived from them or equivalent to them
E.g. if we have a root of the tree that looks like:
typeof_a typeof_b
\\ /
typeof_x
|
half_width(1)
|
1
we want to collapse the linear path between 1 and typeof_x. The
resulting graph is:
typeof_a typeof_b
\\ /
typeof_x
"""
source_tvs = set([v.get_typevar() for v in self.vars])
children = {} # type: Dict[TypeVar, Set[TypeVar]]
for v in self.type_map.values():
if not v.is_derived:
continue
t = v.free_typevar()
s = children.get(t, set())
s.add(v)
children[t] = s
for (a, b) in self.type_map.items():
s = children.get(b, set())
s.add(a)
children[b] = s
for r in self.free_typevars():
while (r not in source_tvs and r in children and
len(children[r]) == 1):
child = list(children[r])[0]
if child in self.type_map:
assert self.type_map[child] == r
del self.type_map[child]
r = child
def extract(self):
# type: () -> TypeEnv
"""
Extract a clean type environment from self, that only mentions
TVs associated with real variables
"""
vars_tvs = set([v.get_typevar() for v in self.vars])
new_type_map = {tv: self[tv] for tv in vars_tvs if tv != self[tv]}
new_constraints = [] # type: List[TypeConstraint]
for constr in self.constraints:
constr = constr.translate(self)
if constr.is_trivial() or constr in new_constraints:
continue
# Sanity: translated constraints should refer to only real vars
for arg in constr._args():
if (not isinstance(arg, TypeVar)):
continue
arg_free_tv = arg.free_typevar()
assert arg_free_tv is None or arg_free_tv in vars_tvs
new_constraints.append(constr)
# Sanity: translated typemap should refer to only real vars
for (k, v) in new_type_map.items():
assert k in vars_tvs
assert v.free_typevar() is None or v.free_typevar() in vars_tvs
t = TypeEnv()
t.type_map = new_type_map
t.constraints = new_constraints
# ranks and vars contain only TVs associated with real vars
t.ranks = copy(self.ranks)
t.vars = copy(self.vars)
return t
def concrete_typings(self):
# type: () -> Iterable[VarTyping]
"""
Return an iterable over all possible concrete typings permitted by this
TypeEnv.
"""
free_tvs = self.free_typevars()
free_tv_iters = [tv.get_typeset().concrete_types() for tv in free_tvs]
for concrete_types in product(*free_tv_iters):
# Build type substitutions for all free vars
m = {tv: TypeVar.singleton(typ)
for (tv, typ) in zip(free_tvs, concrete_types)}
concrete_var_map = {v: subst(self[v.get_typevar()], m)
for v in self.vars}
# Check if constraints are satisfied for this typing
failed = None
for constr in self.constraints:
concrete_constr = constr.translate(m)
if not concrete_constr.eval():
failed = concrete_constr
break
if (failed is not None):
continue
yield concrete_var_map
def permits(self, concrete_typing):
# type: (VarTyping) -> bool
"""
Return true iff this TypeEnv permits the (possibly partial) concrete
variable type mapping concrete_typing.
"""
# Each variable has a concrete type, that is a subset of its inferred
# typeset.
for (v, typ) in concrete_typing.items():
assert typ.singleton_type() is not None
if not typ.get_typeset().issubset(self[v].get_typeset()):
return False
m = {self[v]: typ for (v, typ) in concrete_typing.items()}
# Constraints involving vars in concrete_typing are satisfied
for constr in self.constraints:
try:
# If the constraint includes only vars in concrete_typing, we
# can translate it using m. Otherwise we encounter a KeyError
# and ignore it
constr = constr.translate(m)
if not constr.eval():
return False
except KeyError:
pass
return True
def dot(self):
# type: () -> str
"""
Return a representation of self as a graph in dot format.
Nodes correspond to TypeVariables.
Dotted edges correspond to equivalences between TVS
Solid edges correspond to derivation relations between TVs.
Dashed edges correspond to equivalence constraints.
"""
def label(s):
# type: (TypeVar) -> str
return "\"" + str(s) + "\""
# Add all registered TVs (as some of them may be singleton nodes not
# appearing in the graph
nodes = set() # type: Set[TypeVar]
edges = set() # type: Set[Tuple[TypeVar, TypeVar, str, str, Optional[str]]] # noqa
def add_nodes(*args):
# type: (*TypeVar) -> None
for tv in args:
nodes.add(tv)
while (tv.is_derived):
nodes.add(tv.base)
edges.add((tv, tv.base, "solid", "forward",
tv.derived_func))
tv = tv.base
for v in self.vars:
add_nodes(v.get_typevar())
for (tv1, tv2) in self.type_map.items():
# Add all intermediate TVs appearing in edges
add_nodes(tv1, tv2)
edges.add((tv1, tv2, "dotted", "forward", None))
for constr in self.constraints:
if isinstance(constr, TypesEqual):
add_nodes(constr.tv1, constr.tv2)
edges.add((constr.tv1, constr.tv2, "dashed", "none", "equal"))
elif isinstance(constr, WiderOrEq):
add_nodes(constr.tv1, constr.tv2)
edges.add((constr.tv1, constr.tv2, "dashed", "forward", ">="))
elif isinstance(constr, SameWidth):
add_nodes(constr.tv1, constr.tv2)
edges.add((constr.tv1, constr.tv2, "dashed", "none",
"same_width"))
else:
assert False, "Can't display constraint {}".format(constr)
root_nodes = set([x for x in nodes
if x not in self.type_map and not x.is_derived])
r = "digraph {\n"
for n in nodes:
r += label(n)
if n in root_nodes:
r += "[xlabel=\"{}\"]".format(self[n].get_typeset())
r += ";\n"
for (n1, n2, style, direction, elabel) in edges:
e = label(n1) + "->" + label(n2)
e += "[style={},dir={}".format(style, direction)
if elabel is not None:
e += ",label=\"{}\"".format(elabel)
e += "];\n"
r += e
r += "}"
return r
if TYPE_CHECKING:
TypingError = str
TypingOrError = Union[TypeEnv, TypingError]
def get_error(typing_or_err):
# type: (TypingOrError) -> Optional[TypingError]
"""
Helper function to appease mypy when checking the result of typing.
"""
if isinstance(typing_or_err, str):
if (TYPE_CHECKING):
return cast(TypingError, typing_or_err)
else:
return typing_or_err
else:
return None
def get_type_env(typing_or_err):
# type: (TypingOrError) -> TypeEnv
"""
Helper function to appease mypy when checking the result of typing.
"""
assert isinstance(typing_or_err, TypeEnv), \
"Unexpected error: {}".format(typing_or_err)
if (TYPE_CHECKING):
return cast(TypeEnv, typing_or_err)
else:
return typing_or_err
def subst(tv, tv_map):
# type: (TypeVar, TypeMap) -> TypeVar
"""
Perform substition on the input tv using the TypeMap tv_map.
"""
if tv in tv_map:
return tv_map[tv]
if tv.is_derived:
return TypeVar.derived(subst(tv.base, tv_map), tv.derived_func)
return tv
def normalize_tv(tv):
# type: (TypeVar) -> TypeVar
"""
Normalize a (potentially derived) TV using the following rules:
- vector and width derived functions commute
{HALF,DOUBLE}VECTOR({HALF,DOUBLE}WIDTH(base)) ->
{HALF,DOUBLE}WIDTH({HALF,DOUBLE}VECTOR(base))
- half/double pairs collapse
{HALF,DOUBLE}WIDTH({DOUBLE,HALF}WIDTH(base)) -> base
{HALF,DOUBLE}VECTOR({DOUBLE,HALF}VECTOR(base)) -> base
"""
vector_derives = [TypeVar.HALFVECTOR, TypeVar.DOUBLEVECTOR]
width_derives = [TypeVar.HALFWIDTH, TypeVar.DOUBLEWIDTH]
if not tv.is_derived:
return tv
df = tv.derived_func
if (tv.base.is_derived):
base_df = tv.base.derived_func
# Reordering: {HALFWIDTH, DOUBLEWIDTH} commute with {HALFVECTOR,
# DOUBLEVECTOR}. Arbitrarily pick WIDTH < VECTOR
if df in vector_derives and base_df in width_derives:
return normalize_tv(
TypeVar.derived(
TypeVar.derived(tv.base.base, df), base_df))
# Cancelling: HALFWIDTH, DOUBLEWIDTH and HALFVECTOR, DOUBLEVECTOR
# cancel each other. Note: This doesn't hide any over/underflows,
# since we 1) assert the safety of each TV in the chain upon its
# creation, and 2) the base typeset is only allowed to shrink.
if (df, base_df) in \
[(TypeVar.HALFVECTOR, TypeVar.DOUBLEVECTOR),
(TypeVar.DOUBLEVECTOR, TypeVar.HALFVECTOR),
(TypeVar.HALFWIDTH, TypeVar.DOUBLEWIDTH),
(TypeVar.DOUBLEWIDTH, TypeVar.HALFWIDTH)]:
return normalize_tv(tv.base.base)
return TypeVar.derived(normalize_tv(tv.base), df)
def constrain_fixpoint(tv1, tv2):
# type: (TypeVar, TypeVar) -> None
"""
Given typevars tv1 and tv2 (which could be derived from one another)
constrain their typesets to be the same. When one is derived from the
other, repeat the constrain process until fixpoint.
"""
# Constrain tv2's typeset as long as tv1's typeset is changing.
while True:
old_tv1_ts = tv1.get_typeset().copy()
tv2.constrain_types(tv1)
if tv1.get_typeset() == old_tv1_ts:
break
old_tv2_ts = tv2.get_typeset().copy()
tv1.constrain_types(tv2)
assert old_tv2_ts == tv2.get_typeset()
def unify(tv1, tv2, typ):
# type: (TypeVar, TypeVar, TypeEnv) -> TypingOrError
"""
Unify tv1 and tv2 in the current type environment typ, and return an
updated type environment or error.
"""
tv1 = normalize_tv(typ[tv1])
tv2 = normalize_tv(typ[tv2])
# Already unified
if tv1 == tv2:
return typ
if typ.rank(tv2) < typ.rank(tv1):
return unify(tv2, tv1, typ)
constrain_fixpoint(tv1, tv2)
if (tv1.get_typeset().size() == 0 or tv2.get_typeset().size() == 0):
return "Error: empty type created when unifying {} and {}"\
.format(tv1, tv2)
# Free -> Derived(Free)
if not tv1.is_derived:
typ.equivalent(tv1, tv2)
return typ
if (tv1.is_derived and TypeVar.is_bijection(tv1.derived_func)):
inv_f = TypeVar.inverse_func(tv1.derived_func)
return unify(tv1.base, normalize_tv(TypeVar.derived(tv2, inv_f)), typ)
typ.add_constraint(TypesEqual(tv1, tv2))
return typ
def move_first(l, i):
# type: (List[T], int) -> List[T]
return [l[i]] + l[:i] + l[i+1:]
def ti_def(definition, typ):
# type: (Def, TypeEnv) -> TypingOrError
"""
Perform type inference on one Def in the current type environment typ and
return an updated type environment or error.
At a high level this works by creating fresh copies of each formal type var
in the Def's instruction's signature, and unifying the formal tv with the
corresponding actual tv.
"""
expr = definition.expr
inst = expr.inst
# Create a dict m mapping each free typevar in the signature of definition
# to a fresh copy of itself.
free_formal_tvs = inst.all_typevars()
m = {tv: tv.get_fresh_copy(str(typ.get_uid())) for tv in free_formal_tvs}
# Update m with any explicitly bound type vars
for (idx, bound_typ) in enumerate(expr.typevars):
m[free_formal_tvs[idx]] = TypeVar.singleton(bound_typ)
# Get fresh copies for each typevar in the signature (both free and
# derived)
fresh_formal_tvs = \
[subst(inst.outs[i].typevar, m) for i in inst.value_results] +\
[subst(inst.ins[i].typevar, m) for i in inst.value_opnums]
# Get the list of actual Vars
actual_vars = [] # type: List[Expr]
actual_vars += [definition.defs[i] for i in inst.value_results]
actual_vars += [expr.args[i] for i in inst.value_opnums]
# Get the list of the actual TypeVars
actual_tvs = []
for v in actual_vars:
assert(isinstance(v, Var))
# Register with TypeEnv that this typevar corresponds ot variable v,
# and thus has a given rank
typ.register(v)
actual_tvs.append(v.get_typevar())
# Make sure we unify the control typevar first.
if inst.is_polymorphic:
idx = fresh_formal_tvs.index(m[inst.ctrl_typevar])
fresh_formal_tvs = move_first(fresh_formal_tvs, idx)
actual_tvs = move_first(actual_tvs, idx)
# Unify each actual typevar with the corresponding fresh formal tv
for (actual_tv, formal_tv) in zip(actual_tvs, fresh_formal_tvs):
typ_or_err = unify(actual_tv, formal_tv, typ)
err = get_error(typ_or_err)
if (err):
return "fail ti on {} <: {}: ".format(actual_tv, formal_tv) + err
typ = get_type_env(typ_or_err)
# Add any instruction specific constraints
for constr in inst.constraints:
typ.add_constraint(constr.translate(m))
return typ
def ti_rtl(rtl, typ):
# type: (Rtl, TypeEnv) -> TypingOrError
"""
Perform type inference on an Rtl in a starting type env typ. Return an
updated type environment or error.
"""
for (i, d) in enumerate(rtl.rtl):
assert (isinstance(d, Def))
typ_or_err = ti_def(d, typ)
err = get_error(typ_or_err) # type: Optional[TypingError]
if (err):
return "On line {}: ".format(i) + err
typ = get_type_env(typ_or_err)
return typ
def ti_xform(xform, typ):
# type: (XForm, TypeEnv) -> TypingOrError
"""
Perform type inference on an Rtl in a starting type env typ. Return an
updated type environment or error.
"""
typ_or_err = ti_rtl(xform.src, typ)
err = get_error(typ_or_err) # type: Optional[TypingError]
if (err):
return "In src pattern: " + err
typ = get_type_env(typ_or_err)
typ_or_err = ti_rtl(xform.dst, typ)
err = get_error(typ_or_err)
if (err):
return "In dst pattern: " + err
typ = get_type_env(typ_or_err)
return get_type_env(typ_or_err)
|
nrc/rustc-perf
|
collector/benchmarks/cranelift-codegen/cranelift-codegen/meta-python/cdsl/ti.py
|
Python
|
mit
| 28,415
|
# coding: utf-8
# Copyright 2014 Globo.com Player authors. All rights reserved.
# Use of this source code is governed by a MIT License
# license that can be found in the LICENSE file.
import sys
PYTHON_MAJOR_VERSION = sys.version_info
import os
import posixpath
try:
import urlparse as url_parser
import urllib2
urlopen = urllib2.urlopen
except ImportError:
import urllib.parse as url_parser
from urllib.request import urlopen as url_opener
urlopen = url_opener
from m3u8.model import M3U8, Playlist, IFramePlaylist, Media, Segment
from m3u8.parser import parse, is_url, ParseError
__all__ = ('M3U8', 'Playlist', 'IFramePlaylist', 'Media',
'Segment', 'loads', 'load', 'parse', 'ParseError')
def loads(content):
'''
Given a string with a m3u8 content, returns a M3U8 object.
Raises ValueError if invalid content
'''
return M3U8(content)
def load(uri):
'''
Retrieves the content from a given URI and returns a M3U8 object.
Raises ValueError if invalid content or IOError if request fails.
'''
if is_url(uri):
return _load_from_uri(uri)
else:
return _load_from_file(uri)
# Support for python3 inspired by https://github.com/szemtiv/m3u8/
def _load_from_uri(uri):
resource = urlopen(uri)
base_uri = _parsed_url(_url_for(uri))
if PYTHON_MAJOR_VERSION < (3,):
content = _read_python2x(resource)
else:
content = _read_python3x(resource)
return M3U8(content, base_uri=base_uri)
def _url_for(uri):
return urlopen(uri).geturl()
def _parsed_url(url):
parsed_url = url_parser.urlparse(url)
prefix = parsed_url.scheme + '://' + parsed_url.netloc
base_path = posixpath.normpath(parsed_url.path + '/..')
return url_parser.urljoin(prefix, base_path)
def _read_python2x(resource):
return resource.read().strip()
def _read_python3x(resource):
return resource.read().decode(resource.headers.get_content_charset(failobj="utf-8"))
def _load_from_file(uri):
with open(uri) as fileobj:
raw_content = fileobj.read().strip()
base_uri = os.path.dirname(uri)
return M3U8(raw_content, base_uri=base_uri)
|
cristina0botez/m3u8
|
m3u8/__init__.py
|
Python
|
mit
| 2,171
|
from tasks.cache import cache_issues
from tasks.cache import cache_pulls
from tasks.cache import cache_commits
from tasks.cache import oldest_issues
from tasks.cache import oldest_pulls
from tasks.cache import least_issues
from tasks.cache import least_pulls
from tasks.cache import issues_closed_since
from tasks.cache import issues_opened_since
from tasks.cache import unassigned_pulls
#add top issue closer
#base stuff
#cache_issues()
#cache_pulls()
#cache_commits()
# filters / views
#oldest_issues()
#oldest_pulls()
#least_issues()
#least_pulls()
#issues_closed_since(start=0, days=7)
#issues_closed_since(start=7, days=14)
issues_opened_since(start=0, days=7)
issues_opened_since(start=7, days=14)
#unassigned_pulls()
|
docker/gordon
|
pkg/legacy/build.py
|
Python
|
mit
| 731
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class SubResource(Model):
"""SubResource.
:param id: Resource ID.
:type id: str
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
}
def __init__(self, *, id: str=None, **kwargs) -> None:
super(SubResource, self).__init__(**kwargs)
self.id = id
|
lmazuel/azure-sdk-for-python
|
azure-mgmt-network/azure/mgmt/network/v2017_03_01/models/sub_resource_py3.py
|
Python
|
mit
| 823
|
# -*- coding: utf-8 -*-
## Part of the pyprimes.py package.
##
## Copyright © 2014 Steven D'Aprano.
## See the file __init__.py for the licence terms for this software.
"""\
=====================================
Timing the speed of primes algorithms
=====================================
"""
from __future__ import division
import sys
from itertools import islice
# Conditionally hack the PYTHONPATH.
if __name__ == '__main__':
import os
path = os.path.dirname(__file__)
parent, here = os.path.split(path)
sys.path.append(parent)
from pyprimes.compat23 import next
import pyprimes.awful as awful
import pyprimes.probabilistic as probabilistic
import pyprimes.sieves as sieves
YEAR100 = 100*365*24*60*60 # One hundred years, in seconds.
class Stopwatch(object):
def __init__(self, timer=None):
if timer is None:
from timeit import default_timer as timer
self.timer = timer
self.reset()
def reset(self):
"""Reset all the collected timer results."""
try:
del self._start
except AttributeError:
pass
self._elapsed = 0.0
def start(self):
"""Start the timer."""
self._start = self.timer()
def stop(self):
"""Stop the timer."""
t = self.timer()
self._elapsed = t - self._start
del self._start
@property
def elapsed(self):
return self._elapsed
def trial(generator, count, repeat=1):
timer = Stopwatch()
best = YEAR100
for i in range(repeat):
it = generator()
timer.reset()
timer.start()
# Go to the count-th prime as fast as possible.
p = next(islice(it, count-1, count))
timer.stop()
best = min(best, timer.elapsed)
return best
def run(generators, number, repeat=1):
print ("Calculating speeds for first %d primes..." % number)
template = "\r ...%d of %d %s"
heading = """\
Generator Elapsed Speed
(sec) (primes/sec)
=============================================================="""
records = []
timer = Stopwatch() # For measuring the total elapsed time.
timer.start()
N = len(generators)
for i, generator in enumerate(generators):
name = generator.__module__ + '.' + generator.__name__
sys.stdout.write((template % (i+1, N, name)).ljust(69))
sys.stdout.flush()
t = trial(generator, number, repeat)
records.append((number/t, t, name))
timer.stop()
sys.stdout.write("\r%-69s\n" % "Done!")
print ('Total elapsed time: %.1f seconds' % timer.elapsed)
print ('')
records.sort()
print (heading)
for speed, elapsed, name in records:
print ("%-36s %4.2f %8.1f" % (name, elapsed, speed))
print ('==============================================================\n')
VERY_SLOW = [awful.primes0, awful.primes1, awful.primes2, awful.turner]
SLOW = [awful.primes3, awful.primes4, probabilistic.primes]
FAST = [sieves.cookbook, sieves.croft, sieves.sieve, sieves.wheel]
MOST = SLOW + FAST
ALL = VERY_SLOW + MOST
run(VERY_SLOW + SLOW, 1000)
run([awful.primes3, awful.trial_division], 5000)
#run([awful.primes3, awful.trial_division], 50000)
#run([awful.primes3, awful.trial_division], 100000)
#run([awful.primes3, awful.trial_division], 200000)
exit()
run(ALL, 500, 3)
run(MOST, 10000)
run(FAST, 1000000)
"""
Python 2.6 or better
import multiprocessing
import time
# bar
def bar():
for i in range(100):
print "Tick"
time.sleep(1)
if __name__ == '__main__':
# Start bar as a process
p = multiprocessing.Process(target=bar)
p.start()
# Wait for 10 seconds or until process finishes
p.join(10)
# If thread is still active
if p.is_alive():
print "running... let's kill it..."
# Terminate
p.terminate()
p.join()
"""
"""
Unix only, Python 2.5 or better.
In [1]: import signal
# Register an handler for the timeout
In [2]: def handler(signum, frame):
...: print "Forever is over!"
...: raise Exception("end of time")
...:
# This function *may* run for an indetermined time...
In [3]: def loop_forever():
...: import time
...: while 1:
...: print "sec"
...: time.sleep(1)
...:
...:
# Register the signal function handler
In [4]: signal.signal(signal.SIGALRM, handler)
Out[4]: 0
# Define a timeout for your function
In [5]: signal.alarm(10)
Out[5]: 0
In [6]: try:
...: loop_forever()
...: except Exception, exc:
...: print exc
....:
sec
sec
sec
sec
sec
sec
sec
sec
Forever is over!
end of time
# Cancel the timer if the function returned before timeout
# (ok, mine won't but yours maybe will :)
In [7]: signal.alarm(0)
Out[7]: 0
"""
|
skilledindia/pyprimes
|
src/pyprimes/speed.py
|
Python
|
mit
| 4,872
|
from Screens.MessageBox import MessageBox
from boxbranding import getMachineBrand, getMachineName
from Screens.ParentalControlSetup import ProtectedScreen
from Components.config import config
from Tools.BoundFunction import boundFunction
from Screens.InputBox import PinInput
class FactoryReset(MessageBox, ProtectedScreen):
def __init__(self, session):
MessageBox.__init__(self, session, _("When you do a factory reset, you will lose ALL your configuration data\n"
"(including bouquets, services, satellite data ...)\n"
"After completion of factory reset, your %s %s will restart automatically!\n\n"
"Really do a factory reset?") % (getMachineBrand(), getMachineName()), MessageBox.TYPE_YESNO, default = False)
self.setTitle(_("Factory reset"))
self.skinName = "MessageBox"
if self.isProtected() and config.ParentalControl.servicepin[0].value:
self.onFirstExecBegin.append(boundFunction(self.session.openWithCallback, self.pinEntered, PinInput, pinList=[x.value for x in config.ParentalControl.servicepin], triesEntry=config.ParentalControl.retries.servicepin, title=_("Please enter the correct pin code"), windowTitle=_("Enter pin code")))
def isProtected(self):
return config.ParentalControl.setuppinactive.value and (not config.ParentalControl.config_sections.main_menu.value or hasattr(self.session, 'infobar') and self.session.infobar is None) and config.ParentalControl.config_sections.manufacturer_reset.value
def pinEntered(self, result):
if result is None:
self.closeProtectedScreen()
elif not result:
self.session.openWithCallback(self.close(), MessageBox, _("The pin code you entered is wrong."), MessageBox.TYPE_ERROR, timeout=3)
def closeProtectedScreen(self, result=None):
self.close(None)
|
schleichdi2/OpenNfr_E2_Gui-6.0
|
lib/python/Screens/FactoryReset.py
|
Python
|
gpl-2.0
| 1,747
|
#####################################################################
# -*- coding: iso-8859-1 -*- #
# #
# Frets on Fire X (FoFiX) #
# Copyright (C) 2006 Sami Kyöstilä #
# 2008 myfingershurt #
# 2008 Blazingamer #
# 2008 evilynux <evilynux@gmail.com> #
# #
# This program is free software; you can redistribute it and/or #
# modify it under the terms of the GNU General Public License #
# as published by the Free Software Foundation; either version 2 #
# of the License, or (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program; if not, write to the Free Software #
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, #
# MA 02110-1301, USA. #
#####################################################################
import Log
import Version
import os
import sys
import imp
import Config
import Song
from OpenGL.GL import *
from OpenGL.GLU import *
import string
import math
from Language import _
from Shader import shaders
from Task import Task
from constants import *
#Theme Constants.
GUITARTYPES = [0, 1, 4]
DRUMTYPES = [2, 3]
MICTYPES = [5]
defaultDict = {}
classNames = {'setlist': lambda x: Setlist(x), 'themeLobby': lambda x: ThemeLobby(x), 'partDiff': lambda x: ThemeParts(x)}
def halign(value, default='center'):
try:
return {'left': LEFT,
'center': CENTER,
'right': RIGHT}[value.lower()]
except KeyError:
Log.warn('Invalid horizontal alignment value - defaulting to %s' % default)
return halign(default)
def valign(value, default='middle'):
try:
if value.lower() == 'center':
Log.notice('Use of "center" for vertical alignment is deprecated. Use "middle" instead.')
return {'top': TOP,
'middle': MIDDLE, # for consistency with HTML/CSS terminology
'center': MIDDLE, # for temporary backward compatibility
'bottom': BOTTOM}[value.lower()]
except KeyError:
Log.warn('Invalid vertical alignment value - defaulting to %s' % default)
return valign(default)
class Theme(Task):
def __getattr__(self, attr):
try: #getting to this function is kinda slow. Set it on the first get to keep renders from lagging.
object.__getattribute__(self, '__dict__')[attr] = defaultDict[attr]
Log.debug("No theme variable for %s - Loading default..." % attr)
return object.__getattribute__(self, attr)
except KeyError:
if attr in classNames.keys():
Log.warn("No theme class for %s - Loading default..." % attr)
object.__getattribute__(self, '__dict__')[attr] = classNames[attr](self)
return object.__getattribute__(self, attr)
elif attr.startswith('__') and attr.endswith('__'): #for object's attributes (eg: __hash__, __eq__)
return object.__getattribute__(self, attr)
Log.error("Attempted to load theme variable %s - no default found." % attr)
def __init__(self, path, name):
self.name = name
self.path = path
self.themePath = os.path.join(Version.dataPath(),"themes", name)
if not os.path.exists(self.themePath):
Log.warn("Theme: %s does not exist!\n" % self.themePath)
name = Config.get("coffee", "themename")
Log.notice("Theme: Attempting fallback to default theme \"%s\"." % name)
self.themePath = os.path.join(Version.dataPath(),"themes", name)
if not os.path.exists(self.themePath):
Log.error("Theme: %s does not exist!\nExiting.\n" % self.themePath)
sys.exit(1)
if os.path.exists(os.path.join(self.themePath, "theme.ini")):
self.config = Config.MyConfigParser()
self.config.read(os.path.join(self.themePath, "theme.ini"))
Log.debug("theme.ini loaded")
else:
self.config = None
Log.debug("no theme.ini")
def get(value, type = str, default = None):
if self.config:
if self.config.has_option("theme", value):
if type == bool:
return isTrue(self.config.get("theme", value).lower())
elif type == "color":
return self.hexToColor(self.config.get("theme", value))
else:
return type(self.config.get("theme", value))
if type == "color":
return self.hexToColor(default)
return default
#These colors are very important
#background_color defines what color openGL will clear too
# (the color that shows then no image is present)
#base_color is the default color of text in menus
#selected_color is the color of text when it is selected
# (like in the settings menu or when selecting a song)
self.backgroundColor = get("background_color", "color", "#000000")
self.baseColor = get("base_color", "color", "#FFFFFF")
self.selectedColor = get("selected_color", "color", "#FFBF00")
#notes that are not textured are drawn in 3 parts (Mesh, Mesh_001, Mesh_002, and occasionally Mesh_003)
#The color of mesh is set by mesh_color (on a GH note this is the black ring)
#The color of the Mesh_001 is the color of the note (green, red, yellow, etc)
#Mesh_002 is set by the hopo_color but if Mesh_003 is present it will be colored spot_color
#When Mesh_003 is present it will be colored hopo_color
self.meshColor = get("mesh_color", "color", "#000000")
self.hopoColor = get("hopo_color", "color", "#00AAAA")
self.spotColor = get("spot_color", "color", "#FFFFFF")
#keys when they are not textured are made of three parts (Mesh, Key_001, Key_002),
#two of which can be colored by the CustomTheme.py or the Theme.ini (Mesh, Mesh_002).
#These will only work if the object has a Glow_001 mesh in it, else it will render
#the whole object the color of the fret
#Key_001 is colored by key_color, Key_002 is colored by key2_color, pretty obvious, eh?
self.keyColor = get("key_color", "color", "#333333")
self.key2Color = get("key2_color", "color", "#000000")
#when a note is hit a glow will show aside from the hitflames, this has been around
#since the original Frets on Fire. What glow_color allows you to do is set it so
#the glow is either the color of the fret it's over or it can be the color the image
#actually is (if the image is white then no matter what key is hit the glow will be white)
self.hitGlowColor = get("hit_glow_color", str, "frets")
if not self.hitGlowColor == "frets":
self.hitGlowColor = self.hexToColor(self.hitGlowColor)
#Sets the color of the glow.png
self.glowColor = get("glow_color", str, "frets")
if not self.glowColor == "frets":
self.glowColor = self.hexToColor(self.glowColor)
#Acts similar to the glowColor but its does so for flames instead
self.flamesColor = get("flames_color", str, "frets")
if not self.flamesColor == "frets":
self.flamesColor = self.hexToColor(self.flamesColor)
#Note Colors (this applies to frets and notes)
#default is green, red, yellow, blue, orange, purple (I don't know why there's a 6th color)
default_color = ["#22FF22", "#FF2222", "#FFFF22", "#3333FF", "#FF9933", "#CC22CC"]
self.noteColors = [get("fret%d_color" % i, "color", default_color[i]) for i in range(6)]
self.spNoteColor = get("fretS_color", "color", "#4CB2E5")
#Specifies how the power color is used in-game for both Active power and gaining power
self.powerGainColorToggle = get("power_color_gain_toggle", bool, True)
self.powerActiveColorToggle = get("power_color_active_toggle", bool, False)
#Color of the tails when whammied, default is set to the colors of the frets
self.killNoteColor = get("fretK_color", str, "frets")
if not self.killNoteColor == "frets":
self.killNoteColor = self.hexToColor(self.killNoteColor)
#just like glow_color, this allows you to have tails use either the color of the note
#or the actual color of the tail
self.use_fret_colors = get("use_fret_colors", bool, False)
#themes can define how many frames their hitflames will be.
# Separate variables for hit and hold animation frame counts.
self.HitFlameFrameLimit = get("hit_flame_frame_limit", int, 13)
self.HoldFlameFrameLimit = get("hold_flame_frame_limit", int, 16)
#Lets themers turn alpha = True to alpha = False making black not removed from the flames or glows.
self.hitFlameBlackRemove = get("hit_flame_black_remove", bool, True)
self.hitGlowsBlackRemove = get("hit_Glows_black_remove", bool, True)
#Rotation in degrees for the hitFlames and hitGlows x y and z axix
self.hitFlameRotation = (get("flame_rotation_base", float, 90), get("flame_rotation_x", float, 1), get("flame_rotation_y", float, 0), get("flame_rotation_z", float, 0))
self.hitGlowsRotation = (get("hit_glow_rotation_base", float, 90), get("hit_glow_rotation_x", float, .5), get("hit_glow_rotation_y", float, 0), get("hit_glow_rotation_z", float, 0))
#The rotation offset will offset each flame/glow so that if the themer chooses so
#they can align them with the frets individually
self.hitGlowOffset = (get("hit_glow_offset_0", float, 0), get("hit_glow_offset_1", float, 0), get("hit_glow_offset_2", float, 0), get("hit_glow_offset_3", float, 0), get("hit_glow_offset_4", float, 0))
self.hitFlameOffset = (get("flame_offset_0", float, 0), get("flame_offset_1", float, 0), get("flame_offset_2", float, 0), get("flame_offset_3", float, 0), get("flame_offset_4", float, 0))
self.drumHitFlameOffset = (get("drum_flame_offset_0", float, 0), get("drum_flame_offset_1", float, 0), get("drum_flame_offset_2", float, 0), get("drum_flame_offset_3", float, 0), get("drum_flame_offset_4", float, 0))
#controls the size of the hitflames
self.hitFlameSize = get("hit_flame_size", float, .075)
#controls the y and y position of the hitflames
self.hitFlamePos = (get("hit_flame_y_position", float, .3), get("hit_flame_z_position", float, 0))
#controls the size of the hitflame glows
self.holdFlameSize = get("hold_flame_size", float, .075)
#controls the y position of the hitflames glows
self.holdFlamePos = (get("hold_flame_y_position", int, 0), get("hold_flame_z_position", int, 0))
self.fretPress = get("fretPress", bool, False)
#Point of View (x, y, z)
self.povTarget = (get("pov_target_x", float), get("pov_target_y", float), get("pov_target_z", float))
self.povOrigin = (get("pov_origin_x", float), get("pov_origin_y", float), get("pov_origin_z", float))
#pov presets
self.povPreset = (get("pov_preset", str, "rb"))
#Render necks toggle
self.doNecksRender = (get("render_necks", bool, True))
#Pause menu type
self.pauseMenuType = (get("pause_menu_type", str, "RB"))
#fretboard intro animation
self.povIntroAnimation = (get("fretboard_intro_animation", str, "fofix"))
#Note Tail Speed multiplier
self.noteTailSpeedMulti = (get("note_tail_speed", float, 0))
#Loading phrases
self.loadingPhrase = get("loading_phrase", str, "Let's get this show on the Road_Impress the Crowd_" +
"Don't forget to strum!_Rock the house!_Jurgen is watching").split("_")
self.resultsPhrase = get("results_phrase", str, "").split("_")
#crowd_loop_delay controls how long (in milliseconds) FoFiX needs to wait before
#playing the crowd noise again in the results screen after it finishes
self.crowdLoopDelay = get("crowd_loop_delay", int)
#When a song starts up it displays the info of the song (artist, name, etc)
#positioning and the size of the font are handled by these values respectively
self.songInfoDisplayScale = get("song_info_display_scale", float, 0.0020)
self.songInfoDisplayX = get("song_info_display_X", float, 0.05)
self.songInfoDisplayY = get("song_info_display_Y", float, 0.05)
#when AI is enabled, this value controls where in the player's window
#it should say that "Jurgen is here" and how large the words need to be
self.jurgTextPos = get("jurgen_text_pos", str, "1,1,.00035").split(",")
#just a little misc option that allows you to change the name of what you
#what starpower/overdrive to be called. Some enjoy the classic Jurgen Power
#name from Hering's mod.
self.power_up_name = get("power_up_name", str, "Jurgen Power")
self.countdownPosX = get("countdown_pos_x", float, 0.5)
self.countdownPosY = get("countdown_pos_y", float, 0.45)
#These values determine the width of the neck as well as the length of it
#width seems pretty obvious but length has an advantage in that by making
#it shorter the fade away comes sooner. This is handy for unique POV because
#sometimes static hud object (the lyric display) can get in the way.
self.neckWidth = get("neck_width", float, 3.0)
self.neckLength = get("neck_length", float, 9.0)
#When in the neck choosing screen, these values determine the position of the
#prompt that is usually at the top of the screen and says how to choose a neck
self.neck_prompt_x = get("menu_neck_choose_x", float, 0.1)
self.neck_prompt_y = get("menu_neck_choose_y", float, 0.05)
#Big Rock Ending and Solo Frame Graphics
self.breScoreBackgroundScale = get("breScoreBackgroundScale", float, 1.0)
self.breScoreFrameScale = get("breScoreFrameScale", float, 1.0)
self.soloFrameScale = get("soloFrameScale", float, 1.0)
#Setlist
#This is really a bit of a mess but luckily most of the names are quite self
#explanatory. These values are only necessary if your theme is using the old
#default code that takes advantage of having the 4 different modes
#list, cd, list/cd hybrid, rb2
#if you're not using the default setlist display then don't bother with these values
self.songListDisplay = get("song_list_display", int, 0)
self.setlistguidebuttonsposX = get("setlistguidebuttonsposX", float, 0.408)
self.setlistguidebuttonsposY = get("setlistguidebuttonsposY", float, 0.0322)
self.setlistguidebuttonsscaleX = get("setlistguidebuttonsscaleX", float, 0.29)
self.setlistguidebuttonsscaleY = get("setlistguidebuttonsscaleY", float, 0.308)
self.setlistpreviewbuttonposX = get("setlistpreviewbuttonposX", float, 0.5)
self.setlistpreviewbuttonposY = get("setlistpreviewbuttonposY", float, 0.5)
self.setlistpreviewbuttonscaleX = get("setlistpreviewbuttonscaleX", float, 0.5)
self.setlistpreviewbuttonscaleY = get("setlistpreviewbuttonscaleY", float, 0.5)
self.songSelectSubmenuOffsetLines = get("song_select_submenu_offset_lines")
self.songSelectSubmenuOffsetSpaces = get("song_select_submenu_offset_spaces")
self.songSelectSubmenuX = get("song_select_submenu_x")
self.songSelectSubmenuY = get("song_select_submenu_y")
self.song_cd_Xpos = get("song_cd_x", float, 0.0)
self.song_cdscore_Xpos = get("song_cdscore_x", float, 0.6)
self.song_listcd_cd_Xpos = get("song_listcd_cd_x", float, .75)
self.song_listcd_cd_Ypos = get("song_listcd_cd_y", float, .6)
self.song_listcd_score_Xpos = get("song_listcd_score_x", float, .6)
self.song_listcd_score_Ypos = get("song_listcd_score_y", float, .5)
self.song_listcd_list_Xpos = get("song_listcd_list_x", float, .1)
self.song_list_Xpos = get("song_list_x", float, 0.15)
self.song_listscore_Xpos = get("song_listscore_x", float, 0.8)
self.songlist_score_colorVar = get("songlist_score_color", "color", "#93C351")
self.songlistcd_score_colorVar = get("songlistcd_score_color", "color", "#FFFFFF")
self.career_title_colorVar = get("career_title_color", "color", "#000000")
self.song_name_text_colorVar = get("song_name_text_color", "color", "#FFFFFF")
self.song_name_selected_colorVar = get("song_name_selected_color", "color", "#FFBF00")
self.artist_text_colorVar = get("artist_text_color", "color", "#4080FF")
self.artist_selected_colorVar = get("artist_selected_color", "color", "#4080FF")
self.library_text_colorVar = get("library_text_color", "color", "#FFFFFF")
self.library_selected_colorVar = get("library_selected_color", "color", "#FFBF00")
self.song_rb2_diff_colorVar = get("song_rb2_diff_color", "color", "#FFBF00")
#These determine the position of the version tag on the main menu.
self.versiontagScale = get("versiontagScale", float, 0.5)
self.versiontagposX = get("versiontagposX", float, 0.5)
self.versiontagposY = get("versiontagposY", float, 0.05)
#pause menu and fail menu positions and text colors
self.pause_bkg_pos = get("pause_bkg", str, "0.5,0.5,1.0,1.0").split(",")
self.pause_text_xPos = get("pause_text_x", float)
self.pause_text_yPos = get("pause_text_y", float)
self.pause_text_colorVar = get("pause_text_color", "color", "#FFFFFF")
self.pause_selected_colorVar = get("pause_selected_color", "color", "#FFBF00")
self.fail_completed_colorVar = get("fail_completed_color", "color", "#FFFFFF")
self.fail_text_colorVar = get("fail_text_color", "color", "#FFFFFF")
self.fail_selected_colorVar = get("fail_selected_color", "color", "#FFBF00")
self.fail_bkg_pos = get("fail_bkg", str, "0.5,0.5,1.0,1.0").split(",")
self.fail_text_xPos = get("fail_text_x", float)
self.fail_text_yPos = get("fail_text_y", float)
self.fail_songname_xPos = get("fail_songname_x", float, 0.5)
self.fail_songname_yPos = get("fail_songname_y", float, 0.35)
self.opt_bkg_size = get("opt_bkg", str, "0.5,0.5,1.0,1.0").split(",")
self.opt_text_xPos = get("opt_text_x", float)
self.opt_text_yPos = get("opt_text_y", float)
self.opt_text_colorVar = get("opt_text_color", "color", "#FFFFFF")
self.opt_selected_colorVar = get("opt_selected_color", "color", "#FFBF00")
#main menu system
self.menuPos = [get("menu_x", float, 0.2), get("menu_y", float, 0.8)]
self.menuRB = get("rbmenu", bool, False)
self.main_menu_scaleVar = get("main_menu_scale", float, 0.5)
self.main_menu_vspacingVar = get("main_menu_vspacing", float, .09)
self.use_solo_submenu = get("use_solo_submenu", bool, True)
#Settings option scale
self.settingsmenuScale = get("settings_menu_scale", float, 0.002)
#loading Parameters
self.loadingX = get("loading_x", float, 0.5)
self.loadingY = get("loading_y", float, 0.6)
self.loadingFScale = get("loading_font_scale", float, 0.0015)
self.loadingRMargin = get("loading_right_margin", float, 1.0)
self.loadingLSpacing = get("loading_line_spacing", float, 1.0)
self.loadingColor = get("loading_text_color", "color", "#FFFFFF")
#this is the amount you can offset the shadow in the loading screen text
self.shadowoffsetx = get("shadowoffsetx", float, .0022)
self.shadowoffsety = get("shadowoffsety", float, .0005)
self.sub_menu_xVar = get("sub_menu_x", float, None)
self.sub_menu_yVar = get("sub_menu_y", float, None)
#self.songback = get("songback")
self.versiontag = get("versiontag", bool, False)
#these are the little help messages at the bottom of the
#options screen when you hover over an item
self.menuTipTextY = get("menu_tip_text_y", float, .7)
self.menuTipTextFont = get("menu_tip_text_font", str, "font")
self.menuTipTextScale = get("menu_tip_text_scale", float, .002)
self.menuTipTextColor = get("menu_tip_text_color", "color", "#FFFFFF")
self.menuTipTextScrollSpace = get("menu_tip_text_scroll_space", float, .25)
self.menuTipTextScrollMode = get("menu_tip_text_scroll_mode", int, 0)
self.menuTipTextDisplay = get("menu_tip_text_display", bool, False)
#Lobby
self.controlActivateX = get("control_activate_x", float, 0.645)
self.controlActivateSelectX = get("control_activate_select_x", float, 0.5)
self.controlActivatePartX = get("control_activate_part_x", float, 0.41)
self.controlActivateY = get("control_activate_y", float, 0.18)
self.controlActivateScale = get("control_activate_scale", float, 0.0018)
self.controlActivateSpace = get("control_activate_part_size", float, 22.000)
self.controlActivatePartSize = get("control_activate_space", float, 0.045)
self.controlActivateFont = get("control_activate_font", str, "font")
self.controlDescriptionX = get("control_description_x", float, 0.5)
self.controlDescriptionY = get("control_description_y", float, 0.13)
self.controlDescriptionScale = get("control_description_scale", float, 0.002)
self.controlDescriptionFont = get("control_description_font", str, "font")
self.controlCheckX = get("control_description_scale", float, 0.002)
self.controlCheckY = get("control_check_x", float, 0.16)
self.controlCheckTextY = get("control_check_text_y", float, 0.61)
self.controlCheckPartMult = get("control_check_part_mult", float, 2.8)
self.controlCheckScale = get("control_check_space", float, 0.23)
self.controlCheckSpace = get("control_check_scale", float, 0.0018)
self.controlCheckFont = get("control_check_font", str, "font")
self.lobbyMode = get("lobby_mode", int, 0)
self.lobbyPreviewX = get("lobby_preview_x", float, 0.7)
self.lobbyPreviewY = get("lobby_preview_y", float, 0.0)
self.lobbyPreviewSpacing = get("lobby_preview_spacing", float, 0.04)
self.lobbyTitleX = get("lobby_title_x", float, 0.5)
self.lobbyTitleY = get("lobby_title_y", float, 0.07)
self.lobbyTitleCharacterX = get("lobby_title_character_x", float, 0.26)
self.lobbyTitleCharacterY = get("lobby_title_character_y", float, 0.24)
self.lobbyTitleScale = get("lobby_title_scale", float, 0.0024)
self.lobbyTitleFont = get("lobby_title_font", str, "loadingFont")
self.lobbyAvatarX = get("lobby_avatar_x", float, 0.7)
self.lobbyAvatarY = get("lobby_avatar_y", float, 0.75)
self.lobbyAvatarScale = get("lobby_avatar_scale", float, 1.0)
self.lobbySelectX = get("lobby_select_x", float, 0.4)
self.lobbySelectY = get("lobby_select_y", float, 0.32)
self.lobbySelectImageX = get("lobby_select_image_x", float, 0.255)
self.lobbySelectImageY = get("lobby_select_image_y", float, 0.335)
self.lobbySelectScale = get("lobby_select_scale", float, 0.0018)
self.lobbySelectSpace = get("lobby_select_space", float, 0.04)
self.lobbySelectFont = get("lobby_select_font", str, "font")
self.lobbySelectLength = get("lobby_select_length", int, 5)
self.lobbyTitleColor = get("lobby_title_color", "color", "#FFFFFF")
self.lobbyInfoColor = get("lobby_info_color", "color", "#FFFFFF")
self.lobbyFontColor = get("lobby_font_color", "color", "#FFFFFF")
self.lobbyPlayerColor = get("lobby_player_color", "color", "#FFFFFF")
self.lobbySelectColor = get("lobby_select_color", "color", "#FFBF00")
self.lobbyDisableColor = get("lobby_disable_color", "color", "#666666")
self.characterCreateX = get("character_create_x", float, 0.25)
self.characterCreateY = get("character_create_y", float, 0.15)
self.characterCreateHelpX = get("character_create_help_x", float, 0.5)
self.characterCreateHelpY = get("character_create_help_y", float, 0.73)
self.characterCreateScale = get("character_create_scale", float, 0.0018)
self.characterCreateSpace = get("character_create_space", float, 0.045)
self.characterCreateHelpScale = get("character_create_help_scale", float, 0.0018)
self.characterCreateOptionX = get("character_create_option_x", float, 0.75)
self.characterCreateOptionFont = get("character_create_option_font", str, "font")
self.characterCreateHelpFont = get("character_create_help_font", str, "loadingFont")
self.characterCreateFontColor = get("character_create_font_color", "color", "#FFFFFF")
self.characterCreateSelectColor = get("character_create_select_color", "color", "#FFBF00")
self.characterCreateHelpColor = get("character_create_help_color", "color", "#FFFFFF")
self.avatarSelectTextX = get("avatar_select_text_x", float, 0.44)
self.avatarSelectTextY = get("avatar_select_text_y", float, 0.16)
self.avatarSelectTextScale = get("avatar_select_text_scale", float, 0.0027)
self.avatarSelectAvX = get("avatar_select_avatar_x", float, 0.667)
self.avatarSelectAvY = get("avatar_select_avatar_y", float, 0.5)
self.avatarSelectWheelY = get("avatar_select_wheel_y", float, 0.0)
self.avatarSelectFont = get("avatar_select_font", str, "font")
self.lobbyPanelAvatarDimension = (get("lobbyPanelAvatarWidth", float, 200.00),
get("lobbyPanelAvatarHeight", float, 110.00))
self.lobbyTitleText = get("lobbyTitleText", str, "Lobby")
self.lobbyTitleTextPos = (get("lobbyTitleTextX", str, 0.3),
get("lobbyTitleTextY", float, 0.015))
self.lobbyTitleTextAlign = halign(get("lobbyTitleTextAlign", str, "CENTER"))
self.lobbyTitleTextScale = get("lobbyTitleTextScale", float, .001)
self.lobbyTitleTextFont = get("lobbyTitleTextFont", str, "font")
self.lobbySubtitleText = get("lobbySubtitleText", str, "Choose Your Character!")
self.lobbySubtitleTextPos = (get("lobbySubtitleTextX", float, 0.5),
get("lobbySubtitleTextY", float, 0.015))
self.lobbySubtitleTextScale = get("lobbySubtitleTextScale", float, .0015)
self.lobbySubtitleTextFont = get("lobbySubtitleTextFont", str, "font")
self.lobbySubtitleTextAlign = halign(get("lobbySubtitleTextAlign", str, "CENTER"))
self.lobbyOptionScale = get("lobbyOptionScale", float, .001)
self.lobbyOptionAlign = halign(get("lobbyOptionAlign", str, "CENTER"))
self.lobbyOptionFont = get("lobbyOptionFont", str, "font")
self.lobbyOptionPos = (get("lobbyOptionX", float, .5),
get("lobbyOptionY", float, .46))
self.lobbyOptionSpace = get("lobbyOptionSpace", float, .04)
self.lobbyOptionColor = get("lobbyOptionColor", "color", "#FFFFFF")
self.lobbySaveCharScale = get("lobbySaveCharScale", float, .001)
self.lobbySaveCharAlign = halign(get("lobbySaveCharAlign", str, "CENTER"))
self.lobbySaveCharFont = get("lobbySaveCharFont", str, "font")
self.lobbySaveCharColor = get("lobbySaveCharColor", "color", "#FFFFFF")
self.lobbyGameModePos = (get("lobbyGameModeX", float, 0.7),
get("lobbyGameModeY", float, 0.015))
self.lobbyGameModeScale = get("lobbyGameModeScale", float, .001)
self.lobbyGameModeAlign = halign(get("lobbyGameModeAlign", str, "CENTER"))
self.lobbyGameModeFont = get("lobbyGameModeFont", str, "font")
self.lobbyGameModeColor = get("lobbyGameModeColor", "color", "#FFFFFF")
self.lobbyPanelNamePos = (get("lobbyPanelNameX", float, 0.0),
get("lobbyPanelNameY", float, 0.0))
self.lobbyPanelNameFont = get("lobbyPanelNameFont", str, "font")
self.lobbyPanelNameScale = get("lobbyPanelNameScale", float, .001)
self.lobbyPanelNameAlign = halign(get("lobbyPanelNameAlign", str, "LEFT"), 'left')
self.lobbyControlPos = (get("lobbyControlX", float, .5),
get("lobbyControlY", float, .375))
self.lobbyControlFont = get("lobbyControlFont", str, "font")
self.lobbyControlScale = get("lobbyControlScale", float, .0025)
self.lobbyControlAlign = halign(get("lobbyControlAlign", str, "CENTER"))
self.lobbyHeaderColor = get("lobbyHeaderColor", "color", "#FFFFFF")
self.lobbySelectLength = get("lobbySelectLength", int, 4)
self.lobbyPartScale = get("lobbyPartScale", float, .25)
self.lobbyPartPos = (get("lobbyPartX", float, .5),
get("lobbyPartY", float, .52))
self.lobbyControlImgScale = get("lobbyControlImgScale", float, .25)
self.lobbyControlImgPos = (get("lobbyControlImgX", float, .5),
get("lobbyControlImgY", float, .55))
self.lobbyKeyboardImgScale = get("lobbyKeyboardImgScale", float, .1)
self.lobbyKeyboardImgPos = (get("lobbyKeyboardImgX", float, .8),
get("lobbyKeyboardImgY", float, .95))
self.lobbySelectedColor = get("lobbySelectedColor", "color", "#FFFF66")
self.lobbyDisabledColor = get("lobbyDisabledColor", "color", "#BBBBBB")
self.lobbyPanelSize = (get("lobbyPanelWidth", float, .2),
get("lobbyPanelHeight", float, .8))
self.lobbyPanelPos = (get("lobbyPanelX", float, .04),
get("lobbyPanelY", float, .1))
self.lobbyPanelSpacing = get("lobbyPanelSpacing", float, .24)
self.partDiffTitleText = get("partDiffTitleText", str, "Select a Part and Difficulty")
self.partDiffTitleTextPos = (get("partDiffTitleTextX", float, .5),
get("partDiffTitleTextY", float, .1))
self.partDiffTitleTextAlign = halign(get("partDiffTitleTextAlign", str, "CENTER"))
self.partDiffTitleTextScale = get("partDiffTitleTextScale", float, .0025)
self.partDiffTitleTextFont = get("partDiffTitleTextFont", str, "font")
self.partDiffSubtitleText = get("partDiffSubtitleText", str, "Ready to Play!")
self.partDiffSubtitleTextPos = (get("partDiffSubtitleX", float, .5),
get("partDiffSubtitleY", float, .15))
self.partDiffSubtitleTextAlign = halign(get("partDiffSubtitleTextAlign", str, "CENTER"))
self.partDiffSubtitleTextScale = get("partDiffSubtitleTextScale", float, .0015)
self.partDiffSubtitleTextFont = get("partDiffSubtitleTextFont", str, "font")
self.partDiffOptionScale = get("partDiffOptionScale", float, .001)
self.partDiffOptionAlign = halign(get("partDiffOptionAlign", str, "CENTER"))
self.partDiffOptionFont = get("partDiffOptionFont", str, "font")
self.partDiffOptionPos = (get("partDiffOptionX", float, .5),
get("partDiffOptionY", float, .46))
self.partDiffOptionSpace = get("partDiffOptionScale", float, .04)
self.partDiffOptionColor = get("partDiffOptionColor", "color", "#FFFFFF")
self.partDiffSelectedColor = get("partDiffSelectedColor", "color", "#FFFF66")
self.partDiffGameModeScale = get("partDiffGameModeScale", float, .001)
self.partDiffGameModeAlign = halign(get("partDiffGameModeAlign", str, "CENTER"))
self.partDiffGameModeFont = get("partDiffGameModeFont", str, "font")
self.partDiffGameModePos = (get("partDiffGameModeX", float, .07),
get("partDiffGameModeY", float, .015))
self.partDiffGameModeColor = get("partDiffGameModeColor", "color", "#FFFFFF")
self.partDiffPanelNameScale = get("partDiffPanelNameScale", float, .001)
self.partDiffPanelNameAlign = halign(get("partDiffPanelNameAlign", str, "LEFT"), 'left')
self.partDiffPanelNameFont = get("partDiffPanelNameFont", str, "font")
self.partDiffPanelNamePos = (get("partDiffPanelNameX", float, 0.0),
get("partDiffPanelNameY", float, 0.0))
self.partDiffControlScale = get("partDiffControlScale", float, .0025)
self.partDiffControlAlign = halign(get("partDiffControlAlign", str, "CENTER"))
self.partDiffControlFont = get("partDiffControlFont", str, "font")
self.partDiffControlPos = (get("partDiffControlX", float, .5),
get("partDiffControlY", float, .375))
self.partDiffHeaderColor = get("partDiffHeaderColor", "color", "#FFFFFF")
self.partDiffPartScale = get("partDiffPartScale", float, .25)
self.partDiffPartPos = (get("partDiffPartX", float, .5),
get("partDiffpartY", float, .52))
self.partDiffKeyboardImgScale = get("partDiffKeyboardImgScale", float, .1)
self.partDiffKeyboardImgPos = (get("partDiffKeyboardImgX", float, .8),
get("partDiffKeyboardImgY", float, .95))
self.partDiffPanelSpacing = get("partDiffPanelSpacing", float, .24)
self.partDiffPanelPos = (get("partDiffPanelX", float, .04),
get("partDiffPanelY", float, .1))
self.partDiffPanelSize = (get("partDiffPanelWidth", float, .2),
get("partDiffPanelHeight", float, .8))
#Vocal mode
self.vocalMeterSize = get("vocal_meter_size", float, 45.000)
self.vocalMeterX = get("vocal_meter_x", float, .25)
self.vocalMeterY = get("vocal_meter_y", float, .8)
self.vocalMultX = get("vocal_mult_x", float, .28)
self.vocalMultY = get("vocal_mult_y", float, .8)
self.vocalPowerX = get("vocal_power_x", float, .5)
self.vocalPowerY = get("vocal_power_y", float, .8)
self.vocalFillupCenterX = get("vocal_fillup_center_x", int, 139)
self.vocalFillupCenterY = get("vocal_fillup_center_y", int, 151)
self.vocalFillupInRadius = get("vocal_fillup_in_radius", int, 25)
self.vocalFillupOutRadius = get("vocal_fillup_out_radius", int, 139)
self.vocalFillupFactor = get("vocal_fillup_factor", float, 300.000)
self.vocalFillupColor = get("vocal_fillup_color", "color", "#DFDFDE")
self.vocalCircularFillup = get("vocal_circular_fillup", bool, True)
self.vocalLaneSize = get("vocal_lane_size", float, .002)
self.vocalGlowSize = get("vocal_glow_size", float, .012)
self.vocalGlowFade = get("vocal_glow_fade", float, .6)
self.vocalLaneColor = get("vocal_lane_color", "color", "#99FF80")
self.vocalShadowColor = get("vocal_shadow_color", "color", "#CCFFBF")
self.vocalGlowColor = get("vocal_glow_color", "color", "#33FF00")
self.vocalLaneColorStar = get("vocal_lane_color_star", "color", "#FFFF80")
self.vocalShadowColorStar = get("vocal_shadow_color_star", "color", "#FFFFBF")
self.vocalGlowColorStar = get("vocal_glow_color_star", "color", "#FFFF00")
#3D Note/Fret rendering system
self.twoDnote = get("twoDnote", bool, True)
self.twoDkeys = get("twoDkeys", bool, True)
#3D notes spin when they are star power notes
self.threeDspin = get("threeDspin", bool, False)
#configure rotation and positioning along the neck for the 3d objects scrolling down
self.noterot = [get("noterot"+str(i+1), float, 0) for i in range(5)]
self.keyrot = [get("keyrot"+str(i+1), float, 0) for i in range(5)]
self.drumnoterot = [get("drumnoterot"+str(i+1), float, 0) for i in range(5)]
self.drumkeyrot = [get("drumkeyrot"+str(i+1), float, 0) for i in range(5)]
self.notepos = [get("notepos"+str(i+1), float, 0) for i in range(5)]
self.keypos = [get("keypos"+str(i+1), float, 0) for i in range(5)]
self.drumnotepos = [get("drumnotepos"+str(i+1), float, 0) for i in range(5)]
self.drumkeypos = [get("drumkeypos"+str(i+1), float, 0) for i in range(5)]
#3D setting for making the notes always face the camera
self.billboardNote = get("billboardNote", bool, True)
self.shaderSolocolor = get("shaderSoloColor", "color", "#0000FF")
#In-game rendering
self.hopoIndicatorX = get("hopo_indicator_x")
self.hopoIndicatorY = get("hopo_indicator_y")
self.hopoIndicatorActiveColor = get("hopo_indicator_active_color", "color", "#FFFFFF")
self.hopoIndicatorInactiveColor = get("hopo_indicator_inactive_color", "color", "#666666")
self.markSolos = get("mark_solo_sections", int, 2)
self.ingame_stats_colorVar = get("ingame_stats_color", "color", "#FFFFFF")
self.fpsRenderPos = (get("fps_display_pos_x", float, .85), get("fps_display_pos_y", float, .055))
#Game results scene
self.result_score = get("result_score", str, ".5,.11,0.0025,None,None").split(",")
self.result_star = get("result_star", str, ".5,.4,0.15,1.1").split(",")
self.result_song = get("result_song", str, ".05,.045,.002,None,None").split(",")
self.result_song_form = get("result_song_form", int, 0)
self.result_song_text = get("result_song_text", str, "%s Finished!").strip()
self.result_stats_part = get("result_stats_part", str, ".5,.64,0.002,None,None").split(",")
self.result_stats_part_text = get("result_stats_part_text", str, "Part: %s").strip()
self.result_stats_name = get("result_stats_name", str, ".5,.73,0.002,None,None").split(",")
self.result_stats_diff = get("result_stats_diff", str, ".5,.55,0.002,None,None").split(",")
self.result_stats_diff_text = get("result_stats_diff_text", str, "Difficulty: %s").strip()
self.result_stats_accuracy = get("result_stats_accuracy", str, ".5,.61,0.002,None,None").split(",")
self.result_stats_accuracy_text = get("result_stats_accuracy_text", str, "Accuracy: %.1f%%").strip()
self.result_stats_streak = get("result_stats_streak", str, ".5,.58,0.002,None,None").split(",")
self.result_stats_streak_text = get("result_stats_streak_text", str, "Long Streak: %s").strip()
self.result_stats_notes = get("result_stats_notes", str, ".5,.52,0.002,None,None").split(",")
self.result_stats_notes_text = get("result_stats_notes_text", str, "%s Notes Hit").strip()
self.result_cheats_info = get("result_cheats_info", str, ".5,.3,.002").split(",")
self.result_cheats_numbers = get("result_cheats_numbers", str, ".5,.35,.0015").split(",")
self.result_cheats_percent = get("result_cheats_percent", str, ".45,.4,.0015").split(",")
self.result_cheats_score = get("result_cheats_score", str, ".75,.4,.0015").split(",")
self.result_cheats_color = get("result_cheats_color", "color", "#FFFFFF")
self.result_cheats_font = get("result_cheats_font", str, "font")
self.result_high_score_font = get("result_high_score_font", str, "font")
self.result_menu_x = get("result_menu_x", float, .5)
self.result_menu_y = get("result_menu_y", float, .2)
self.result_star_type = get("result_star_type", int, 0)
#Submenus
self.submenuScale = {}
self.submenuX = {}
self.submenuY = {}
self.submenuVSpace = {}
if os.path.exists(os.path.join(self.themePath,"menu")):
allfiles = os.listdir(os.path.join(self.themePath,"menu"))
listmenu = []
for name in allfiles:
if name.find("text") > -1:
found = os.path.splitext(name)[0]
if found == "maintext":
continue
Config.define("theme", found, str, None)
self.submenuScale[found] = None
self.submenuX[found] = None
self.submenuY[found] = None
self.submenuVSpace[found] = None
listmenu.append(found)
for i in listmenu:
if i == "maintext":
continue
if self.submenuX[i]:
self.submenuX[i] = get(i).split(",")[0].strip()
if self.submenuY[i]:
self.submenuY[i] = get(i).split(",")[1].strip()
if self.submenuScale[i]:
self.submenuScale[i] = get(i).split(",")[2].strip()
if self.submenuVSpace[i]:
self.submenuVSpace[i] = get(i).split(",")[3].strip()
def setSelectedColor(self, alpha = 1.0):
glColor4f(*(self.selectedColor + (alpha,)))
def setBaseColor(self, alpha = 1.0):
glColor4f(*(self.baseColor + (alpha,)))
def hexToColorResults(self, color):
if isinstance(color, tuple):
return color
elif color is None:
return self.baseColor
color = color.strip()
if color[0] == "#":
color = color[1:]
if len(color) == 3:
return (int(color[0], 16) / 15.0, int(color[1], 16) / 15.0, int(color[2], 16) / 15.0)
return (int(color[0:2], 16) / 255.0, int(color[2:4], 16) / 255.0, int(color[4:6], 16) / 255.0)
return self.baseColor
@staticmethod
def hexToColor(color):
if isinstance(color, tuple):
return color
elif color is None:
return (0,0,0)
if color[0] == "#":
color = color[1:]
if len(color) == 3:
return (int(color[0], 16) / 15.0, int(color[1], 16) / 15.0, int(color[2], 16) / 15.0)
elif len(color) == 4:
return (int(color[0], 16) / 15.0, int(color[1], 16) / 15.0, int(color[2], 16) / 15.0, int(color[3], 16) / 15.0)
elif len(color) == 8:
return (int(color[0:2], 16) / 255.0, int(color[2:4], 16) / 255.0, int(color[4:6], 16) / 255.0, int(color[6:8], 16) / 255.0)
return (int(color[0:2], 16) / 255.0, int(color[2:4], 16) / 255.0, int(color[4:6], 16) / 255.0)
elif color.lower() == "off":
return (-1, -1, -1)
elif color.lower() == "fret":
return (-2, -2, -2)
return (0, 0, 0)
def rgbToColor(self, color):
retVal = []
for c in color:
if isinstance(c, int) and c > 1:
retVal.append(float(c)/255.0)
return tuple(retVal)
@staticmethod
def colorToHex(color):
if isinstance(color, str):
return color
return "#" + ("".join(["%02x" % int(c * 255) for c in color]))
def packTupleKey(self, key, type = str):
vals = key.split(',')
if isinstance(type, list):
retval = tuple(type[i](n.strip()) for i, n in enumerate(vals))
else:
retval = tuple(type(n.strip()) for n in vals)
return retval
def loadThemeModule(self, moduleName):
try:
fp, pathname, description = imp.find_module(moduleName,[self.path])
module = imp.load_module(moduleName, fp, pathname, description)
if moduleName in ["CustomLobby", "ThemeLobby"]:
return module.CustomLobby(self)
elif moduleName in ["CustomSetlist", "Setlist"]:
return module.CustomSetlist(self)
elif moduleName in ["CustomParts", "ThemeParts"]:
return module.CustomParts(self)
else:
return None
except ImportError:
if moduleName in ["CustomLobby", "ThemeLobby"]:
return ThemeLobby(self)
elif moduleName in ["CustomSetlist", "Setlist"]:
return Setlist(self)
elif moduleName in ["CustomParts", "ThemeParts"]:
return ThemeParts(self)
else:
return None
def run(self, ticks):
pass
class ThemeLobby:
def __init__(self, theme):
self.theme = theme
self.currentImage = -1
self.nextImage = 0
self.fadeTime = 2500
def run(self, ticks, lobby):
self.fadeTime += ticks
if self.fadeTime >= 2500:
self.fadeTime -= 2500
self.currentImage = (self.currentImage + 1)%4
i = self.currentImage
while not lobby.partImages[self.currentImage]:
self.currentImage = (self.currentImage + 1)%4
if i == self.currentImage:
break
if lobby.partImages[self.currentImage]:
self.nextImage = (self.currentImage + 1)%4
i = self.nextImage
while not lobby.partImages[self.nextImage]:
self.nextImage = (self.nextImage + 1)%4
if i == self.nextImage:
break
def drawPartImage(self, lobby, type, scale, coord):
if not lobby.partImages[self.currentImage]:
return
if type in GUITARTYPES:
if self.fadeTime < 1000 or self.nextImage == self.currentImage:
lobby.drawImage(lobby.partImages[self.currentImage], scale = scale, coord = coord)
else:
lobby.drawImage(lobby.partImages[self.currentImage], scale = scale, coord = coord, color = (1,1,1,((2500.0-self.fadeTime)/1500.0)))
lobby.drawImage(lobby.partImages[self.nextImage], scale = scale, coord = coord, color = (1,1,1,((self.fadeTime-1000.0)/1500.0)))
glColor4f(1,1,1,1)
elif type in DRUMTYPES:
if lobby.partImages[4]:
lobby.drawImage(lobby.partImages[4], scale = scale, coord = coord)
else:
if lobby.partImages[5]:
lobby.drawImage(lobby.partImages[5], scale = scale, coord = coord)
def renderPanels(self, lobby):
x = self.theme.lobbyPanelPos[0]
y = self.theme.lobbyPanelPos[1]
w, h = lobby.geometry
controlFont = lobby.fontDict[self.theme.lobbyControlFont]
panelNameFont = lobby.fontDict[self.theme.lobbyPanelNameFont]
optionFont = lobby.fontDict[self.theme.lobbyOptionFont]
wP = w*self.theme.lobbyPanelSize[0]
hP = h*self.theme.lobbyPanelSize[1]
glColor3f(*self.theme.lobbyHeaderColor)
if self.theme.lobbyTitleText:
lobby.fontDict[self.theme.lobbyTitleTextFont].render(self.theme.lobbyTitleText, self.theme.lobbyTitleTextPos, scale = self.theme.lobbyTitleTextScale, align = self.theme.lobbyTitleTextAlign)
if self.theme.lobbySubtitleText:
lobby.fontDict[self.theme.lobbySubtitleTextFont].render(self.theme.lobbySubtitleText, self.theme.lobbySubtitleTextPos, scale = self.theme.lobbySubtitleTextScale, align = self.theme.lobbySubtitleTextAlign)
lobby.fontDict[self.theme.lobbyGameModeFont].render(lobby.gameModeText, self.theme.lobbyGameModePos, scale = self.theme.lobbyGameModeScale, align = self.theme.lobbyGameModeAlign)
for i in range(4):
j = lobby.panelOrder[i]
if j in lobby.blockedPlayers or len(lobby.selectedPlayers) == lobby.maxPlayers:
glColor3f(*self.theme.lobbyDisabledColor)
else:
glColor3f(*self.theme.lobbyHeaderColor)
if i == lobby.keyControl and lobby.img_keyboard_panel:
lobby.drawImage(lobby.img_keyboard_panel, scale = (self.theme.lobbyPanelSize[0], -self.theme.lobbyPanelSize[1]), coord = (wP*.5+w*x,hP*.5+h*y), stretched = FULL_SCREEN)
elif lobby.img_panel:
lobby.drawImage(lobby.img_panel, scale = (self.theme.lobbyPanelSize[0], -self.theme.lobbyPanelSize[1]), coord = (wP*.5+w*x,hP*.5+h*y), stretched = FULL_SCREEN)
if i == lobby.keyControl and lobby.img_keyboard:
lobby.drawImage(lobby.img_keyboard, scale = (self.theme.lobbyKeyboardImgScale, -self.theme.lobbyKeyboardImgScale), coord = (wP*self.theme.lobbyKeyboardImgPos[0]+w*x, hP*self.theme.lobbyKeyboardImgPos[1]+h*y))
controlFont.render(lobby.controls[j], (self.theme.lobbyPanelSize[0]*self.theme.lobbyControlPos[0]+x, self.theme.lobbyPanelSize[1]*self.theme.lobbyControlPos[1]+y), scale = self.theme.lobbyControlScale, align = self.theme.lobbyControlAlign, new = True)
self.drawPartImage(lobby, lobby.types[j], scale = (self.theme.lobbyPartScale, -self.theme.lobbyPartScale), coord = (wP*self.theme.lobbyPartPos[0]+w*x, hP*self.theme.lobbyPartPos[1]+h*y))
#self.drawControlImage(lobby, lobby.types[j], scale = (self.theme.lobbyControlImgScale, -self.theme.lobbyControlImgScale), coord = (wP*self.theme.lobbyControlImgPos[0]+w*x, hP*self.theme.lobbyControlImgPos[1]+h*y))
panelNameFont.render(lobby.options[lobby.selected[j]].lower(), (x+w*self.theme.lobbyPanelNamePos[0], y+h*self.theme.lobbyPanelNamePos[1]), scale = self.theme.lobbyPanelNameScale, align = self.theme.lobbyPanelNameAlign, new = True)
for l, k in enumerate(range(lobby.pos[j][0], lobby.pos[j][1]+1)):
if k >= len(lobby.options):
break
if lobby.selected[j] == k and (j not in lobby.blockedPlayers or j in lobby.selectedPlayers):
if lobby.img_selected:
lobby.drawImage(lobby.img_selected, scale = (.5, -.5), coord = (wP*.5+w*x, hP*(.46*.75)+h*y-(h*.04*l)/.75))
if lobby.avatars[k]:
lobby.drawImage(lobby.avatars[k], scale = (lobby.avatarScale[k], -lobby.avatarScale[k]), coord = (wP*.5+w*x, hP*.7+h*y))
elif k == 0 and lobby.img_newchar_av:
lobby.drawImage(lobby.img_newchar_av, scale = (lobby.newCharAvScale, -lobby.newCharAvScale), coord = (wP*.5+w*x, hP*.7+h*y))
elif lobby.img_default_av:
lobby.drawImage(lobby.img_default_av, scale = (lobby.defaultAvScale, -lobby.defaultAvScale), coord = (wP*.5+w*x, hP*.7+h*y))
glColor3f(*self.theme.lobbySelectedColor)
elif k in lobby.blockedItems or j in lobby.blockedPlayers:
glColor3f(*self.theme.lobbyDisabledColor)
else:
glColor3f(*self.theme.lobbyOptionColor)
if k == 1:
if lobby.img_save_char:
lobby.drawImage(lobby.img_save_char, scale = (.5, -.5), coord = (wP*.5+w*x, hP*(.46*.75)+h*y-(h*.04*l)/.75))
else:
glColor3f(*self.theme.lobbySaveCharColor)
lobby.fontDict[self.theme.lobbySaveCharFont].render(lobby.options[k], (self.theme.lobbyPanelSize[0]*self.theme.lobbyOptionPos[0]+x,self.theme.lobbyPanelSize[1]*self.theme.lobbyOptionPos[1]+y+self.theme.lobbyOptionSpace*l), scale = self.theme.lobbySaveCharScale, align = self.theme.lobbySaveCharAlign, new = True)
else:
optionFont.render(lobby.options[k], (self.theme.lobbyPanelSize[0]*self.theme.lobbyOptionPos[0]+x,self.theme.lobbyPanelSize[1]*self.theme.lobbyOptionPos[1]+y+self.theme.lobbyOptionSpace*l), scale = self.theme.lobbyOptionScale, align = self.theme.lobbyOptionAlign, new = True)
x += self.theme.lobbyPanelSpacing
class ThemeParts:
def __init__(self, theme):
self.theme = theme
def run(self, ticks):
pass
def drawPartImage(self, dialog, part, scale, coord):
if part in [0, 2, 4, 5]:
if dialog.partImages[part]:
dialog.drawImage(dialog.partImages[part], scale = scale, coord = coord)
else:
if dialog.partImages[part]:
dialog.drawImage(dialog.partImages[part], scale = scale, coord = coord)
else:
if dialog.partImages[0]:
dialog.drawImage(dialog.partImages[0], scale = scale, coord = coord)
def renderPanels(self, dialog):
x = self.theme.partDiffPanelPos[0]
y = self.theme.partDiffPanelPos[1]
w, h = dialog.geometry
font = dialog.fontDict['font']
controlFont = dialog.fontDict[self.theme.partDiffControlFont]
panelNameFont = dialog.fontDict[self.theme.partDiffPanelNameFont]
wP = w*self.theme.partDiffPanelSize[0]
hP = h*self.theme.partDiffPanelSize[1]
glColor3f(*self.theme.partDiffHeaderColor)
dialog.engine.fadeScreen(-2.00)
if self.theme.partDiffTitleText:
dialog.fontDict[self.theme.partDiffTitleTextFont].render(self.theme.partDiffTitleText, self.theme.partDiffTitleTextPos, scale = self.theme.partDiffTitleTextScale, align = self.theme.partDiffTitleTextAlign)
if self.theme.partDiffSubtitleText:
dialog.fontDict[self.theme.partDiffSubtitleTextFont].render(self.theme.partDiffSubtitleText, self.theme.partDiffSubtitleTextPos, scale = self.theme.partDiffSubtitleTextScale, align = self.theme.partDiffSubtitleTextAlign)
for i in range(len(dialog.players)):
glColor3f(*self.theme.partDiffHeaderColor)
dialog.fontDict[self.theme.partDiffGameModeFont].render(dialog.gameModeText, self.theme.partDiffGameModePos, scale = self.theme.partDiffGameModeScale, align = self.theme.partDiffGameModeAlign)
if i == dialog.keyControl and dialog.img_keyboard_panel:
dialog.drawImage(dialog.img_keyboard_panel, scale = (self.theme.partDiffPanelSize[0], -self.theme.partDiffPanelSize[1]), coord = (wP*.5+w*x,hP*.5+h*y), stretched = FULL_SCREEN)
elif dialog.img_panel:
dialog.drawImage(dialog.img_panel, scale = (self.theme.partDiffPanelSize[0], -self.theme.partDiffPanelSize[1]), coord = (wP*.5+w*x,hP*.5+h*y), stretched = FULL_SCREEN)
if i == dialog.keyControl and dialog.img_keyboard:
dialog.drawImage(dialog.img_keyboard, scale = (self.theme.partDiffKeyboardImgScale, -self.theme.partDiffKeyboardImgScale), coord = (wP*self.theme.partDiffKeyboardImgPos[0]+w*x, hP*self.theme.partDiffKeyboardImgPos[1]+h*y))
controlFont.render(dialog.players[i].name, (self.theme.partDiffPanelSize[0]*self.theme.partDiffControlPos[0]+x, self.theme.partDiffPanelSize[1]*self.theme.partDiffControlPos[1]+y), scale = self.theme.partDiffControlScale, align = self.theme.partDiffControlAlign, new = True)
panelNameFont.render(dialog.players[i].name.lower(), (x+w*self.theme.partDiffPanelNamePos[0], y+h*self.theme.partDiffPanelNamePos[1]), scale = self.theme.partDiffPanelNameScale, align = self.theme.partDiffPanelNameAlign, new = True)
if dialog.mode[i] == 0:
self.drawPartImage(dialog, dialog.parts[i][dialog.selected[i]].id, scale = (self.theme.partDiffPartScale, -self.theme.partDiffPartScale), coord = (wP*self.theme.partDiffPartPos[0]+w*x, hP*self.theme.partDiffPartPos[1]+h*y))
for p in range(len(dialog.parts[i])):
if dialog.selected[i] == p:
if dialog.img_selected:
dialog.drawImage(dialog.img_selected, scale = (.5, -.5), coord = (wP*.5+w*x, hP*(.46*.75)+h*y-(h*.04*p)/.75))
glColor3f(*self.theme.partDiffSelectedColor)
else:
glColor3f(*self.theme.partDiffOptionColor)
font.render(str(dialog.parts[i][p]), (.2*.5+x,.8*.46+y+.04*p), scale = .001, align = 1, new = True)
elif dialog.mode[i] == 1:
self.drawPartImage(dialog, dialog.players[i].part.id, scale = (self.theme.partDiffPartScale, -self.theme.partDiffPartScale), coord = (wP*self.theme.partDiffPartPos[0]+w*x, hP*self.theme.partDiffPartPos[1]+h*y))
for d in range(len(dialog.info.partDifficulties[dialog.players[i].part.id])):
if dialog.selected[i] == d:
if dialog.img_selected:
dialog.drawImage(dialog.img_selected, scale = (.5, -.5), coord = (wP*.5+w*x, hP*(.46*.75)+h*y-(h*.04*d)/.75))
glColor3f(*self.theme.partDiffSelectedColor)
else:
glColor3f(*self.theme.partDiffOptionColor)
font.render(str(dialog.info.partDifficulties[dialog.players[i].part.id][d]), (.2*.5+x,.8*.46+y+.04*d), scale = .001, align = 1, new = True)
if i in dialog.readyPlayers:
if dialog.img_ready:
dialog.drawImage(dialog.img_ready, scale = (.5, -.5), coord = (wP*.5+w*x,hP*(.75*.46)+h*y))
x += .24
class Setlist:
def __init__(self, theme):
self.theme = theme
self.setlist_type = theme.songListDisplay
if self.setlist_type is None:
self.setlist_type = 1
if self.setlist_type == 0: #CD mode
self.setlistStyle = 0
self.headerSkip = 0
self.footerSkip = 0
self.labelType = 1
self.labelDistance = 2
self.showMoreLabels = True
self.texturedLabels = True
self.itemsPerPage = 1
self.showLockedSongs = False
self.showSortTiers = True
self.selectTiers = False
self.itemSize = (0,.125)
elif self.setlist_type == 1: #List mode
self.setlistStyle = 1
self.headerSkip = 2
self.footerSkip = 1
self.labelType = 0
self.labelDistance = 0
self.showMoreLabels = False
self.texturedLabels = False
self.itemsPerPage = 7
self.showLockedSongs = False
self.showSortTiers = True
self.selectTiers = False
self.itemSize = (0,.126)
elif self.setlist_type == 2: #List/CD mode
self.setlistStyle = 1
self.headerSkip = 0
self.footerSkip = 1
self.labelType = 1
self.labelDistance = 1
self.showMoreLabels = False
self.texturedLabels = True
self.itemsPerPage = 8
self.showLockedSongs = False
self.showSortTiers = True
self.selectTiers = False
self.itemSize = (0,.125)
else: #RB2 mode
self.setlistStyle = 0
self.headerSkip = 0
self.footerSkip = 0
self.labelType = 0
self.labelDistance = 1
self.showMoreLabels = False
self.texturedLabels = False
self.itemsPerPage = 12
self.showLockedSongs = True
self.showSortTiers = True
self.selectTiers = False
self.itemSize = (0,.07)
self.career_title_color = self.theme.career_title_colorVar
self.song_name_text_color = self.theme.song_name_text_colorVar
self.song_name_selected_color = self.theme.song_name_selected_colorVar
self.song_rb2_diff_color = self.theme.song_rb2_diff_colorVar
self.artist_text_color = self.theme.artist_text_colorVar
self.artist_selected_color = self.theme.artist_selected_colorVar
self.library_text_color = self.theme.library_text_colorVar
self.library_selected_color = self.theme.library_selected_colorVar
self.songlist_score_color = self.theme.songlist_score_colorVar
self.songlistcd_score_color = self.theme.songlistcd_score_colorVar
self.song_cd_xpos = theme.song_cd_Xpos
self.song_cdscore_xpos = theme.song_cdscore_Xpos
self.song_list_xpos = theme.song_list_Xpos
self.song_listscore_xpos = theme.song_listscore_Xpos
self.song_listcd_list_xpos = theme.song_listcd_list_Xpos
self.song_listcd_cd_xpos = theme.song_listcd_cd_Xpos
self.song_listcd_cd_ypos = theme.song_listcd_cd_Ypos
self.song_listcd_score_xpos = theme.song_listcd_score_Xpos
self.song_listcd_score_ypos = theme.song_listcd_score_Ypos
def run(self, ticks):
pass
def renderHeader(self, scene):
pass
def renderUnselectedItem(self, scene, i, n):
w, h = scene.geometry
font = scene.fontDict['songListFont']
lfont = scene.fontDict['songListFont']
if self.setlist_type == 0:
return
elif self.setlist_type == 1:
if not scene.items:
return
item = scene.items[i]
glColor4f(0,0,0,1)
if isinstance(item, Song.SongInfo) or isinstance(item, Song.RandomSongInfo):
c1,c2,c3 = self.song_name_text_color
glColor3f(c1,c2,c3)
elif isinstance(item, Song.LibraryInfo):
c1,c2,c3 = self.library_text_color
glColor3f(c1,c2,c3)
elif isinstance(item, Song.TitleInfo) or isinstance(item, Song.SortTitleInfo):
c1,c2,c3 = self.career_title_color
glColor3f(c1,c2,c3)
text = item.name
if isinstance(item, Song.SongInfo) and item.getLocked(): #TODO: SongDB
text = _("-- Locked --")
if isinstance(item, Song.SongInfo): #MFH - add indentation when tier sorting
if scene.tiersPresent:
text = " " + text
if isinstance(item, Song.TitleInfo) or isinstance(item, Song.SortTitleInfo):
text = string.upper(text)
scale = lfont.scaleText(text, maxwidth = 0.440)
lfont.render(text, (self.song_list_xpos, .0925*(n+1)-.0375), scale = scale)
#MFH - Song list score / info display:
if isinstance(item, Song.SongInfo) and not item.getLocked():
scale = 0.0009
text = scene.scoreDifficulty.text
c1,c2,c3 = self.songlist_score_color
glColor3f(c1,c2,c3)
lfont.render(text, (self.song_listscore_xpos, .0925*(n+1)-.034), scale=scale, align = 2)
if not item.frets == "":
suffix = ", ("+item.frets+")"
else:
suffix = ""
if not item.year == "":
yeartag = ", "+item.year
else:
yeartag = ""
scale = .0014
c1,c2,c3 = self.artist_text_color
glColor3f(c1,c2,c3)
# evilynux - Force uppercase display for artist name
text = string.upper(item.artist)+suffix+yeartag
# evilynux - automatically scale artist name and year
scale = lfont.scaleText(text, maxwidth = 0.440, scale = scale)
if scale > .0014:
scale = .0014
lfont.render(text, (self.song_list_xpos+.05, .0925*(n+1)+.0125), scale=scale)
score = _("Nil")
stars = 0
name = ""
try:
difficulties = item.partDifficulties[scene.scorePart.id]
except KeyError:
difficulties = []
for d in difficulties:
if d.id == scene.scoreDifficulty.id:
scores = item.getHighscores(d, part = scene.scorePart)
if scores:
score, stars, name, scoreExt = scores[0]
try:
notesHit, notesTotal, noteStreak, modVersion, handicap, handicapLong, originalScore = scoreExt
except ValueError:
notesHit, notesTotal, noteStreak, modVersion, oldScores1, oldScores2 = scoreExt
handicap = 0
handicapLong = "None"
originalScore = score
else:
score, stars, name = 0, 0, "---"
if score == _("Nil") and scene.nilShowNextScore: #MFH
for d in difficulties: #MFH - just take the first valid difficulty you can find and display it.
scores = item.getHighscores(d, part = scene.scorePart)
if scores:
score, stars, name, scoreExt = scores[0]
try:
notesHit, notesTotal, noteStreak, modVersion, handicap, handicapLong, originalScore = scoreExt
except ValueError:
notesHit, notesTotal, noteStreak, modVersion, oldScores1, oldScores2 = scoreExt
handicap = 0
handicapLong = "None"
originalScore = score
break
else:
score, stars, name = 0, 0, "---"
else:
score, stars, name = _("Nil"), 0, "---"
starx = self.song_listscore_xpos+.01
stary = .0925*(n+1)-0.039
starscale = 0.03
stary = 1.0 - (stary / scene.fontScreenBottom)
scene.drawStarScore(w, h, starx, stary - h/2, stars, starscale, horiz_spacing = 1.0, hqStar = True) #MFH
scale = 0.0014
# evilynux - score color
c1,c2,c3 = self.songlist_score_color
glColor3f(c1,c2,c3)
# evilynux - hit% and note streak only if enabled
if score is not _("Nil") and score > 0 and notesTotal != 0:
text = "%.1f%% (%d)" % ((float(notesHit) / notesTotal) * 100.0, noteStreak)
lfont.render(text, (self.song_listscore_xpos+.1, .0925*(n+1)-.015), scale=scale, align = 2)
text = str(score)
lfont.render(text, (self.song_listscore_xpos+.1, .0925*(n+1)+.0125), scale=scale*1.28, align = 2)
elif self.setlist_type == 2: #old list/cd
if not scene.items:
return
item = scene.items[i]
if isinstance(item, Song.SongInfo) or isinstance(item, Song.RandomSongInfo):
c1,c2,c3 = self.song_name_text_color
glColor4f(c1,c2,c3,1)
if isinstance(item, Song.LibraryInfo):
c1,c2,c3 = self.library_text_color
glColor4f(c1,c2,c3,1)
if isinstance(item, Song.TitleInfo) or isinstance(item, Song.SortTitleInfo):
c1,c2,c3 = self.career_title_color
glColor4f(c1,c2,c3,1)
text = item.name
if isinstance(item, Song.SongInfo) and item.getLocked():
text = _("-- Locked --")
if isinstance(item, Song.SongInfo): #MFH - add indentation when tier sorting
if scene.tiersPresent:
text = " " + text
if isinstance(item, Song.TitleInfo) or isinstance(item, Song.SortTitleInfo):
text = string.upper(text)
scale = font.scaleText(text, maxwidth = 0.45)
font.render(text, (self.song_listcd_list_xpos, .09*(n+1)), scale = scale)
if isinstance(item, Song.SongInfo) and not item.getLocked():
if not item.frets == "":
suffix = ", ("+item.frets+")"
else:
suffix = ""
if not item.year == "":
yeartag = ", "+item.year
else:
yeartag = ""
scale = .0014
c1,c2,c3 = self.artist_text_color
glColor4f(c1,c2,c3,1)
text = string.upper(item.artist)+suffix+yeartag
scale = font.scaleText(text, maxwidth = 0.4, scale = scale)
font.render(text, (self.song_listcd_list_xpos + .05, .09*(n+1)+.05), scale=scale)
elif self.setlist_type == 3: #old rb2
font = scene.fontDict['songListFont']
if not scene.items or scene.itemIcons is None:
return
item = scene.items[i]
if scene.img_tier:
imgwidth = scene.img_tier.width1()
imgheight = scene.img_tier.height1()
wfactor = 381.1/imgwidth
hfactor = 24.000/imgheight
if isinstance(item, Song.TitleInfo) or isinstance(item, Song.SortTitleInfo) and scene.img_tier:
scene.drawImage(scene.img_tier, scale = (wfactor,-hfactor), coord = (w/1.587, h-((0.055*h)*(n+1))-(0.219*h)))
icon = None
if isinstance(item, Song.SongInfo):
if item.icon != "":
try:
icon = scene.itemIcons[item.icon]
imgwidth = icon.width1()
wfactor = 23.000/imgwidth
scene.drawImage(icon, scale = (wfactor,-wfactor), coord = (w/2.86, h-((0.055*h)*(n+1))-(0.219*h)))
except KeyError:
pass
elif isinstance(item, Song.LibraryInfo):
try:
icon = scene.itemIcons["Library"]
imgwidth = icon.width1()
wfactor = 23.000/imgwidth
scene.drawImage(icon, scale = (wfactor,-wfactor), coord = (w/2.86, h-((0.055*h)*(n+1))-(0.219*h)))
except KeyError:
pass
elif isinstance(item, Song.RandomSongInfo):
try:
icon = scene.itemIcons["Random"]
imgwidth = icon.width1()
wfactor = 23.000/imgwidth
scene.drawImage(icon, scale = (wfactor,-wfactor), coord = (w/2.86, h-((0.055*h)*(n+1))-(0.219*h)))
except KeyError:
pass
if isinstance(item, Song.SongInfo) or isinstance(item, Song.LibraryInfo):
c1,c2,c3 = self.song_name_text_color
glColor4f(c1,c2,c3,1)
elif isinstance(item, Song.TitleInfo) or isinstance(item, Song.SortTitleInfo):
c1,c2,c3 = self.career_title_color
glColor4f(c1,c2,c3,1)
elif isinstance(item, Song.RandomSongInfo):
c1,c2,c3 = self.song_name_text_color
glColor4f(c1,c2,c3,1)
text = item.name
if isinstance(item, Song.SongInfo) and item.getLocked():
text = _("-- Locked --")
if isinstance(item, Song.SongInfo): #MFH - add indentation when tier sorting
if scene.tiersPresent or icon:
text = " " + text
# evilynux - Force uppercase display for Career titles
maxwidth = .55
if isinstance(item, Song.TitleInfo) or isinstance(item, Song.SortTitleInfo):
text = string.upper(text)
scale = .0015
wt, ht = font.getStringSize(text, scale = scale)
while wt > maxwidth:
tlength = len(text) - 4
text = text[:tlength] + "..."
wt, ht = font.getStringSize(text, scale = scale)
if wt < .45:
break
font.render(text, (.35, .0413*(n+1)+.15), scale = scale)
if isinstance(item, Song.SongInfo):
score = _("Nil")
stars = 0
name = ""
if not item.getLocked():
try:
difficulties = item.partDifficulties[scene.scorePart.id]
except KeyError:
difficulties = []
for d in difficulties:
if d.id == scene.scoreDifficulty.id:
scores = item.getHighscores(d, part = scene.scorePart)
if scores:
score, stars, name, scoreExt = scores[0]
try:
notesHit, notesTotal, noteStreak, modVersion, handicap, handicapLong, originalScore = scoreExt
except ValueError:
notesHit, notesTotal, noteStreak, modVersion, oldScores1, oldScores2 = scoreExt
handicap = 0
handicapLong = "None"
originalScore = score
break
else:
score, stars, name = 0, 0, "---"
if score == _("Nil") and scene.nilShowNextScore: #MFH
for d in difficulties: #MFH - just take the first valid difficulty you can find and display it.
scores = item.getHighscores(d, part = scene.scorePart)
if scores:
score, stars, name, scoreExt = scores[0]
try:
notesHit, notesTotal, noteStreak, modVersion, handicap, handicapLong, originalScore = scoreExt
except ValueError:
notesHit, notesTotal, noteStreak, modVersion, oldScores1, oldScores2 = scoreExt
break
else:
score, stars, name = 0, 0, "---"
else:
score, stars, name = _("Nil"), 0, "---"
#evilynux - hit% and note streak if enabled
scale = 0.0009
if score is not _("Nil") and score > 0 and notesTotal != 0:
text = "%.1f%% (%d)" % ((float(notesHit) / notesTotal) * 100.0, noteStreak)
font.render(text, (.92, .0413*(n+1)+.163), scale=scale, align = 2)
text = str(score)
font.render(text, (.92, .0413*(n+1)+.15), scale=scale, align = 2)
def renderSelectedItem(self, scene, n):
w, h = scene.geometry
font = scene.fontDict['songListFont']
lfont = scene.fontDict['songListFont']
sfont = scene.fontDict['shadowFont']
item = scene.selectedItem
if not item:
return
if isinstance(item, Song.BlankSpaceInfo):
return
if self.setlist_type == 0:
return
elif self.setlist_type == 1:
y = h*(.88-(.125*n))
if scene.img_item_select:
wfactor = scene.img_item_select.widthf(pixelw = 635.000)
scene.drawImage(scene.img_item_select, scale = (wfactor,-wfactor), coord = (w/2.1, y))
glColor4f(0,0,0,1)
if isinstance(item, Song.SongInfo) or isinstance(item, Song.RandomSongInfo):
c1,c2,c3 = self.song_name_selected_color
glColor3f(c1,c2,c3)
elif isinstance(item, Song.LibraryInfo):
c1,c2,c3 = self.library_selected_color
glColor3f(c1,c2,c3)
elif isinstance(item, Song.TitleInfo) or isinstance(item, Song.SortTitleInfo):
c1,c2,c3 = self.career_title_color
glColor3f(c1,c2,c3)
text = item.name
if isinstance(item, Song.SongInfo) and item.getLocked(): #TODO: SongDB
text = _("-- Locked --")
if isinstance(item, Song.SongInfo): #MFH - add indentation when tier sorting
if scene.tiersPresent:
text = " " + text
if isinstance(item, Song.TitleInfo) or isinstance(item, Song.SortTitleInfo):
text = string.upper(text)
scale = sfont.scaleText(text, maxwidth = 0.440)
sfont.render(text, (self.song_list_xpos, .0925*(n+1)-.0375), scale = scale)
#MFH - Song list score / info display:
if isinstance(item, Song.SongInfo) and not item.getLocked():
scale = 0.0009
text = scene.scoreDifficulty.text
c1,c2,c3 = self.songlist_score_color
glColor3f(c1,c2,c3)
lfont.render(text, (self.song_listscore_xpos, .0925*(n+1)-.034), scale=scale, align = 2)
if not item.frets == "":
suffix = ", ("+item.frets+")"
else:
suffix = ""
if not item.year == "":
yeartag = ", "+item.year
else:
yeartag = ""
scale = .0014
c1,c2,c3 = self.artist_selected_color
glColor3f(c1,c2,c3)
# evilynux - Force uppercase display for artist name
text = string.upper(item.artist)+suffix+yeartag
# evilynux - automatically scale artist name and year
scale = lfont.scaleText(text, maxwidth = 0.440, scale = scale)
if scale > .0014:
scale = .0014
lfont.render(text, (self.song_list_xpos+.05, .0925*(n+1)+.0125), scale=scale)
score = _("Nil")
stars = 0
name = ""
try:
difficulties = item.partDifficulties[scene.scorePart.id]
except KeyError:
difficulties = []
for d in difficulties:
if d.id == scene.scoreDifficulty.id:
scores = item.getHighscores(d, part = scene.scorePart)
if scores:
score, stars, name, scoreExt = scores[0]
try:
notesHit, notesTotal, noteStreak, modVersion, handicap, handicapLong, originalScore = scoreExt
except ValueError:
notesHit, notesTotal, noteStreak, modVersion, oldScores1, oldScores2 = scoreExt
handicap = 0
handicapLong = "None"
originalScore = score
else:
score, stars, name = 0, 0, "---"
if score == _("Nil") and scene.nilShowNextScore: #MFH
for d in difficulties: #MFH - just take the first valid difficulty you can find and display it.
scores = item.getHighscores(d, part = scene.scorePart)
if scores:
score, stars, name, scoreExt = scores[0]
try:
notesHit, notesTotal, noteStreak, modVersion, handicap, handicapLong, originalScore = scoreExt
except ValueError:
notesHit, notesTotal, noteStreak, modVersion, oldScores1, oldScores2 = scoreExt
handicap = 0
handicapLong = "None"
originalScore = score
break
else:
score, stars, name = 0, 0, "---"
else:
score, stars, name = _("Nil"), 0, "---"
starx = self.song_listscore_xpos+.01
stary = .0925*(n+1)-0.039
starscale = 0.03
stary = 1.0 - (stary / scene.fontScreenBottom)
scene.drawStarScore(w, h, starx, stary - h/2, stars, starscale, horiz_spacing = 1.0, hqStar = True) #MFH
scale = 0.0014
# evilynux - score color
c1,c2,c3 = self.songlist_score_color
glColor3f(c1,c2,c3)
# evilynux - hit% and note streak only if enabled
if score is not _("Nil") and score > 0 and notesTotal != 0:
text = "%.1f%% (%d)" % ((float(notesHit) / notesTotal) * 100.0, noteStreak)
lfont.render(text, (self.song_listscore_xpos+.1, .0925*(n+1)-.015), scale=scale, align = 2)
text = str(score)
lfont.render(text, (self.song_listscore_xpos+.1, .0925*(n+1)+.0125), scale=scale*1.28, align = 2)
elif self.setlist_type == 2:
y = h*(.87-(.1*n))
glColor4f(1,1,1,1)
if scene.img_selected:
imgwidth = scene.img_selected.width1()
scene.drawImage(scene.img_selected, scale = (1, -1), coord = (self.song_listcd_list_xpos * w + (imgwidth*.64/2), y*1.2-h*.215))
text = scene.library
font.render(text, (.05, .01))
if scene.songLoader:
font.render(_("Loading Preview..."), (.05, .7), scale = 0.001)
if isinstance(item, Song.SongInfo):
c1,c2,c3 = self.song_name_selected_color
glColor4f(c1,c2,c3,1)
if item.getLocked():
text = item.getUnlockText()
elif scene.careerMode and not item.completed:
text = _("Play To Advance")
elif scene.practiceMode:
text = _("Practice")
elif item.count:
count = int(item.count)
if count == 1:
text = _("Played Once")
else:
text = _("Played %d times.") % count
else:
text = _("Quickplay")
elif isinstance(item, Song.LibraryInfo):
c1,c2,c3 = self.library_selected_color
glColor4f(c1,c2,c3,1)
if item.songCount == 1:
text = _("There Is 1 Song In This Setlist.")
elif item.songCount > 1:
text = _("There Are %d Songs In This Setlist.") % (item.songCount)
else:
text = ""
elif isinstance(item, Song.TitleInfo) or isinstance(item, Song.SortTitleInfo):
text = _("Tier")
c1,c2,c3 = self.career_title_color
glColor4f(c1,c2,c3,1)
elif isinstance(item, Song.RandomSongInfo):
text = _("Random Song")
c1,c2,c3 = self.song_name_selected_color
glColor4f(c1,c2,c3,1)
font.render(text, (self.song_listcd_score_xpos, .085), scale = 0.0012)
if isinstance(item, Song.SongInfo) or isinstance(item, Song.RandomSongInfo):
c1,c2,c3 = self.song_name_selected_color
glColor4f(c1,c2,c3,1)
elif isinstance(item, Song.LibraryInfo):
c1,c2,c3 = self.library_selected_color
glColor4f(c1,c2,c3,1)
elif isinstance(item, Song.TitleInfo) or isinstance(item, Song.SortTitleInfo):
c1,c2,c3 = self.career_title_color
glColor4f(c1,c2,c3,1)
text = item.name
if isinstance(item, Song.SongInfo) and item.getLocked():
text = _("-- Locked --")
if isinstance(item, Song.SongInfo): #MFH - add indentation when tier sorting
if scene.tiersPresent:
text = " " + text
elif isinstance(item, Song.TitleInfo) or isinstance(item, Song.SortTitleInfo):
text = string.upper(text)
scale = font.scaleText(text, maxwidth = 0.45)
font.render(text, (self.song_listcd_list_xpos, .09*(n+1)), scale = scale)
if isinstance(item, Song.SongInfo) and not item.getLocked():
if not item.frets == "":
suffix = ", ("+item.frets+")"
else:
suffix = ""
if not item.year == "":
yeartag = ", "+item.year
else:
yeartag = ""
scale = .0014
c1,c2,c3 = self.artist_selected_color
glColor4f(c1,c2,c3,1)
text = string.upper(item.artist)+suffix+yeartag
scale = font.scaleText(text, maxwidth = 0.4, scale = scale)
font.render(text, (self.song_listcd_list_xpos + .05, .09*(n+1)+.05), scale=scale)
elif self.setlist_type == 3:
y = h*(.7825-(.0459*(n)))
if scene.img_tier:
imgwidth = scene.img_tier.width1()
imgheight = scene.img_tier.height1()
wfactor = 381.1/imgwidth
hfactor = 24.000/imgheight
if isinstance(item, Song.TitleInfo) or isinstance(item, Song.SortTitleInfo):
scene.drawImage(scene.img_tier, scale = (wfactor,-hfactor), coord = (w/1.587, h-((0.055*h)*(n+1))-(0.219*h)))
if scene.img_selected:
imgwidth = scene.img_selected.width1()
imgheight = scene.img_selected.height1()
wfactor = 381.5/imgwidth
hfactor = 36.000/imgheight
scene.drawImage(scene.img_selected, scale = (wfactor,-hfactor), coord = (w/1.587, y*1.2-h*.213))
icon = None
if isinstance(item, Song.SongInfo):
if item.icon != "":
try:
icon = scene.itemIcons[item.icon]
imgwidth = icon.width1()
wfactor = 23.000/imgwidth
scene.drawImage(icon, scale = (wfactor,-wfactor), coord = (w/2.86, h-((0.055*h)*(n+1))-(0.219*h)))
except KeyError:
pass
c1,c2,c3 = self.song_name_selected_color
glColor3f(c1,c2,c3)
if item.getLocked():
text = item.getUnlockText()
elif scene.careerMode and not item.completed:
text = _("Play To Advance")
elif scene.practiceMode:
text = _("Practice")
elif item.count:
count = int(item.count)
if count == 1:
text = _("Played Once")
else:
text = _("Played %d times.") % count
else:
text = _("Quickplay")
elif isinstance(item, Song.LibraryInfo):
try:
icon = scene.itemIcons["Library"]
imgwidth = icon.width1()
wfactor = 23.000/imgwidth
scene.drawImage(icon, scale = (wfactor,-wfactor), coord = (w/2.86, h-((0.055*h)*(n+1))-(0.219*h)))
except KeyError:
pass
c1,c2,c3 = self.library_selected_color
glColor3f(c1,c2,c3)
if item.songCount == 1:
text = _("There Is 1 Song In This Setlist.")
elif item.songCount > 1:
text = _("There Are %d Songs In This Setlist.") % (item.songCount)
else:
text = ""
elif isinstance(item, Song.TitleInfo) or isinstance(item, Song.SortTitleInfo):
text = _("Tier")
c1,c2,c3 = self.career_title_color
glColor3f(c1,c2,c3)
elif isinstance(item, Song.RandomSongInfo):
try:
icon = scene.itemIcons["Random"]
imgwidth = icon.width1()
wfactor = 23.000/imgwidth
scene.drawImage(icon, scale = (wfactor,-wfactor), coord = (w/2.86, h-((0.055*h)*(n+1))-(0.219*h)))
except KeyError:
pass
text = _("Random Song")
c1,c2,c3 = self.career_title_color
glColor3f(c1,c2,c3)
font.render(text, (0.92, .13), scale = 0.0012, align = 2)
maxwidth = .45
if isinstance(item, Song.SongInfo) or isinstance(item, Song.LibraryInfo) or isinstance(item, Song.RandomSongInfo):
c1,c2,c3 = self.song_name_selected_color
glColor4f(c1,c2,c3,1)
if isinstance(item, Song.TitleInfo) or isinstance(item, Song.SortTitleInfo):
c1,c2,c3 = self.career_title_color
glColor4f(c1,c2,c3,1)
text = item.name
if isinstance(item, Song.SongInfo) and item.getLocked():
text = _("-- Locked --")
if isinstance(item, Song.SongInfo): #MFH - add indentation when tier sorting
if scene.tiersPresent or icon:
text = " " + text
# evilynux - Force uppercase display for Career titles
if isinstance(item, Song.TitleInfo) or isinstance(item, Song.SortTitleInfo):
maxwidth = .55
text = string.upper(text)
scale = .0015
wt, ht = font.getStringSize(text, scale = scale)
while wt > maxwidth:
tlength = len(text) - 4
text = text[:tlength] + "..."
wt, ht = font.getStringSize(text, scale = scale)
if wt < .45:
break
font.render(text, (.35, .0413*(n+1)+.15), scale = scale) #add theme option for song_listCD_xpos
if isinstance(item, Song.SongInfo):
score = _("Nil")
stars = 0
name = ""
if not item.getLocked():
try:
difficulties = item.partDifficulties[scene.scorePart.id]
except KeyError:
difficulties = []
for d in difficulties:
if d.id == scene.scoreDifficulty.id:
scores = item.getHighscores(d, part = scene.scorePart)
if scores:
score, stars, name, scoreExt = scores[0]
try:
notesHit, notesTotal, noteStreak, modVersion, handicap, handicapLong, originalScore = scoreExt
except ValueError:
notesHit, notesTotal, noteStreak, modVersion, oldScores1, oldScores2 = scoreExt
handicap = 0
handicapLong = "None"
originalScore = score
break
else:
score, stars, name = 0, 0, "---"
if score == _("Nil") and scene.nilShowNextScore: #MFH
for d in difficulties: #MFH - just take the first valid difficulty you can find and display it.
scores = item.getHighscores(d, part = scene.scorePart)
if scores:
score, stars, name, scoreExt = scores[0]
try:
notesHit, notesTotal, noteStreak, modVersion, handicap, handicapLong, originalScore = scoreExt
except ValueError:
notesHit, notesTotal, noteStreak, modVersion, oldScores1, oldScores2 = scoreExt
break
else:
score, stars, name = 0, 0, "---"
else:
score, stars, name = _("Nil"), 0, "---"
scale = 0.0009
if score is not _("Nil") and score > 0 and notesTotal != 0:
text = "%.1f%% (%d)" % ((float(notesHit) / notesTotal) * 100.0, noteStreak)
w, h = font.getStringSize(text, scale=scale)
font.render(text, (.92, .0413*(n+1)+.163), scale=scale, align = 2)
text = str(score)
font.render(text, (.92, .0413*(n+1)+.15), scale=scale, align = 2)
def renderItem(self, scene, color, label):
if not scene.itemMesh:
return
if color:
glColor3f(*color)
glEnable(GL_COLOR_MATERIAL)
if self.setlist_type == 2:
glRotate(90, 0, 0, 1)
glRotate(((scene.time - scene.lastTime) * 2 % 360) - 90, 1, 0, 0)
scene.itemMesh.render("Mesh_001")
glColor3f(.1, .1, .1)
scene.itemMesh.render("Mesh")
if label and scene.label:
glEnable(GL_TEXTURE_2D)
label.bind()
glColor3f(1, 1, 1)
glMatrixMode(GL_TEXTURE)
glScalef(1, -1, 1)
glMatrixMode(GL_MODELVIEW)
scene.label.render("Mesh_001")
glMatrixMode(GL_TEXTURE)
glLoadIdentity()
glMatrixMode(GL_MODELVIEW)
glDisable(GL_TEXTURE_2D)
if shaders.enable("cd"):
scene.itemMesh.render("Mesh_001")
shaders.disable()
def renderLibrary(self, scene, color, label):
if not scene.libraryMesh:
return
if color:
glColor3f(*color)
glEnable(GL_NORMALIZE)
glEnable(GL_COLOR_MATERIAL)
if self.setlist_type == 2:
glRotate(-180, 0, 1, 0)
glRotate(-90, 0, 0, 1)
glRotate(((scene.time - scene.lastTime) * 4 % 360) - 90, 1, 0, 0)
scene.libraryMesh.render("Mesh_001")
glColor3f(.1, .1, .1)
scene.libraryMesh.render("Mesh")
# Draw the label if there is one
if label and scene.libraryLabel:
glEnable(GL_TEXTURE_2D)
label.bind()
glColor3f(1, 1, 1)
glMatrixMode(GL_TEXTURE)
glScalef(1, -1, 1)
glMatrixMode(GL_MODELVIEW)
scene.libraryLabel.render()
glMatrixMode(GL_TEXTURE)
glLoadIdentity()
glMatrixMode(GL_MODELVIEW)
glDisable(GL_TEXTURE_2D)
glDisable(GL_NORMALIZE)
def renderTitle(self, scene, color, label):
if not scene.tierMesh:
return
if color:
glColor3f(*color)
glEnable(GL_NORMALIZE)
glEnable(GL_COLOR_MATERIAL)
scene.tierMesh.render("Mesh_001")
glColor3f(.1, .1, .1)
scene.tierMesh.render("Mesh")
# Draw the label if there is one
if label:
glEnable(GL_TEXTURE_2D)
label.bind()
glColor3f(1, 1, 1)
glMatrixMode(GL_TEXTURE)
glScalef(1, -1, 1)
glMatrixMode(GL_MODELVIEW)
scene.libraryLabel.render()
glMatrixMode(GL_TEXTURE)
glLoadIdentity()
glMatrixMode(GL_MODELVIEW)
glDisable(GL_TEXTURE_2D)
glDisable(GL_NORMALIZE)
def renderRandom(self, scene, color, label):
if not scene.itemMesh:
return
if color:
glColor3f(*color)
glEnable(GL_NORMALIZE)
glEnable(GL_COLOR_MATERIAL)
scene.itemMesh.render("Mesh_001")
glColor3f(.1, .1, .1)
scene.itemMesh.render("Mesh")
# Draw the label if there is one
if label:
glEnable(GL_TEXTURE_2D)
label.bind()
glColor3f(1, 1, 1)
glMatrixMode(GL_TEXTURE)
glScalef(1, -1, 1)
glMatrixMode(GL_MODELVIEW)
scene.libraryLabel.render()
glMatrixMode(GL_TEXTURE)
glLoadIdentity()
glMatrixMode(GL_MODELVIEW)
glDisable(GL_TEXTURE_2D)
glDisable(GL_NORMALIZE)
def renderAlbumArt(self, scene):
if not scene.itemLabels:
return
if self.setlist_type == 0:
w, h = scene.geometry
try:
glMatrixMode(GL_PROJECTION)
glPushMatrix()
glLoadIdentity()
gluPerspective(60, scene.aspectRatio, 0.1, 1000)
glMatrixMode(GL_MODELVIEW)
glLoadIdentity()
glEnable(GL_DEPTH_TEST)
glDisable(GL_CULL_FACE)
glDepthMask(1)
offset = 0
if scene.time < 40:
offset = 10*((40 - scene.time)/40.0)**4
scene.camera.origin = (-10 + offset, -scene.cameraOffset, 4 - self.song_cd_xpos + offset)
scene.camera.target = ( 0 + offset, -scene.cameraOffset, 2.5 - self.song_cd_xpos + offset)
scene.camera.apply()
y = 0.0
for i, item in enumerate(scene.items):
c = math.sin(scene.itemRenderAngles[i] * math.pi / 180)
if isinstance(item, Song.SongInfo):
h = c * 4.0 + (1 - c) * .8
elif isinstance(item, Song.LibraryInfo):
h = c * 4.0 + (1 - c) * 1.2
elif isinstance(item, Song.TitleInfo) or isinstance(item, Song.SortTitleInfo):
h = c * 4.0 + (1 - c) * 2.4
elif isinstance(item, Song.RandomSongInfo):
h = c * 4.0 + (1 - c) * .8
else:
continue
d = (y + h * .5 + scene.camera.origin[1]) / (4 * (scene.camera.target[2] - scene.camera.origin[2]))
if i == scene.selectedIndex:
scene.selectedOffset = y + h / 2
self.theme.setSelectedColor()
else:
self.theme.setBaseColor()
glTranslatef(0, -h / 2, 0)
glPushMatrix()
if abs(d) < 1.2:
label = scene.itemLabels[i]
if label == "Random":
label = scene.img_random_label
if label == False:
label = scene.img_empty_label
if isinstance(item, Song.SongInfo):
glRotate(scene.itemRenderAngles[i], 0, 0, 1)
self.renderItem(scene, item.cassetteColor, label)
elif isinstance(item, Song.LibraryInfo):
#myfingershurt: cd cases are backwards
glRotate(-scene.itemRenderAngles[i], 0, 1, 0) #spin 90 degrees around y axis
glRotate(-scene.itemRenderAngles[i], 0, 1, 0) #spin 90 degrees around y axis again, now case is corrected
glRotate(-scene.itemRenderAngles[i], 0, 0, 1) #bring cd case up for viewing
if i == scene.selectedIndex:
glRotate(((scene.time - scene.lastTime) * 4 % 360) - 90, 1, 0, 0)
self.renderLibrary(scene, item.color, label)
elif isinstance(item, Song.TitleInfo):
#myfingershurt: cd cases are backwards
glRotate(-scene.itemRenderAngles[i], 0, 0.5, 0) #spin 90 degrees around y axis
glRotate(-scene.itemRenderAngles[i], 0, 0.5, 0) #spin 90 degrees around y axis again, now case is corrected
glRotate(-scene.itemRenderAngles[i], 0, 0, 0.5) #bring cd case up for viewing
if i == scene.selectedIndex:
glRotate(((scene.time - scene.lastTime) * 4 % 360) - 90, 1, 0, 0)
self.renderTitle(scene, item.color, label)
elif isinstance(item, Song.RandomSongInfo):
#myfingershurt: cd cases are backwards
glRotate(scene.itemRenderAngles[i], 0, 0, 1)
self.renderRandom(scene, item.color, label)
glPopMatrix()
glTranslatef(0, -h/2, 0)
y+= h
glDisable(GL_DEPTH_TEST)
glDisable(GL_CULL_FACE)
glDepthMask(0)
finally:
glMatrixMode(GL_PROJECTION)
glPopMatrix()
glMatrixMode(GL_MODELVIEW)
elif self.setlist_type == 1:
return
elif self.setlist_type == 2:
w, h = scene.geometry
try:
glMatrixMode(GL_PROJECTION)
glPushMatrix()
glLoadIdentity()
gluPerspective(60, scene.aspectRatio, 0.1, 1000)
glMatrixMode(GL_MODELVIEW)
glLoadIdentity()
glEnable(GL_DEPTH_TEST)
glDisable(GL_CULL_FACE)
glDepthMask(1)
offset = 0
if scene.time < 40:
offset = 10*((40 - scene.time)/40.0)**4
scene.camera.origin = (-9,(5.196/scene.aspectRatio) - (5.196*2/scene.aspectRatio)*self.song_listcd_cd_ypos,(5.196*scene.aspectRatio)-(5.196*2*scene.aspectRatio)*self.song_listcd_cd_xpos)
scene.camera.target = ( 0,(5.196/scene.aspectRatio) - (5.196*2/scene.aspectRatio)*self.song_listcd_cd_ypos,(5.196*scene.aspectRatio)-(5.196*2*scene.aspectRatio)*self.song_listcd_cd_xpos)
scene.camera.apply()
y = 0.0
glPushMatrix()
item = scene.selectedItem
i = scene.selectedIndex
label = scene.itemLabels[i]
if label == "Random":
label = scene.img_random_label
if not label:
label = scene.img_empty_label
if isinstance(item, Song.SongInfo):
if scene.labelType:
self.renderItem(scene, item.cassetteColor, label)
else:
self.renderLibrary(scene, item.cassetteColor, label)
elif isinstance(item, Song.LibraryInfo):
self.renderLibrary(scene, item.color, label)
elif isinstance(item, Song.RandomSongInfo):
if scene.labelType:
self.renderItem(scene, None, label)
else:
self.renderLibrary(scene, None, label)
glPopMatrix()
glTranslatef(0, -h / 2, 0)
y += h
glDisable(GL_DEPTH_TEST)
glDisable(GL_CULL_FACE)
glDepthMask(0)
finally:
glMatrixMode(GL_PROJECTION)
glPopMatrix()
glMatrixMode(GL_MODELVIEW)
#resets the rendering
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
viewport = glGetIntegerv(GL_VIEWPORT)
w = viewport[2] - viewport[0]
h = viewport[3] - viewport[1]
h *= (float(w) / float(h)) / (4.0 / 3.0)
glOrtho(0, 1, h/w, 0, -100, 100)
glMatrixMode(GL_MODELVIEW)
glLoadIdentity()
glEnable(GL_BLEND)
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
glEnable(GL_COLOR_MATERIAL)
self.theme.setBaseColor(1)
elif self.setlist_type == 3:
w, h = scene.geometry
item = scene.items[scene.selectedIndex]
i = scene.selectedIndex
img = None
lockImg = None
if scene.itemLabels[i] == "Random":
if scene.img_random_label:
img = scene.img_random_label
imgwidth = img.width1()
wfactor = 155.000/imgwidth
elif scene.img_empty_label:
img = scene.img_empty_label
imgwidth = img.width1()
wfactor = 155.000/imgwidth
elif not scene.itemLabels[i]:
if scene.img_empty_label != None:
imgwidth = scene.img_empty_label.width1()
wfactor = 155.000/imgwidth
img = scene.img_empty_label
elif scene.itemLabels[i]:
img = scene.itemLabels[i]
imgwidth = img.width1()
wfactor = 155.000/imgwidth
if isinstance(item, Song.SongInfo) and item.getLocked():
if scene.img_locked_label:
imgwidth = scene.img_locked_label.width1()
wfactor2 = 155.000/imgwidth
lockImg = scene.img_locked_label
elif scene.img_empty_label:
imgwidth = scene.img_empty_label.width1()
wfactor = 155.000/imgwidth
img = scene.img_empty_label
if img:
scene.drawImage(img, scale = (wfactor,-wfactor), coord = (.21*w,.59*h))
if lockImg:
scene.drawImage(lockImg, scale = (wfactor2,-wfactor2), coord = (.21*w,.59*h))
def renderForeground(self, scene):
font = scene.fontDict['songListFont']
w, h = scene.geometry
if self.setlist_type == 2:
text = scene.scorePart.text
scale = 0.00250
glColor3f(1, 1, 1)
font.render(text, (0.95, 0.000), scale=scale, align = 2)
elif self.setlist_type == 3:
font = scene.fontDict['songListFont']
c1,c2,c3 = self.song_rb2_diff_color
glColor3f(c1,c2,c3)
font.render(_("DIFFICULTY"), (.095, .5325), scale = 0.0018)
scale = 0.0014
text = _("BAND")
font.render(text, (.17, .5585), scale = scale, align = 2)
text = _("GUITAR")
font.render(text, (.17, .5835), scale = scale, align = 2)
text = _("DRUM")
font.render(text, (.17, .6085), scale = scale, align = 2)
text = _("BASS")
font.render(text, (.17, .6335), scale = scale, align = 2)
text = _("VOCALS")
font.render(text, (.17, .6585), scale = scale, align = 2)
#Add support for lead and rhythm diff
#Qstick - Sorting Text
text = _("SORTING:") + " "
if scene.sortOrder == 0: #title
text = text + _("ALPHABETICALLY BY TITLE")
elif scene.sortOrder == 1: #artist
text = text + _("ALPHABETICALLY BY ARTIST")
elif scene.sortOrder == 2: #timesplayed
text = text + _("BY PLAY COUNT")
elif scene.sortOrder == 3: #album
text = text + _("ALPHABETICALLY BY ALBUM")
elif scene.sortOrder == 4: #genre
text = text + _("ALPHABETICALLY BY GENRE")
elif scene.sortOrder == 5: #year
text = text + _("BY YEAR")
elif scene.sortOrder == 6: #Band Difficulty
text = text + _("BY BAND DIFFICULTY")
elif scene.sortOrder == 7: #Band Difficulty
text = text + _("BY INSTRUMENT DIFFICULTY")
else:
text = text + _("BY SONG COLLECTION")
font.render(text, (.13, .152), scale = 0.0017)
if scene.songLoader:
font.render(_("Loading Preview..."), (.05, .7), scale = 0.001)
return
if scene.img_list_button_guide:
scene.drawImage(scene.img_list_button_guide, scale = (.5, -.5), coord = (w*.5,0), fit = BOTTOM)
if scene.songLoader:
font.render(_("Loading Preview..."), (.5, .7), align = 1)
if scene.searching:
font.render(scene.searchText, (.5, .7), align = 1)
if scene.img_list_fg:
scene.drawImage(scene.img_list_fg, scale = (1.0, -1.0), coord = (w/2,h/2), stretched = FULL_SCREEN)
def renderSelectedInfo(self, scene):
if self.setlist_type == 0: #note... clean this up. this was a rush job.
if not scene.selectedItem:
return
font = scene.fontDict['font']
screenw, screenh = scene.geometry
v = 0
lfont = font
# here we reset the rendering... without pushing the matrices. (they be thar)
# (otherwise copying engine.view.setOrthogonalProjection)
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
viewport = glGetIntegerv(GL_VIEWPORT)
w = viewport[2] - viewport[0]
h = viewport[3] - viewport[1]
h *= (float(w) / float(h)) / (4.0 / 3.0)
glOrtho(0, 1, h/w, 0, -100, 100)
glMatrixMode(GL_MODELVIEW)
glLoadIdentity()
glEnable(GL_BLEND)
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
glEnable(GL_COLOR_MATERIAL)
self.theme.setBaseColor(1)
if scene.songLoader:
font.render(_("Loading Preview..."), (.05, .7), scale = 0.001)
#x = .6
x = self.song_cdscore_xpos
y = .15
self.theme.setSelectedColor(1)
c1,c2,c3 = self.song_name_selected_color
glColor3f(c1,c2,c3)
item = scene.selectedItem
angle = scene.itemRenderAngles[scene.selectedIndex]
f = ((90.0 - angle) / 90.0) ** 2
cText = item.name
if (isinstance(item, Song.SongInfo) and item.getLocked()):
cText = _("-- Locked --")
fh = lfont.getHeight()*0.0016
lfont.render(cText, (x, y), scale = 0.0016)
if isinstance(item, Song.SongInfo):
self.theme.setBaseColor(1)
c1,c2,c3 = self.artist_selected_color
glColor3f(c1,c2,c3)
if not item.year == "":
yeartag = ", "+item.year
else:
yeartag = ""
cText = item.artist + yeartag
if (item.getLocked()):
cText = "" # avoid giving away artist of locked song
# evilynux - Use font w/o outline
lfont.render(cText, (x, y+fh), scale = 0.0016)
if item.count:
self.theme.setSelectedColor(1)
c1,c2,c3 = self.song_name_selected_color
glColor3f(c1,c2,c3)
count = int(item.count)
if count == 1:
text = _("Played %d time") % count
else:
text = _("Played %d times") % count
if item.getLocked():
text = item.getUnlockText()
elif scene.careerMode and not item.completed:
text = _("Play To Advance.")
font.render(text, (x, y+2*fh), scale = 0.001)
else:
text = _("Never Played")
if item.getLocked():
text = item.getUnlockText()
elif scene.careerMode and not item.completed:
text = _("Play To Advance.")
lfont.render(text, (x, y+3*fh), scale = 0.001)
self.theme.setSelectedColor(1 - v)
c1,c2,c3 = self.songlistcd_score_color
glColor3f(c1,c2,c3)
scale = 0.0011
#x = .6
x = self.song_cdscore_xpos
y = .42
try:
difficulties = item.partDifficulties[scene.scorePart.id]
except KeyError:
difficulties = []
if len(difficulties) > 3:
y = .42
elif len(difficulties) == 0:
score, stars, name = "---", 0, "---"
for d in difficulties:
scores = item.getHighscores(d, part = scene.scorePart)
if scores:
score, stars, name, scoreExt = scores[0]
try:
notesHit, notesTotal, noteStreak, modVersion, handicap, handicapLong, originalScore = scoreExt
except ValueError:
notesHit, notesTotal, noteStreak, modVersion, oldScores1, oldScores2 = scoreExt
handicap = 0
handicapLong = "None"
originalScore = score
else:
score, stars, name = "---", 0, "---"
self.theme.setBaseColor(1)
font.render(Song.difficulties[d.id].text, (x, y), scale = scale)
starscale = 0.02
stary = 1.0 - y/scene.fontScreenBottom
scene.drawStarScore(screenw, screenh, x+.01, stary-2*fh, stars, starscale, hqStar = True) #volshebnyi
self.theme.setSelectedColor(1)
# evilynux - Also use hit%/noteStreak SongList option
if scores:
if notesTotal != 0:
score = "%s %.1f%%" % (score, (float(notesHit) / notesTotal) * 100.0)
if noteStreak != 0:
score = "%s (%d)" % (score, noteStreak)
font.render(unicode(score), (x + .15, y), scale = scale)
font.render(name, (x + .15, y + fh), scale = scale)
y += 2 * fh
elif isinstance(item, Song.LibraryInfo):
self.theme.setBaseColor(1)
c1,c2,c3 = self.library_selected_color
glColor3f(c1,c2,c3)
if item.songCount == 1:
songCount = _("One Song In This Setlist")
else:
songCount = _("%d Songs In This Setlist") % item.songCount
font.render(songCount, (x, y + 3*fh), scale = 0.0016)
elif isinstance(item, Song.RandomSongInfo):
self.theme.setBaseColor(1 - v)
c1,c2,c3 = self.song_name_selected_color
glColor3f(c1,c2,c3)
font.render(_("(Random Song)"), (x, y + 3*fh), scale = 0.0016)
#MFH CD list
text = scene.scorePart.text
scale = 0.00250
#glColor3f(1, 1, 1)
c1,c2,c3 = self.song_name_selected_color
glColor3f(c1,c2,c3)
w, h = font.getStringSize(text, scale=scale)
font.render(text, (0.95-w, 0.000), scale=scale)
# finally:
# pass
elif self.setlist_type == 1:
return
elif self.setlist_type == 2:
if not scene.selectedItem:
return
item = scene.selectedItem
font = scene.fontDict['font']
w, h = scene.geometry
lfont = font
fh = lfont.getHeight()*0.0016
if isinstance(item, Song.SongInfo):
angle = scene.itemRenderAngles[scene.selectedIndex]
f = ((90.0 - angle) / 90.0) ** 2
self.theme.setSelectedColor(1)
c1,c2,c3 = self.songlistcd_score_color
glColor4f(c1,c2,c3,1)
scale = 0.0013
x = self.song_listcd_score_xpos
y = self.song_listcd_score_ypos + f / 2.0
try:
difficulties = item.partDifficulties[scene.scorePart.id]
except KeyError:
difficulties = []
score, stars, name = "---", 0, "---"
if len(difficulties) > 3:
y = self.song_listcd_score_ypos + f / 2.0
#new
for d in difficulties:
scores = item.getHighscores(d, part = scene.scorePart)
if scores:
score, stars, name, scoreExt = scores[0]
try:
notesHit, notesTotal, noteStreak, modVersion, handicap, handicapLong, originalScore = scoreExt
except ValueError:
notesHit, notesTotal, noteStreak, modVersion, oldScores1, oldScores2 = scoreExt
else:
score, stars, name = "---", 0, "---"
font.render(Song.difficulties[d.id].text, (x, y), scale = scale)
starscale = 0.02
starx = x + starscale/2
stary = 1.0 - (y / scene.fontScreenBottom) - fh - starscale
scene.drawStarScore(w, h, starx, stary, stars, starscale) #MFH
c1,c2,c3 = self.songlistcd_score_color
glColor3f(c1,c2,c3)
if scores:
if notesTotal != 0:
score = "%s %.1f%%" % (score, (float(notesHit) / notesTotal) * 100.0)
if noteStreak != 0:
score = "%s (%d)" % (score, noteStreak)
font.render(unicode(score), (x + .15, y), scale = scale)
font.render(name, (x + .15, y + fh), scale = scale)
y += 2 * fh + f / 4.0
elif self.setlist_type == 3:
w, h = scene.geometry
font = scene.fontDict['songListFont']
item = scene.selectedItem
if isinstance(item, Song.SongInfo):
text = item.artist
if (item.getLocked()):
text = "" # avoid giving away artist of locked song
scale = 0.0015
wt, ht = font.getStringSize(text, scale=scale)
while wt > .21:
tlength = len(text) - 4
text = text[:tlength] + "..."
wt, ht = font.getStringSize(text, scale = scale)
if wt < .22:
break
c1,c2,c3 = self.artist_text_color
glColor3f(c1,c2,c3)
text = string.upper(text)
font.render(text, (.095, .44), scale = scale)
if scene.img_diff3 != None:
imgwidth = scene.img_diff3.width1()
wfactor1 = 13.0/imgwidth
albumtag = item.album
albumtag = string.upper(albumtag)
wt, ht = font.getStringSize(albumtag, scale=scale)
while wt > .21:
tlength = len(albumtag) - 4
albumtag = albumtag[:tlength] + "..."
wt, ht = font.getStringSize(albumtag, scale = scale)
if wt < .22:
break
font.render(albumtag, (.095, .47), scale = 0.0015)
genretag = item.genre
font.render(genretag, (.095, .49), scale = 0.0015)
yeartag = item.year
font.render(yeartag, (.095, .51), scale = 0.0015)
for i in range(5):
glColor3f(1, 1, 1)
if i == 0:
diff = item.diffSong
elif i == 1:
diff = item.diffGuitar
elif i == 2:
diff = item.diffDrums
elif i == 3:
diff = item.diffBass
elif i == 4:
diff = item.diffVocals
if scene.img_diff1 == None or scene.img_diff2 == None or scene.img_diff3 == None:
if diff == -1:
font.render("N/A", (.18, .5585 + i*.025), scale = 0.0014)
elif diff == 6:
glColor3f(1, 1, 0)
font.render(str("*" * (diff -1)), (.18, 0.5685 + i*.025), scale = 0.003)
else:
font.render(str("*" * diff + " " * (5 - diff)), (.18, 0.5685 + i*.025), scale = 0.003)
else:
if diff == -1:
font.render("N/A", (.18, .5585 + i*.025), scale = 0.0014)
elif diff == 6:
for k in range(0,5):
scene.drawImage(scene.img_diff3, scale = (wfactor1,-wfactor1), coord = ((.19+.03*k)*w, (0.2354-.0333*i)*h))
else:
for k in range(0,diff):
scene.drawImage(scene.img_diff2, scale = (wfactor1,-wfactor1), coord = ((.19+.03*k)*w, (0.2354-.0333*i)*h))
for k in range(0, 5-diff):
scene.drawImage(scene.img_diff1, scale = (wfactor1,-wfactor1), coord = ((.31-.03*k)*w, (0.2354-.0333*i)*h))
def renderMoreInfo(self, scene):
if not scene.items:
return
if not scene.selectedItem:
return
item = scene.selectedItem
i = scene.selectedIndex
y = 0
w, h = scene.geometry
font = scene.fontDict['songListFont']
scene.engine.fadeScreen(0.25)
if scene.moreInfoTime < 500:
y = 1.0-(float(scene.moreInfoTime)/500.0)
yI = y*h
if scene.img_panel:
scene.drawImage(scene.img_panel, scale = (1.0, -1.0), coord = (w*.5,h*.5+yI), stretched = FULL_SCREEN)
if scene.img_tabs:
r0 = (0, (1.0/3.0), 0, .5)
r1 = ((1.0/3.0),(2.0/3.0), 0, .5)
r2 = ((2.0/3.0),1.0,0,.5)
if scene.infoPage == 0:
r0 = (0, (1.0/3.0), .5, 1.0)
scene.drawImage(scene.img_tab1, scale = (.5, -.5), coord = (w*.5,h*.5+yI))
text = item.name
if item.artist != "":
text += " by %s" % item.artist
if item.year != "":
text += " (%s)" % item.year
scale = font.scaleText(text, .45, .0015)
font.render(text, (.52, .25-y), scale = scale, align = 1)
if scene.itemLabels[i]:
imgwidth = scene.itemLabels[i].width1()
wfactor = 95.000/imgwidth
scene.drawImage(scene.itemLabels[i], (wfactor, -wfactor), (w*.375,h*.5+yI))
elif scene.img_empty_label:
imgwidth = scene.img_empty_label.width1()
wfactor = 95.000/imgwidth
scene.drawImage(scene.img_empty_label, (wfactor, -wfactor), (w*.375,h*.5+yI))
text = item.album
if text == "":
text = _("No Album")
scale = font.scaleText(text, .2, .0015)
font.render(text, (.56, .305-y), scale = scale)
text = item.genre
if text == "":
text = _("No Genre")
scale = font.scaleText(text, .2, .0015)
font.render(text, (.56, .35-y), scale = scale)
elif scene.infoPage == 1:
r1 = ((1.0/3.0),(2.0/3.0), .5, 1.0)
scene.drawImage(scene.img_tab2, scale = (.5, -.5), coord = (w*.5,h*.5+yI))
elif scene.infoPage == 2:
r2 = ((2.0/3.0),1.0, .5, 1.0)
scene.drawImage(scene.img_tab3, scale = (.5, -.5), coord = (w*.5,h*.5+yI))
scene.drawImage(scene.img_tabs, scale = (.5*(1.0/3.0), -.25), coord = (w*.36,h*.72+yI), rect = r0)
scene.drawImage(scene.img_tabs, scale = (.5*(1.0/3.0), -.25), coord = (w*.51,h*.72+yI), rect = r1)
scene.drawImage(scene.img_tabs, scale = (.5*(1.0/3.0), -.25), coord = (w*.66,h*.72+yI), rect = r2)
def renderMiniLobby(self, scene):
return
__all__ = ["LEFT", "CENTER", "RIGHT", "_", "Theme", "shaders", "Setlist"]
|
maggotgdv/fofix
|
src/Theme.py
|
Python
|
gpl-2.0
| 126,381
|
# Rekall Memory Forensics
#
# Copyright 2014 Google Inc. All Rights Reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or (at
# your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
"""
Darwin Session collectors.
"""
__author__ = "Adam Sindelar <adamsh@google.com>"
from rekall.entities import definitions
from rekall.plugins.collectors.darwin import common
from rekall.plugins.collectors.darwin import zones
class DarwinTerminalUserInferor3000(common.DarwinEntityCollector):
"""Infers the relationship between usernames and UIDs using tty sessions."""
outputs = ["User"]
collect_args = dict(
terminals=("Terminal/file matches (has component Permissions) and "
"Terminal/session"))
complete_input = True
def collect(self, hint, terminals):
for terminal in terminals:
owner = terminal["Terminal/file"]["Permissions/owner"]
user = terminal["Terminal/session"]["Session/user"]
# Now tell the manager that these two users are the same user.
if owner and user:
yield user.identity | owner.identity
class DarwinTTYZoneCollector(zones.DarwinZoneElementCollector):
outputs = ["Struct/type=tty"]
zone_name = "ttys"
type_name = "tty"
def validate_element(self, tty):
return tty.t_lock == tty
class DarwinClistParser(common.DarwinEntityCollector):
outputs = ["Buffer/purpose=terminal_input",
"Buffer/purpose=terminal_output"]
collect_args = dict(clists="Struct/type is 'clist'")
def collect(self, hint, clists):
for entity in clists:
clist = entity["Struct/base"]
yield [entity.identity,
definitions.Buffer(kind="ring",
state="freed",
contents=clist.recovered_contents,
start=clist.c_cs,
end=clist.c_ce,
size=clist.c_cn)]
class DarwinTTYParser(common.DarwinEntityCollector):
outputs = ["Terminal", "Struct/type=vnode", "Struct/type=clist",
"Buffer/purpose=terminal_input",
"Buffer/purpose=terminal_output"]
collect_args = dict(ttys="Struct/type is 'tty'")
def collect(self, hint, ttys):
for entity in ttys:
file_identity = None
session_identity = None
tty = entity["Struct/base"]
session = tty.t_session.deref()
vnode = session.s_ttyvp
if session:
session_identity = self.manager.identify({
"Struct/base": session})
if vnode:
# Look, it has a vnode!
yield definitions.Struct(base=vnode,
type="vnode")
file_identity = self.manager.identify({
"Struct/base": vnode})
# Yield just the stubs of the input and output ring buffers.
# DarwinClistParser will grab these if it cares.
yield [definitions.Struct(base=tty.t_rawq,
type="clist"),
definitions.Buffer(purpose="terminal_input",
context=entity.identity)]
yield [definitions.Struct(base=tty.t_outq,
type="clist"),
definitions.Buffer(purpose="terminal_output",
context=entity.identity)]
# Last, but not least, the Terminal itself.
yield [entity.identity,
definitions.Terminal(
session=session_identity,
file=file_identity)]
class DarwinSessionParser(common.DarwinEntityCollector):
"""Collects session entities from the memory objects."""
_name = "sessions"
outputs = ["Session",
"User",
"Struct/type=tty",
"Struct/type=proc"]
collect_args = dict(sessions="Struct/type is 'session'")
def collect(self, hint, sessions):
for entity in sessions:
session = entity["Struct/base"]
# Have to sanitize the usernames to prevent issues when comparing
# them later.
username = str(session.s_login).replace("\x00", "")
if username:
user_identity = self.manager.identify({
"User/username": username})
yield [user_identity,
definitions.User(
username=username)]
else:
user_identity = None
sid = session.s_sid
# Turns out, SID is not always unique. This is disabled as it is
# not being currently used, and I need to investigate the causes
# of duplicate sessions occurring on 10.10.
# session_identity = self.manager.identify({
# "Session/sid": sid}) | entity.identity
session_identity = entity.identity
if session.s_ttyp:
yield definitions.Struct(
base=session.s_ttyp,
type="tty")
if session.s_leader and session.s_leader.validate():
yield definitions.Struct(
base=session.s_leader.deref(),
type="proc")
yield [session_identity,
definitions.Session(
user=user_identity,
sid=sid),
definitions.Named(
name="SID %d" % int(sid),
kind="Session")]
class DarwinSessionZoneCollector(zones.DarwinZoneElementCollector):
"""Collects sessions from the sessions allocation zone."""
outputs = ["Struct/type=session"]
zone_name = "session"
type_name = "session"
def validate_element(self, session):
return session.s_count > 0 and session.s_leader.p_argc > 0
class DarwinSessionCollector(common.DarwinEntityCollector):
"""Collects sessions."""
outputs = ["Struct/type=session"]
def collect(self, hint):
session_hash_table_size = self.profile.get_constant_object(
"_sesshash", "unsigned long")
# The hashtable is an array to session list heads.
session_hash_table = self.profile.get_constant_object(
"_sesshashtbl",
target="Pointer",
target_args=dict(
target="Array",
target_args=dict(
target="sesshashhead",
count=session_hash_table_size.v())))
for sesshashhead in session_hash_table:
for session in sesshashhead.lh_first.walk_list("s_hash.le_next"):
yield definitions.Struct(
base=session,
type="session")
|
chen0031/rekall
|
rekall-core/rekall/plugins/collectors/darwin/sessions.py
|
Python
|
gpl-2.0
| 7,561
|
#!/usr/bin/env python -i
# preceeding line should have path for Python on your machine
# plot.py
# Purpose: plot Temp of running LIGGGHTS simulation via GnuPlot in Pizza.py
# Syntax: plot.py in.liggghts Nfreq Nsteps compute-ID
# in.liggghts = LIGGGHTS input script
# Nfreq = plot data point every this many steps
# Nsteps = run for this many steps
# compute-ID = ID of compute that calculates temperature
# (or any other scalar quantity)
import sys
sys.path.append("./pizza")
from gnu import gnu
# parse command line
argv = sys.argv
if len(argv) != 5:
print "Syntax: plot.py in.liggghts Nfreq Nsteps compute-ID"
sys.exit()
infile = sys.argv[1]
nfreq = int(sys.argv[2])
nsteps = int(sys.argv[3])
compute = sys.argv[4]
me = 0
# uncomment if running in parallel via Pypar
#import pypar
#me = pypar.rank()
#nprocs = pypar.size()
from liggghts import liggghts
lmp = liggghts()
# run infile all at once
# assumed to have no run command in it
lmp.file(infile)
lmp.command("thermo %d" % nfreq)
# initial 0-step run to generate initial 1-point plot
lmp.command("run 0 pre yes post no")
value = lmp.extract_compute(compute,0,0)
ntimestep = 0
xaxis = [ntimestep]
yaxis = [value]
# wrapper on GnuPlot via Pizza.py gnu tool
# just proc 0 handles plotting
if me == 0:
gn = gnu()
gn.plot(xaxis,yaxis)
gn.xrange(0,nsteps)
gn.title(compute,"Timestep","Temperature")
# run nfreq steps at a time w/out pre/post, query compute, refresh plot
while ntimestep < nsteps:
lmp.command("run %d pre no post no" % nfreq)
ntimestep += nfreq
value = lmp.extract_compute(compute,0,0)
xaxis.append(ntimestep)
yaxis.append(value)
if me == 0: gn.plot(xaxis,yaxis)
lmp.command("run 0 pre no post yes")
# uncomment if running in parallel via Pypar
#print "Proc %d out of %d procs has" % (me,nprocs), lmp
#pypar.finalize()
|
CFDEMproject/LIGGGHTS-PUBLIC
|
python/examples/plot.py
|
Python
|
gpl-2.0
| 1,885
|
"""Parallel testing, supporting arbitrary collection ordering
The Workflow
------------
- Master py.test process starts up, inspects config to decide how many slave to start, if at all
- env['parallel_base_urls'] is inspected first
- py.test config.option.appliances and the related --appliance cmdline flag are used
if env['parallel_base_urls'] isn't set
- if neither are set, no parallelization happens
- Slaves are started
- Master runs collection, blocks until slaves report their collections
- Slaves each run collection and submit them to the master, then block inside their runtest loop,
waiting for tests to run
- Master diffs slave collections against its own; the test ids are verified to match
across all nodes
- Master enters main runtest loop, uses a generator to build lists of test groups which are then
sent to slaves, one group at a time
- For each phase of each test, the slave serializes test reports, which are then unserialized on
the master and handed to the normal pytest reporting hooks, which is able to deal with test
reports arriving out of order
- Before running the last test in a group, the slave will request more tests from the master
- If more tests are received, they are run
- If no tests are received, the slave will shut down after running its final test
- After all slaves are shut down, the master will do its end-of-session reporting as usual, and
shut down
"""
from itertools import groupby
import difflib
import json
import os
import signal
import subprocess
from collections import defaultdict, deque, namedtuple
from datetime import datetime
from itertools import count
import attr
from threading import Thread
from time import sleep, time
import pytest
import zmq
from _pytest import runner
from fixtures import terminalreporter
from fixtures.parallelizer import remote
from fixtures.pytest_store import store
from cfme.utils import at_exit, conf
from cfme.utils.log import create_sublogger
from cfme.utils.path import conf_path
# Initialize slaveid to None, indicating this as the master process
# slaves will set this to a unique string when they're initialized
conf.runtime['env']['slaveid'] = None
if not conf.runtime['env'].get('ts'):
ts = str(time())
conf.runtime['env']['ts'] = ts
def pytest_addhooks(pluginmanager):
import hooks
pluginmanager.add_hookspecs(hooks)
@pytest.mark.trylast
def pytest_configure(config):
"""Configures the parallel session, then fires pytest_parallel_configured."""
reporter = terminalreporter.reporter()
holder = config.pluginmanager.get_plugin("appliance-holder")
appliances = holder.appliances
if len(appliances) > 1:
session = ParallelSession(config, appliances)
config.pluginmanager.register(session, "parallel_session")
store.parallelizer_role = 'master'
reporter.write_line(
'As a parallelizer master kicking off parallel session for these {} appliances'.format(
len(appliances)),
green=True)
config.hook.pytest_parallel_configured(parallel_session=session)
else:
reporter.write_line('No parallelization required', green=True)
config.hook.pytest_parallel_configured(parallel_session=None)
def handle_end_session(signal, frame):
# when signaled, end the current test session immediately
if store.parallel_session:
store.parallel_session.session_finished = True
signal.signal(signal.SIGQUIT, handle_end_session)
@attr.s(hash=False)
class SlaveDetail(object):
slaveid_generator = ('slave{:02d}'.format(i) for i in count())
appliance = attr.ib()
id = attr.ib(default=attr.Factory(
lambda: next(SlaveDetail.slaveid_generator)))
forbid_restart = attr.ib(default=False, init=False)
tests = attr.ib(default=attr.Factory(set), repr=False)
process = attr.ib(default=None, repr=False)
provider_allocation = attr.ib(default=attr.Factory(list), repr=False)
def start(self):
if self.forbid_restart:
return
devnull = open(os.devnull, 'w')
# worker output redirected to null; useful info comes via messages and logs
self.process = subprocess.Popen(
['python', remote.__file__, self.id, self.appliance.as_json, conf.runtime['env']['ts']],
stdout=devnull,
)
at_exit(self.process.kill)
def poll(self):
if self.process is not None:
return self.process.poll()
class ParallelSession(object):
def __init__(self, config, appliances):
self.config = config
self.session = None
self.session_finished = False
self.countfailures = 0
self.collection = []
self.sent_tests = 0
self.log = create_sublogger('master')
self.maxfail = config.getvalue("maxfail")
self._failed_collection_errors = {}
self.terminal = store.terminalreporter
self.trdist = None
self.slaves = {}
self.test_groups = self._test_item_generator()
self._pool = []
from cfme.utils.conf import cfme_data
self.provs = sorted(set(cfme_data['management_systems'].keys()),
key=len, reverse=True)
self.used_prov = set()
self.failed_slave_test_groups = deque()
self.slave_spawn_count = 0
self.appliances = appliances
# set up the ipc socket
zmq_endpoint = 'ipc://{}'.format(
config.cache.makedir('parallelize').join(str(os.getpid())))
ctx = zmq.Context.instance()
self.sock = ctx.socket(zmq.ROUTER)
self.sock.bind(zmq_endpoint)
# clean out old slave config if it exists
slave_config = conf_path.join('slave_config.yaml')
slave_config.check() and slave_config.remove()
# write out the slave config
conf.runtime['slave_config'] = {
'args': self.config.args,
'options': dict( # copy to avoid aliasing
self.config.option.__dict__,
use_sprout=False, # Slaves don't use sprout
),
'zmq_endpoint': zmq_endpoint,
}
if hasattr(self, "slave_appliances_data"):
conf.runtime['slave_config']["appliance_data"] = self.slave_appliances_data
conf.save('slave_config')
for appliance in self.appliances:
slave_data = SlaveDetail(appliance=appliance)
self.slaves[slave_data.id] = slave_data
for slave in sorted(self.slaves):
self.print_message("using appliance {}".format(self.slaves[slave].appliance.url),
slave, green=True)
def _slave_audit(self):
# XXX: There is currently no mechanism to add or remove slave_urls, short of
# firing up the debugger and doing it manually. This is making room for
# planned future abilities to dynamically add and remove slaves via automation
# check for unexpected slave shutdowns and redistribute tests
for slave in self.slaves.values():
returncode = slave.poll()
if returncode:
slave.process = None
if returncode == -9:
msg = '{} killed due to error, respawning'.format(slave.id)
else:
msg = '{} terminated unexpectedly with status {}, respawning'.format(
slave.id, returncode)
if slave.tests:
failed_tests, slave.tests = slave.tests, set()
num_failed_tests = len(failed_tests)
self.sent_tests -= num_failed_tests
msg += ' and redistributing {} tests'.format(num_failed_tests)
self.failed_slave_test_groups.append(failed_tests)
self.print_message(msg, purple=True)
# If a slave was terminated for any reason, kill that slave
# the terminated flag implies the appliance has died :(
for slave in list(self.slaves.values()):
if slave.forbid_restart:
if slave.process is None:
self.config.hook.pytest_miq_node_shutdown(
config=self.config, nodeinfo=slave.appliance.url)
del self.slaves[slave.id]
else:
# no hook call here, a future audit will handle the fallout
self.print_message(
"{}'s appliance has died, deactivating slave".format(slave.id))
self.interrupt(slave)
else:
if slave.process is None:
slave.start()
self.slave_spawn_count += 1
def send(self, slave, event_data):
"""Send data to slave.
``event_data`` will be serialized as JSON, and so must be JSON serializable
"""
event_json = json.dumps(event_data)
self.sock.send_multipart([slave.id, '', event_json])
def recv(self):
# poll the zmq socket, populate the recv queue deque with responses
events = zmq.zmq_poll([(self.sock, zmq.POLLIN)], 50)
if not events:
return None, None, None
slaveid, _, event_json = self.sock.recv_multipart(flags=zmq.NOBLOCK)
event_data = json.loads(event_json)
event_name = event_data.pop('_event_name')
if slaveid not in self.slaves:
self.log.error("message from terminated worker %s %s %s",
slaveid, event_name, event_data)
return None, None, None
return self.slaves[slaveid], event_data, event_name
def print_message(self, message, prefix='master', **markup):
"""Print a message from a node to the py.test console
Args:
message: The message to print
**markup: If set, overrides the default markup when printing the message
"""
# differentiate master and slave messages by default
prefix = getattr(prefix, 'id', prefix)
if not markup:
if prefix == 'master':
markup = {'blue': True}
else:
markup = {'cyan': True}
stamp = datetime.now().strftime("%Y%m%d %H:%M:%S")
self.terminal.write_ensure_prefix(
'({})[{}] '.format(prefix, stamp), message, **markup)
def ack(self, slave, event_name):
"""Acknowledge a slave's message"""
self.send(slave, 'ack {}'.format(event_name))
def monitor_shutdown(self, slave):
# non-daemon so slaves get every opportunity to shut down cleanly
shutdown_thread = Thread(target=self._monitor_shutdown_t,
args=(slave.id, slave.process))
shutdown_thread.start()
def _monitor_shutdown_t(self, slaveid, process):
# a KeyError here means self.slaves got mangled, indicating a problem elsewhere
if process is None:
self.log.warning('Slave was missing when trying to monitor shutdown')
def sleep_and_poll():
start_time = time()
# configure the polling logic
polls = 0
# how often to poll
poll_sleep_time = .5
# how often to report (calculated to be around once a minute based on poll_sleep_time)
poll_report_modulo = 60 / poll_sleep_time
# maximum time to wait
poll_num_sec = 300
while (time() - start_time) < poll_num_sec:
polls += 1
yield
if polls % poll_report_modulo == 0:
remaining_time = int(poll_num_sec - (time() - start_time))
self.print_message(
'{} still shutting down, '
'will continue polling for {} seconds '
.format(slaveid, remaining_time), blue=True)
sleep(poll_sleep_time)
# start the poll
for poll in sleep_and_poll():
ec = process.poll()
if ec is None:
continue
else:
if ec == 0:
self.print_message('{} exited'.format(slaveid), green=True)
else:
self.print_message('{} died'.format(slaveid), red=True)
break
else:
self.print_message('{} failed to shut down gracefully; killed'.format(slaveid),
red=True)
process.kill()
def interrupt(self, slave, **kwargs):
"""Nicely ask a slave to terminate"""
slave.forbid_restart = True
if slave.poll() is None:
slave.process.send_signal(subprocess.signal.SIGINT)
self.monitor_shutdown(slave, **kwargs)
def kill(self, slave, **kwargs):
"""Rudely kill a slave"""
slave.forbid_restart = True
if slave.poll() is None:
slave.process.kill()
self.monitor_shutdown(slave, **kwargs)
def send_tests(self, slave):
"""Send a slave a group of tests"""
try:
tests = list(self.failed_slave_test_groups.popleft())
except IndexError:
tests = self.get(slave)
self.send(slave, tests)
slave.tests.update(tests)
collect_len = len(self.collection)
tests_len = len(tests)
self.sent_tests += tests_len
if tests:
self.print_message('sent {} tests to {} ({}/{}, {:.1f}%)'.format(
tests_len, slave.id, self.sent_tests, collect_len,
self.sent_tests * 100. / collect_len
))
return tests
def pytest_sessionstart(self, session):
"""pytest sessionstart hook
- sets up distributed terminal reporter
- sets up zmp ipc socket for the slaves to use
- writes pytest options and args to slave_config.yaml
- starts the slaves
- register atexit kill hooks to destroy slaves at the end if things go terribly wrong
"""
# If reporter() gave us a fake terminal reporter in __init__, the real
# terminal reporter is registered by now
self.terminal = store.terminalreporter
self.trdist = TerminalDistReporter(self.config, self.terminal)
self.config.pluginmanager.register(self.trdist, "terminaldistreporter")
self.session = session
def pytest_runtestloop(self):
"""pytest runtest loop
- Disable the master terminal reporter hooks, so we can add our own handlers
that include the slaveid in the output
- Send tests to slaves when they ask
- Log the starting of tests and test results, including slave id
- Handle clean slave shutdown when they finish their runtest loops
- Restore the master terminal reporter after testing so we get the final report
"""
# Build master collection for slave diffing and distribution
self.collection = [item.nodeid for item in self.session.items]
# Fire up the workers after master collection is complete
# master and the first slave share an appliance, this is a workaround to prevent a slave
# from altering an appliance while master collection is still taking place
for slave in self.slaves.values():
slave.start()
try:
self.print_message("Waiting for {} slave collections".format(len(self.slaves)),
red=True)
# Turn off the terminal reporter to suppress the builtin logstart printing
terminalreporter.disable()
while True:
# spawn/kill/replace slaves if needed
self._slave_audit()
if not self.slaves:
# All slaves are killed or errored, we're done with tests
self.print_message('all slaves have exited', yellow=True)
self.session_finished = True
if self.session_finished:
break
slave, event_data, event_name = self.recv()
if event_name == 'message':
message = event_data.pop('message')
markup = event_data.pop('markup')
# messages are special, handle them immediately
self.print_message(message, slave, **markup)
self.ack(slave, event_name)
elif event_name == 'collectionfinish':
slave_collection = event_data['node_ids']
# compare slave collection to the master, all test ids must be the same
self.log.debug('diffing {} collection'.format(slave.id))
diff_err = report_collection_diff(
slave.id, self.collection, slave_collection)
if diff_err:
self.print_message(
'collection differs, respawning', slave.id,
purple=True)
self.print_message(diff_err, purple=True)
self.log.error('{}'.format(diff_err))
self.kill(slave)
slave.start()
else:
self.ack(slave, event_name)
elif event_name == 'need_tests':
self.send_tests(slave)
self.log.info('starting master test distribution')
elif event_name == 'runtest_logstart':
self.ack(slave, event_name)
self.trdist.runtest_logstart(
slave.id,
event_data['nodeid'],
event_data['location'])
elif event_name == 'runtest_logreport':
self.ack(slave, event_name)
report = unserialize_report(event_data['report'])
if report.when in ('call', 'teardown'):
slave.tests.discard(report.nodeid)
self.trdist.runtest_logreport(slave.id, report)
elif event_name == 'internalerror':
self.ack(slave, event_name)
self.print_message(event_data['message'], slave, purple=True)
self.kill(slave)
elif event_name == 'shutdown':
self.config.hook.pytest_miq_node_shutdown(
config=self.config, nodeinfo=slave.appliance.url)
self.ack(slave, event_name)
del self.slaves[slave.id]
self.monitor_shutdown(slave)
# total slave spawn count * 3, to allow for each slave's initial spawn
# and then each slave (on average) can fail two times
if self.slave_spawn_count >= len(self.appliances) * 3:
self.print_message(
'too many slave respawns, exiting',
red=True, bold=True)
raise KeyboardInterrupt('Interrupted due to slave failures')
except Exception as ex:
self.log.error('Exception in runtest loop:')
self.log.exception(ex)
self.print_message(str(ex))
raise
finally:
terminalreporter.enable()
# Suppress other runtestloop calls
return True
def _test_item_generator(self):
for tests in self._modscope_item_generator():
yield tests
def _modscope_item_generator(self):
# breaks out tests by module, can work just about any way we want
# as long as it yields lists of tests id from the master collection
sent_tests = 0
collection_len = len(self.collection)
def get_fspart(nodeid):
return nodeid.split('::')[0]
for fspath, gen_moditems in groupby(self.collection, key=get_fspart):
for tests in self._modscope_id_splitter(gen_moditems):
sent_tests += len(tests)
self.log.info('{} tests remaining to send'.format(
collection_len - sent_tests))
yield list(tests)
def _modscope_id_splitter(self, module_items):
# given a list of item ids from one test module, break up tests into groups with the same id
parametrized_ids = defaultdict(list)
for item in module_items:
if '[' in item:
# split on the leftmost bracket, then strip everything after the rightmight bracket
# so 'test_module.py::test_name[parametrized_id]' becomes 'parametrized_id'
parametrized_id = item.split('[')[1].rstrip(']')
else:
# splits failed, item has no parametrized id
parametrized_id = 'no params'
parametrized_ids[parametrized_id].append(item)
for id, tests in parametrized_ids.items():
if tests:
self.log.info('sent tests with param {} {!r}'.format(id, tests))
yield tests
def get(self, slave):
def provs_of_tests(test_group):
found = set()
for test in test_group:
found.update(pv for pv in self.provs
if '[' in test and pv in test)
return sorted(found)
if not self._pool:
for test_group in self.test_groups:
self._pool.append(test_group)
self.used_prov.update(provs_of_tests(test_group))
if self.used_prov:
self.ratio = float(len(self.slaves)) / len(self.used_prov)
else:
self.ratio = 0.0
if not self._pool:
return []
appliance_num_limit = 1
for idx, test_group in enumerate(self._pool):
provs = provs_of_tests(test_group)
if provs:
prov = provs[0]
if prov in slave.provider_allocation:
# provider is already with the slave, so just return the tests
self._pool.remove(test_group)
return test_group
else:
if len(slave.provider_allocation) >= appliance_num_limit:
continue
else:
# Adding provider to slave since there are not too many
slave.provider_allocation.append(prov)
self._pool.remove(test_group)
return test_group
else:
# No providers - ie, not a provider parametrized test
# or no params, so not parametrized at all
self._pool.remove(test_group)
return test_group
# Here means no tests were able to be sent
for test_group in self._pool:
provs = provs_of_tests(test_group)
if provs:
prov = provs[0]
# Already too many slaves with provider
app = slave.appliance
self.print_message(
'cleansing appliance', slave, purple=True)
try:
app.delete_all_providers()
except Exception as e:
self.print_message(
'cloud not cleanse', slave, red=True)
self.print_message('error:', e, red=True)
slave.provider_allocation = [prov]
self._pool.remove(test_group)
return test_group
assert not self._pool, self._pool
return []
def report_collection_diff(slaveid, from_collection, to_collection):
"""Report differences, if any exist, between master and a slave collection
Raises RuntimeError if collections differ
Note:
This function will sort functions before comparing them.
"""
from_collection, to_collection = sorted(from_collection), sorted(to_collection)
if from_collection == to_collection:
# Well, that was easy.
return
# diff the two, so we get some idea of what's wrong
diff = difflib.unified_diff(
from_collection,
to_collection,
fromfile='master',
tofile=slaveid,
)
# diff is a line generator, stringify it
diff = '\n'.join([line.rstrip() for line in diff])
return '{slaveid} diff:\n{diff}\n'.format(slaveid=slaveid, diff=diff)
class TerminalDistReporter(object):
"""Terminal Reporter for Distributed Testing
trdist reporter exists to make sure we get good distributed logging during the runtest loop,
which means the normal terminal reporter should be disabled during the loop
This class is where we make sure the terminal reporter is made aware of whatever state it
needs to report properly once we turn it back on after the runtest loop
It has special versions of pytest reporting hooks that, where possible, try to include a
slave ID. These hooks are called in :py:class:`ParallelSession`'s runtestloop hook.
"""
def __init__(self, config, terminal):
self.config = config
self.tr = terminal
self.outcomes = {}
def runtest_logstart(self, slaveid, nodeid, location):
test = self.tr._locationline(nodeid, *location)
prefix = '({}) {}'.format(slaveid, test)
self.tr.write_ensure_prefix(prefix, 'running', blue=True)
self.config.hook.pytest_runtest_logstart(nodeid=nodeid, location=location)
def runtest_logreport(self, slaveid, report):
# Run all the normal logreport hooks
self.config.hook.pytest_runtest_logreport(report=report)
# Now do what the terminal reporter would normally do, but include parallelizer info
outcome, letter, word = self.config.hook.pytest_report_teststatus(report=report)
# Stash stats on the terminal reporter so it reports properly
# after it's reenabled at the end of runtestloop
self.tr.stats.setdefault(outcome, []).append(report)
test = self.tr._locationline(report.nodeid, *report.location)
prefix = '({}) {}'.format(slaveid, test)
try:
# for some reason, pytest_report_teststatus returns a word, markup tuple
# when the word would be 'XPASS', so unpack it here if that's the case
word, markup = word
except (TypeError, ValueError):
# word wasn't iterable or didn't have enough values, use it as-is
pass
if word in ('PASSED', 'xfail'):
markup = {'green': True}
elif word in ('ERROR', 'FAILED', 'XPASS'):
markup = {'red': True}
elif word:
markup = {'yellow': True}
# For every stage where we can report the outcome, stash it in the outcomes dict
if word:
self.outcomes[test] = Outcome(word, markup)
# Then, when we get to the teardown report, print the last outcome
# This prevents reportings a test as 'PASSED' if its teardown phase fails, for example
if report.when == 'teardown':
word, markup = self.outcomes.pop(test)
self.tr.write_ensure_prefix(prefix, word, **markup)
Outcome = namedtuple('Outcome', ['word', 'markup'])
def unserialize_report(reportdict):
"""
Generate a :py:class:`TestReport <pytest:_pytest.runner.TestReport>` from a serialized report
"""
return runner.TestReport(**reportdict)
|
jkandasa/integration_tests
|
fixtures/parallelizer/__init__.py
|
Python
|
gpl-2.0
| 27,463
|
# vim:ts=4:sw=4:sts=4:et
# -*- coding: utf-8 -*-
"""Additional auxiliary data types"""
from itertools import islice
__license__ = """\
Copyright (C) 2006-2012 Tamás Nepusz <ntamas@gmail.com>
Pázmány Péter sétány 1/a, 1117 Budapest, Hungary
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301 USA
"""
class Matrix(object):
"""Simple matrix data type.
Of course there are much more advanced matrix data types for Python (for
instance, the C{ndarray} data type of Numeric Python) and this implementation
does not want to compete with them. The only role of this data type is to
provide a convenient interface for the matrices returned by the C{Graph}
object (for instance, allow indexing with tuples in the case of adjacency
matrices and so on).
"""
def __init__(self, data=None):
"""Initializes a matrix.
@param data: the elements of the matrix as a list of lists, or C{None} to
create a 0x0 matrix.
"""
self._nrow, self._ncol, self._data = 0, 0, []
self.data = data
# pylint: disable-msg=C0103
@classmethod
def Fill(cls, value, *args):
"""Creates a matrix filled with the given value
@param value: the value to be used
@keyword shape: the shape of the matrix. Can be a single integer,
two integers or a tuple. If a single integer is
given here, the matrix is assumed to be square-shaped.
"""
if len(args) < 1:
raise TypeError("expected an integer or a tuple")
if len(args) == 1:
if hasattr(args[0], "__len__"):
height, width = int(args[0][0]), int(args[0][1])
else:
height, width = int(args[0]), int(args[0])
else:
height, width = int(args[0]), int(args[1])
mtrx = [[value]*width for _ in xrange(height)]
return cls(mtrx)
# pylint: disable-msg=C0103
@classmethod
def Zero(cls, *args):
"""Creates a matrix filled with zeros.
@keyword shape: the shape of the matrix. Can be a single integer,
two integers or a tuple. If a single integer is
given here, the matrix is assumed to be square-shaped.
"""
result = cls.Fill(0, *args)
return result
# pylint: disable-msg=C0103
@classmethod
def Identity(cls, *args):
"""Creates an identity matrix.
@keyword shape: the shape of the matrix. Can be a single integer,
two integers or a tuple. If a single integer is
given here, the matrix is assumed to be square-shaped.
"""
# pylint: disable-msg=W0212
result = cls.Fill(0, *args)
for i in xrange(min(result.shape)):
result._data[i][i] = 1
return result
def _set_data(self, data=None):
"""Sets the data stored in the matrix"""
if data is not None:
self._data = [list(row) for row in data]
self._nrow = len(self._data)
if self._nrow > 0:
self._ncol = max(len(row) for row in self._data)
else:
self._ncol = 0
for row in self._data:
if len(row) < self._ncol:
row.extend([0]*(self._ncol-len(row)))
def _get_data(self):
"""Returns the data stored in the matrix as a list of lists"""
return [list(row) for row in self._data]
data = property(_get_data, _set_data)
@property
def shape(self):
"""Returns the shape of the matrix as a tuple"""
return self._nrow, self._ncol
def __add__(self, other):
"""Adds the given value to the matrix.
@param other: either a scalar or a matrix. Scalars will
be added to each element of the matrix. Matrices will
be added together elementwise.
@return: the result matrix
"""
if isinstance(other, Matrix):
if self.shape != other.shape:
raise ValueError("matrix shapes do not match")
return self.__class__([
[a+b for a, b in izip(row_a, row_b)]
for row_a, row_b in izip(self, other)
])
else:
return self.__class__([
[item+other for item in row] for row in self])
def __eq__(self, other):
"""Checks whether a given matrix is equal to another one"""
return isinstance(other, Matrix) and \
self._nrow == other._nrow and \
self._ncol == other._ncol and \
self._data == other._data
def __getitem__(self, i):
"""Returns a single item, a row or a column of the matrix
@param i: if a single integer, returns the M{i}th row as a list. If a
slice, returns the corresponding rows as another L{Matrix} object. If
a 2-tuple, the first element of the tuple is used to select a row and
the second is used to select a column.
"""
if isinstance(i, int):
return list(self._data[i])
elif isinstance(i, slice):
return self.__class__(self._data[i])
elif isinstance(i, tuple):
try:
first = i[0]
except IndexError:
first = slice(None)
try:
second = i[1]
except IndexError:
second = slice(None)
if type(first) == slice and type(second) == slice:
return self.__class__(row[second] for row in self._data[first])
elif type(first) == slice:
return [row[second] for row in self._data[first]]
else:
return self._data[first][second]
else:
raise IndexError("invalid matrix index")
def __hash__(self):
"""Returns a hash value for a matrix."""
return hash(self._nrow, self._ncol, self._data)
def __iadd__(self, other):
"""In-place addition of a matrix or scalar."""
if isinstance(other, Matrix):
if self.shape != other.shape:
raise ValueError("matrix shapes do not match")
for row_a, row_b in izip(self._data, other):
for i in xrange(len(row_a)):
row_a[i] += row_b[i]
else:
for row in self._data:
for i in xrange(len(row)):
row[i] += other
return self
def __isub__(self, other):
"""In-place subtraction of a matrix or scalar."""
if isinstance(other, Matrix):
if self.shape != other.shape:
raise ValueError("matrix shapes do not match")
for row_a, row_b in izip(self._data, other):
for i in xrange(len(row_a)):
row_a[i] -= row_b[i]
else:
for row in self._data:
for i in xrange(len(row)):
row[i] -= other
return self
def __ne__(self, other):
"""Checks whether a given matrix is not equal to another one"""
return not self == other
def __setitem__(self, i, value):
"""Sets a single item, a row or a column of the matrix
@param i: if a single integer, sets the M{i}th row as a list. If a
slice, sets the corresponding rows from another L{Matrix} object.
If a 2-tuple, the first element of the tuple is used to select a row
and the second is used to select a column.
@param value: the new value
"""
if isinstance(i, int):
# Setting a row
if len(value) != len(self._data[i]):
raise ValueError("new value must have %d items" % self._ncol)
self._data[i] = list(value)
elif isinstance(i, slice):
# Setting multiple rows
if len(value) != len(self._data[i]):
raise ValueError("new value must have %d items" % self._ncol)
if any(len(row) != self._ncol for row in value):
raise ValueError("rows of new value must have %d items" % \
self._ncol)
self._data[i] = [list(row) for row in value]
elif isinstance(i, tuple):
try:
first = i[0]
except IndexError:
first = slice(None)
try:
second = i[1]
except IndexError:
second = slice(None)
if type(first) == slice and type(second) == slice:
# Setting a submatrix
# TODO
raise NotImplementedError
elif type(first) == slice:
# Setting a submatrix
raise NotImplementedError
else:
# Setting a single element
self._data[first][second] = value
else:
raise IndexError("invalid matrix index")
def __sub__(self, other):
"""Subtracts the given value from the matrix.
@param other: either a scalar or a matrix. Scalars will
be subtracted from each element of the matrix. Matrices will
be subtracted together elementwise.
@return: the result matrix
"""
if isinstance(other, Matrix):
if self.shape != other.shape:
raise ValueError("matrix shapes do not match")
return self.__class__([
[a-b for a, b in izip(row_a, row_b)]
for row_a, row_b in izip(self, other)
])
else:
return self.__class__([
[item-other for item in row] for row in self])
def __repr__(self):
class_name = self.__class__.__name__
rows = ("[%s]" % ", ".join(repr(item) for item in row) for row in self)
return "%s([%s])" % (class_name, ", ".join(rows))
def __str__(self):
rows = ("[%s]" % ", ".join(repr(item) for item in row) for row in self)
return "[%s]" % "\n ".join(rows)
def __iter__(self):
"""Support for iteration.
This is actually implemented as a generator, so there is no need for a
separate iterator class. The generator returns I{copies} of the rows in
the matrix as lists to avoid messing around with the internals. Feel
free to do anything with the copies, the changes won't be reflected in
the original matrix."""
return (list(row) for row in self._data)
def __plot__(self, context, bbox, palette, **kwds):
"""Plots the matrix to the given Cairo context in the given box
Besides the usual self-explanatory plotting parameters (C{context},
C{bbox}, C{palette}), it accepts the following keyword arguments:
- C{style}: the style of the plot. C{boolean} is useful for plotting
matrices with boolean (C{True}/C{False} or 0/1) values: C{False}
will be shown with a white box and C{True} with a black box.
C{palette} uses the given palette to represent numbers by colors,
the minimum will be assigned to palette color index 0 and the maximum
will be assigned to the length of the palette. C{None} draws transparent
cell backgrounds only. The default style is C{boolean} (but it may
change in the future). C{None} values in the matrix are treated
specially in both cases: nothing is drawn in the cell corresponding
to C{None}.
- C{square}: whether the cells of the matrix should be square or not.
Default is C{True}.
- C{grid_width}: line width of the grid shown on the matrix. If zero or
negative, the grid is turned off. The grid is also turned off if the size
of a cell is less than three times the given line width. Default is C{1}.
Fractional widths are also allowed.
- C{border_width}: line width of the border drawn around the matrix.
If zero or negative, the border is turned off. Default is C{1}.
- C{row_names}: the names of the rows
- C{col_names}: the names of the columns.
- C{values}: values to be displayed in the cells. If C{None} or
C{False}, no values are displayed. If C{True}, the values come
from the matrix being plotted. If it is another matrix, the
values of that matrix are shown in the cells. In this case,
the shape of the value matrix must match the shape of the
matrix being plotted.
- C{value_format}: a format string or a callable that specifies how
the values should be plotted. If it is a callable, it must be a
function that expects a single value and returns a string.
Example: C{"%#.2f"} for floating-point numbers with always exactly
two digits after the decimal point. See the Python documentation of
the C{%} operator for details on the format string. If the format
string is not given, it defaults to the C{str} function.
If only the row names or the column names are given and the matrix
is square-shaped, the same names are used for both column and row
names.
"""
# pylint: disable-msg=W0142
# pylint: disable-msg=C0103
grid_width = float(kwds.get("grid_width", 1.))
border_width = float(kwds.get("border_width", 1.))
style = kwds.get("style", "boolean")
row_names = kwds.get("row_names")
col_names = kwds.get("col_names", row_names)
values = kwds.get("values")
value_format = kwds.get("value_format", str)
# Validations
if style not in ("boolean", "palette", "none", None):
raise ValueError("invalid style")
if style == "none":
style = None
if row_names is None and col_names is not None:
row_names = col_names
if row_names is not None:
row_names = [str(name) for name in islice(row_names, self._nrow)]
if len(row_names) < self._nrow:
row_names.extend([""]*(self._nrow-len(row_names)))
if col_names is not None:
col_names = [str(name) for name in islice(col_names, self._ncol)]
if len(col_names) < self._ncol:
col_names.extend([""]*(self._ncol-len(col_names)))
if values == False:
values = None
if values == True:
values = self
if isinstance(values, list):
values = Matrix(list)
if values is not None and not isinstance(values, Matrix):
raise TypeError("values must be None, False, True or a matrix")
if values is not None and values.shape != self.shape:
raise ValueError("values must be a matrix of size %s" % self.shape)
# Calculate text extents if needed
if row_names is not None or col_names is not None:
te = context.text_extents
space_width = te(" ")[4]
max_row_name_width = max([te(s)[4] for s in row_names])+space_width
max_col_name_width = max([te(s)[4] for s in col_names])+space_width
else:
max_row_name_width, max_col_name_width = 0, 0
# Calculate sizes
total_width = float(bbox.width)-max_row_name_width
total_height = float(bbox.height)-max_col_name_width
dx = total_width / self.shape[1]
dy = total_height / self.shape[0]
if kwds.get("square", True):
dx, dy = min(dx, dy), min(dx, dy)
total_width, total_height = dx*self.shape[1], dy*self.shape[0]
ox = bbox.left + (bbox.width - total_width - max_row_name_width) / 2.0
oy = bbox.top + (bbox.height - total_height - max_col_name_width) / 2.0
ox += max_row_name_width
oy += max_col_name_width
# Determine rescaling factors for the palette if needed
if style == "palette":
mi, ma = self.min(), self.max()
color_offset = mi
color_ratio = (len(palette)-1) / float(ma-mi)
# Validate grid width
if dx < 3*grid_width or dy < 3*grid_width:
grid_width = 0.
if grid_width > 0:
context.set_line_width(grid_width)
else:
# When the grid width is zero, we will still stroke the
# rectangles, but with the same color as the fill color
# of the cell - otherwise we would get thin white lines
# between the cells as a drawing artifact
context.set_line_width(1)
# Draw row names (if any)
context.set_source_rgb(0., 0., 0.)
if row_names is not None:
x, y = ox, oy
for heading in row_names:
_, _, _, h, xa, _ = context.text_extents(heading)
context.move_to(x-xa-space_width, y + (dy+h)/2.)
context.show_text(heading)
y += dy
# Draw column names (if any)
if col_names is not None:
context.save()
context.translate(ox, oy)
context.rotate(-1.5707963285) # pi/2
x, y = 0., 0.
for heading in col_names:
_, _, _, h, _, _ = context.text_extents(heading)
context.move_to(x+space_width, y + (dx+h)/2.)
context.show_text(heading)
y += dx
context.restore()
# Draw matrix
x, y = ox, oy
if style is None:
fill = lambda: None
else:
fill = context.fill_preserve
for row in self:
for item in row:
if item is None:
x += dx
continue
if style == "boolean":
if item:
context.set_source_rgb(0., 0., 0.)
else:
context.set_source_rgb(1., 1., 1.)
elif style == "palette":
cidx = int((item-color_offset)*color_ratio)
if cidx < 0:
cidx = 0
context.set_source_rgba(*palette.get(cidx))
context.rectangle(x, y, dx, dy)
if grid_width > 0:
fill()
context.set_source_rgb(0.5, 0.5, 0.5)
context.stroke()
else:
fill()
context.stroke()
x += dx
x, y = ox, y+dy
# Draw cell values
if values is not None:
x, y = ox, oy
context.set_source_rgb(0., 0., 0.)
for row in values.data:
if hasattr(value_format, "__call__"):
values = [value_format(item) for item in row]
else:
values = [value_format % item for item in row]
for item in values:
th, tw = context.text_extents(item)[3:5]
context.move_to(x+(dx-tw)/2., y+(dy+th)/2.)
context.show_text(item)
x += dx
x, y = ox, y+dy
# Draw borders
if border_width > 0:
context.set_line_width(border_width)
context.set_source_rgb(0., 0., 0.)
context.rectangle(ox, oy, dx*self.shape[1], dy*self.shape[0])
context.stroke()
def min(self, dim=None):
"""Returns the minimum of the matrix along the given dimension
@param dim: the dimension. 0 means determining the column minimums, 1 means
determining the row minimums. If C{None}, the global minimum is
returned.
"""
if dim == 1:
return [min(row) for row in self._data]
if dim == 0:
return [min(row[idx] for row in self._data) \
for idx in xrange(self._ncol)]
return min(min(row) for row in self._data)
def max(self, dim=None):
"""Returns the maximum of the matrix along the given dimension
@param dim: the dimension. 0 means determining the column maximums, 1 means
determining the row maximums. If C{None}, the global maximum is
returned.
"""
if dim == 1:
return [max(row) for row in self._data]
if dim == 0:
return [max(row[idx] for row in self._data) \
for idx in xrange(self._ncol)]
return max(max(row) for row in self._data)
class DyadCensus(tuple):
"""Dyad census of a graph.
This is a pretty simple class - basically it is a tuple, but it allows
the user to refer to its individual items by the names C{mutual} (or
C{mut}), C{asymmetric} (or C{asy} or C{asym} or C{asymm}) and C{null}.
Examples:
>>> from igraph import Graph
>>> g=Graph.Erdos_Renyi(100, 0.2, directed=True)
>>> dc=g.dyad_census()
>>> print dc.mutual #doctest:+SKIP
179
>>> print dc["asym"] #doctest:+SKIP
1609
>>> print tuple(dc), list(dc) #doctest:+SKIP
(179, 1609, 3162) [179, 1609, 3162]
>>> print sorted(dc.as_dict().items()) #doctest:+ELLIPSIS
[('asymmetric', ...), ('mutual', ...), ('null', ...)]
@undocumented: _remap
"""
_remap = {"mutual": 0, "mut": 0, "sym": 0, "symm": 0,
"asy": 1, "asym": 1, "asymm": 1, "asymmetric": 1, "null": 2}
def __getitem__(self, idx):
return tuple.__getitem__(self, self._remap.get(idx, idx))
def __getattr__(self, attr):
if attr in self._remap:
return tuple.__getitem__(self, self._remap[attr])
raise AttributeError("no such attribute: %s" % attr)
def __repr__(self):
return "DyadCensus((%d, %d, %d))" % self
def __str__(self):
return "%d mutual, %d asymmetric, %d null dyads" % self
def as_dict(self):
"""Converts the dyad census to a dict using the known dyad names."""
return {"mutual": self[0], "asymmetric": self[1], "null": self[2]}
class TriadCensus(tuple):
"""Triad census of a graph.
This is a pretty simple class - basically it is a tuple, but it allows
the user to refer to its individual items by the following triad names:
- C{003} -- the empty graph
- C{012} -- a graph with a single directed edge (C{A --> B, C})
- C{102} -- a graph with a single mutual edge (C{A <-> B, C})
- C{021D} -- the binary out-tree (C{A <-- B --> C})
- C{021U} -- the binary in-tree (C{A --> B <-- C})
- C{021C} -- the directed line (C{A --> B --> C})
- C{111D} -- C{A <-> B <-- C}
- C{111U} -- C{A <-> B --> C}
- C{030T} -- C{A --> B <-- C, A --> C}
- C{030C} -- C{A <-- B <-- C, A --> C}
- C{201} -- C{A <-> B <-> C}
- C{120D} -- C{A <-- B --> C, A <-> C}
- C{120U} -- C{A --> B <-- C, A <-> C}
- C{120C} -- C{A --> B --> C, A <-> C}
- C{210C} -- C{A --> B <-> C, A <-> C}
- C{300} -- the complete graph (C{A <-> B <-> C, A <-> C})
Attribute and item accessors are provided. Due to the syntax of Python,
attribute names are not allowed to start with a number, therefore the
triad names must be prepended with a lowercase C{t} when accessing
them as attributes. This is not necessary with the item accessor syntax.
Examples:
>>> from igraph import Graph
>>> g=Graph.Erdos_Renyi(100, 0.2, directed=True)
>>> tc=g.triad_census()
>>> print tc.t003 #doctest:+SKIP
39864
>>> print tc["030C"] #doctest:+SKIP
1206
"""
_remap = {"003": 0, "012": 1, "102": 2, "021D": 3, "021U": 4, "021C": 5, \
"111D": 6, "111U": 7, "030T": 8, "030C": 9, "201": 10, "120D": 11, \
"120U": 12, "120C": 13, "210": 14, "300": 15}
def __getitem__(self, idx):
if isinstance(idx, basestring):
idx = idx.upper()
return tuple.__getitem__(self, self._remap.get(idx, idx))
def __getattr__(self, attr):
if isinstance(attr, basestring) and attr[0] == 't' \
and attr[1:].upper() in self._remap:
return tuple.__getitem__(self, self._remap[attr[1:].upper()])
raise AttributeError("no such attribute: %s" % attr)
def __repr__(self):
return "TriadCensus((%s))" % ", ".join(str(item) for item in self)
def __str__(self):
maxidx = len(self)
maxcount = max(self)
numwidth = len(str(maxcount))
captionwidth = max(len(key) for key in self._remap)
colcount = 4
rowcount = maxidx / colcount
if rowcount * colcount < maxidx:
rowcount += 1
invmap = dict((v, k) for k, v in self._remap.iteritems())
result, row, idx = [], [], 0
for _ in xrange(rowcount):
for _ in xrange(colcount):
if idx >= maxidx:
break
row.append("%-*s: %*d" % (captionwidth, invmap.get(idx, ""),
numwidth, self[idx]))
idx += 1
result.append(" | ".join(row))
row = []
return "\n".join(result)
class UniqueIdGenerator(object):
"""A dictionary-like class that can be used to assign unique IDs to
names (say, vertex names).
Usage:
>>> gen = UniqueIdGenerator()
>>> gen["A"]
0
>>> gen["B"]
1
>>> gen["C"]
2
>>> gen["A"] # Retrieving already existing ID
0
>>> gen.add("D") # Synonym of gen["D"]
3
>>> len(gen) # Number of already used IDs
4
>>> "C" in gen
True
>>> "E" in gen
False
"""
def __init__(self, id_generator=None, initial=None):
"""Creates a new unique ID generator. `id_generator` specifies how do we
assign new IDs to elements that do not have an ID yet. If it is `None`,
elements will be assigned integer identifiers starting from 0. If it is
an integer, elements will be assigned identifiers starting from the given
integer. If it is an iterator or generator, its `next` method will be
called every time a new ID is needed."""
if id_generator is None:
id_generator = 0
if isinstance(id_generator, int):
import itertools
self._generator = itertools.count(id_generator)
else:
self._generator = id_generator
self._ids = {}
if initial:
for value in initial:
self.add(value)
def __contains__(self, item):
"""Checks whether `item` already has an ID or not."""
return item in self._ids
def __getitem__(self, item):
"""Retrieves the ID corresponding to `item`. Generates a new ID for
`item` if it is the first time we request an ID for it."""
try:
return self._ids[item]
except KeyError:
self._ids[item] = self._generator.next()
return self._ids[item]
def __setitem__(self, item, value):
"""Overrides the ID for `item`."""
self._ids[item] = value
def __len__(self):
""""Returns the number of items"""
return len(self._ids)
def reverse_dict(self):
"""Returns the reverse mapping, i.e., the one that maps from generated
IDs to their corresponding objects"""
return dict((v, k) for k, v in self._ids.iteritems())
def values(self):
"""Returns the values stored so far. If the generator generates items
according to the standard sorting order, the values returned will be
exactly in the order they were added. This holds for integer IDs for
instance (but for many other ID generators as well)."""
return sorted(self._ids.keys(), key = self._ids.__getitem__)
add = __getitem__
|
igraph/xdata-igraph
|
interfaces/python/igraph/datatypes.py
|
Python
|
gpl-2.0
| 28,500
|
"""
Higher order classes and functions for Libvirt Sandbox (lxc) container testing
:copyright: 2013 Red Hat Inc.
"""
import datetime
import time
import logging
import lvsb_base
# This utility function lets test-modules quickly create a list of all
# sandbox aggregate types, themselves containing a list of individual
# sandboxes.
def make_sandboxes(params, env, extra_ns=None):
"""
Return list of instantiated lvsb_testsandboxes classes from params
:param params: an undiluted Params instance
:param env: the current env instance
:param extra_ns: An extra, optional namespace to search for classes
"""
namespace = globals() # stuff in this module
# For specialized sandbox types, allow their class to be defined
# inside test module or elsewhere.
if extra_ns is not None:
namespace.update(extra_ns) # copy in additional symbols
names = namespace.keys()
# Test may require more than one sandbox agregator class
pobs = params.objects('lvsb_testsandboxes') # manditory parameter
# filter out non-TestSandboxes subclasses
for name in names:
try:
if not issubclass(namespace[name], lvsb_base.TestSandboxes):
# Working on name list, okay to modify dict
del namespace[name]
except TypeError:
# Symbol wasn't a class, just ignore it
pass
# Return a list of instantiated sandbox_testsandboxes's classes
return [namespace[type_name](params, env) for type_name in pobs]
# TestSandboxes subclasses defined below, or inside other namespaces like
# a test module. They simply help the test-module iterate over many
# aggregate manager classes and the sandboxes they contain.
class TestSimpleSandboxes(lvsb_base.TestSandboxes):
"""
Simplistic sandbox aggregate manager that just executes a command
"""
def __init__(self, params, env):
"""
Initialize to run, all SandboxCommandBase's
"""
super(TestSimpleSandboxes, self).__init__(params, env)
self.init_sandboxes() # create instances of SandboxCommandBase
# Point all of them at the same local uri
self.for_each(lambda sb: sb.add_optarg('-c', self.uri))
# Use each instances name() method to produce name argument
self.for_each(lambda sb: sb.add_optarg('-n', sb.name))
# Command should follow after a --
self.for_each(lambda sb: sb.add_mm())
# Each one gets the same command (that's why it's simple)
self.for_each(lambda sb: sb.add_pos(self.command))
def results(self, each_timeout=5):
"""
Run sandboxe(s), allowing each_timeout to complete, return output list
"""
# Sandboxes run asynchronously, prevent them from running forever
start = datetime.datetime.now()
total_timeout_seconds = each_timeout * self.count
timeout_at = start + datetime.timedelta(seconds=total_timeout_seconds)
# No need to write a method just to call the run method
self.for_each(lambda sb: sb.run())
while datetime.datetime.now() < timeout_at:
# Wait until number of running sandboxes is zero
if bool(self.are_running()):
time.sleep(0.1) # Don't busy-wait
continue
else: # none are running
break
# Needed for accurate time in logging message below
end = datetime.datetime.now()
# Needed for logging message if none exited before timeout
still_running = self.are_running()
# Cause all exited sessions to clean up when sb.stop() called
self.for_each(lambda sb: sb.auto_clean(True))
# If raise, auto_clean will make sure cleanup happens
if bool(still_running):
raise lvsb_base.SandboxException("%d of %d sandboxes are still "
"running after "
"the timeout of %d seconds."
% (still_running,
self.count,
total_timeout_seconds))
# Kill off all sandboxes, just to be safe
self.for_each(lambda sb: sb.stop())
logging.info("%d sandboxe(s) finished in %s", self.count,
end - start)
# Return a list of stdout contents from each
return self.for_each(lambda sb: sb.recv())
|
spiceqa/virt-test
|
virttest/lvsb.py
|
Python
|
gpl-2.0
| 4,505
|
# -*- coding: utf-8 -*-
# Copyright 2007-2021 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
from hyperspy._signals.signal1d import Signal1D
from hyperspy._signals.signal2d import Signal2D
from hyperspy.decorators import lazifyTestClass
@lazifyTestClass
class Test2D:
def setup_method(self, method):
self.im = Signal2D(np.random.random((2, 3)))
def test_to_signal1D(self):
s = self.im.to_signal1D()
assert isinstance(s, Signal1D)
assert s.data.shape == self.im.data.T.shape
if not s._lazy:
assert s.data.flags["C_CONTIGUOUS"]
@lazifyTestClass
class Test3D:
def setup_method(self, method):
self.im = Signal2D(np.random.random((2, 3, 4)))
def test_to_signal1D(self):
s = self.im.to_signal1D()
assert isinstance(s, Signal1D)
assert s.data.shape == (3, 4, 2)
if not s._lazy:
assert s.data.flags["C_CONTIGUOUS"]
@lazifyTestClass
class Test4D:
def setup_method(self, method):
self.s = Signal2D(np.random.random((2, 3, 4, 5)))
def test_to_image(self):
s = self.s.to_signal1D()
assert isinstance(s, Signal1D)
assert s.data.shape == (3, 4, 5, 2)
if not s._lazy:
assert s.data.flags["C_CONTIGUOUS"]
|
thomasaarholt/hyperspy
|
hyperspy/tests/signals/test_image.py
|
Python
|
gpl-3.0
| 1,942
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
def forwards(self, orm):
for field in ('species', 'cultivar', 'other', 'gender', 'bloom_period',
'fruit_period', 'fact_sheet', 'plant_guide'):
orm.Species.objects.filter(**{field + '__isnull': True}) \
.update(**{field: ''})
def backwards(self, orm):
"Write your backwards methods here."
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'treemap.audit': {
'Meta': {'object_name': 'Audit'},
'action': ('django.db.models.fields.IntegerField', [], {}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
'current_value': ('django.db.models.fields.TextField', [], {'null': 'True', 'db_index': 'True'}),
'field': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instance': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.Instance']", 'null': 'True', 'blank': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'db_index': 'True'}),
'model_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'db_index': 'True'}),
'previous_value': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'ref': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.Audit']", 'null': 'True'}),
'requires_auth': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.User']"})
},
u'treemap.benefitcurrencyconversion': {
'Meta': {'object_name': 'BenefitCurrencyConversion'},
'co2_lb_to_currency': ('django.db.models.fields.FloatField', [], {}),
'currency_symbol': ('django.db.models.fields.CharField', [], {'max_length': '5'}),
'electricity_kwh_to_currency': ('django.db.models.fields.FloatField', [], {}),
'h20_gal_to_currency': ('django.db.models.fields.FloatField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'natural_gas_kbtu_to_currency': ('django.db.models.fields.FloatField', [], {}),
'nox_lb_to_currency': ('django.db.models.fields.FloatField', [], {}),
'o3_lb_to_currency': ('django.db.models.fields.FloatField', [], {}),
'pm10_lb_to_currency': ('django.db.models.fields.FloatField', [], {}),
'sox_lb_to_currency': ('django.db.models.fields.FloatField', [], {}),
'voc_lb_to_currency': ('django.db.models.fields.FloatField', [], {})
},
u'treemap.boundary': {
'Meta': {'object_name': 'Boundary'},
'category': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'geom': ('django.contrib.gis.db.models.fields.MultiPolygonField', [], {'srid': '3857', 'db_column': "u'the_geom_webmercator'"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'sort_order': ('django.db.models.fields.IntegerField', [], {})
},
u'treemap.fieldpermission': {
'Meta': {'unique_together': "((u'model_name', u'field_name', u'role', u'instance'),)", 'object_name': 'FieldPermission'},
'field_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instance': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.Instance']"}),
'model_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'permission_level': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'role': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.Role']"})
},
u'treemap.instance': {
'Meta': {'object_name': 'Instance'},
'adjuncts_timestamp': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}),
'basemap_data': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'basemap_type': ('django.db.models.fields.CharField', [], {'default': "u'google'", 'max_length': '255'}),
'boundaries': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['treemap.Boundary']", 'null': 'True', 'blank': 'True'}),
'bounds': ('django.contrib.gis.db.models.fields.MultiPolygonField', [], {'srid': '3857'}),
'center_override': ('django.contrib.gis.db.models.fields.PointField', [], {'srid': '3857', 'null': 'True', 'blank': 'True'}),
'config': ('treemap.json_field.JSONField', [], {'blank': 'True'}),
'default_role': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'default_role'", 'to': u"orm['treemap.Role']"}),
'eco_benefits_conversion': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.BenefitCurrencyConversion']", 'null': 'True', 'blank': 'True'}),
'geo_rev': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'itree_region_default': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'logo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'url_name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['treemap.User']", 'null': 'True', 'through': u"orm['treemap.InstanceUser']", 'blank': 'True'})
},
u'treemap.instanceuser': {
'Meta': {'unique_together': "((u'instance', u'user'),)", 'object_name': 'InstanceUser'},
'admin': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instance': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.Instance']"}),
'reputation': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'role': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.Role']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.User']"})
},
u'treemap.itreecodeoverride': {
'Meta': {'unique_together': "((u'instance_species', u'region'),)", 'object_name': 'ITreeCodeOverride'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instance_species': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.Species']"}),
'itree_code': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'region': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.ITreeRegion']"})
},
u'treemap.itreeregion': {
'Meta': {'object_name': 'ITreeRegion'},
'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40'}),
'geometry': ('django.contrib.gis.db.models.fields.MultiPolygonField', [], {'srid': '3857'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'treemap.mapfeature': {
'Meta': {'object_name': 'MapFeature'},
'address_city': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'address_street': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'address_zip': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True', 'blank': 'True'}),
'feature_type': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'geom': ('django.contrib.gis.db.models.fields.PointField', [], {'srid': '3857', 'db_column': "u'the_geom_webmercator'"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instance': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.Instance']"}),
'readonly': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'udfs': (u'treemap.udf.UDFField', [], {'db_index': 'True', 'blank': 'True'})
},
u'treemap.mapfeaturephoto': {
'Meta': {'object_name': 'MapFeaturePhoto'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'instance': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.Instance']"}),
'map_feature': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.MapFeature']"}),
'thumbnail': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'})
},
u'treemap.plot': {
'Meta': {'object_name': 'Plot', '_ormbases': [u'treemap.MapFeature']},
'length': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
u'mapfeature_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['treemap.MapFeature']", 'unique': 'True', 'primary_key': 'True'}),
'owner_orig_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'width': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'})
},
u'treemap.reputationmetric': {
'Meta': {'object_name': 'ReputationMetric'},
'action': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'approval_score': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'denial_score': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'direct_write_score': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instance': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.Instance']"}),
'model_name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'treemap.role': {
'Meta': {'object_name': 'Role'},
'default_permission': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instance': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.Instance']", 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'rep_thresh': ('django.db.models.fields.IntegerField', [], {})
},
u'treemap.species': {
'Meta': {'object_name': 'Species'},
'bloom_period': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'common_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'cultivar': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'fact_sheet': ('django.db.models.fields.URLField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'fall_conspicuous': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'flower_conspicuous': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'fruit_period': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'gender': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'genus': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instance': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.Instance']"}),
'max_dbh': ('django.db.models.fields.IntegerField', [], {'default': '200'}),
'max_height': ('django.db.models.fields.IntegerField', [], {'default': '800'}),
'native_status': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'other': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'otm_code': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'palatable_human': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'plant_guide': ('django.db.models.fields.URLField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'species': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'udfs': (u'treemap.udf.UDFField', [], {'db_index': 'True', 'blank': 'True'}),
'wildlife_value': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'})
},
u'treemap.staticpage': {
'Meta': {'object_name': 'StaticPage'},
'content': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instance': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.Instance']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'treemap.tree': {
'Meta': {'object_name': 'Tree'},
'canopy_height': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'date_planted': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'date_removed': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'diameter': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'height': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instance': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.Instance']"}),
'plot': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.Plot']"}),
'readonly': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'species': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.Species']", 'null': 'True', 'blank': 'True'}),
'udfs': (u'treemap.udf.UDFField', [], {'db_index': 'True', 'blank': 'True'})
},
u'treemap.treefavorite': {
'Meta': {'unique_together': "((u'user', u'tree'),)", 'object_name': 'TreeFavorite'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'tree': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.Tree']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.User']"})
},
u'treemap.treephoto': {
'Meta': {'object_name': 'TreePhoto', '_ormbases': [u'treemap.MapFeaturePhoto']},
u'mapfeaturephoto_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['treemap.MapFeaturePhoto']", 'unique': 'True', 'primary_key': 'True'}),
'tree': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.Tree']"})
},
u'treemap.user': {
'Meta': {'object_name': 'User'},
'allow_email_contact': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '30', 'blank': 'True'}),
'make_info_public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'organization': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'photo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'thumbnail': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'treemap.userdefinedcollectionvalue': {
'Meta': {'object_name': 'UserDefinedCollectionValue'},
'data': (u'django_hstore.fields.DictionaryField', [], {}),
'field_definition': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.UserDefinedFieldDefinition']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model_id': ('django.db.models.fields.IntegerField', [], {})
},
u'treemap.userdefinedfielddefinition': {
'Meta': {'object_name': 'UserDefinedFieldDefinition'},
'datatype': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instance': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['treemap.Instance']"}),
'iscollection': ('django.db.models.fields.BooleanField', [], {}),
'model_type': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
}
}
complete_apps = ['treemap']
symmetrical = True
|
ctaylo37/OTM2
|
opentreemap/treemap/migrations/0079_convert_species_null_values_to_empty_strings.py
|
Python
|
gpl-3.0
| 22,255
|
#!/usr/bin/env python2
# vim:fileencoding=utf-8
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2014, Kovid Goyal <kovid at kovidgoyal.net>'
import sys, os, re
from functools import partial
from PyQt5.Qt import (
QGridLayout, QToolButton, QIcon, QRadioButton, QMenu, QApplication, Qt,
QSize, QWidget, QLabel, QStackedLayout, QPainter, QRect, QVBoxLayout,
QCursor, QEventLoop, QKeySequence, pyqtSignal, QTimer, QHBoxLayout)
from calibre.ebooks.oeb.polish.container import Container
from calibre.ebooks.oeb.polish.utils import guess_type
from calibre.gui2 import info_dialog
from calibre.gui2.progress_indicator import ProgressIndicator
from calibre.gui2.tweak_book.editor import syntax_from_mime
from calibre.gui2.tweak_book.diff.view import DiffView
from calibre.gui2.tweak_book.widgets import Dialog
from calibre.gui2.widgets2 import HistoryLineEdit2
from calibre.utils.filenames import samefile
from calibre.utils.icu import numeric_sort_key
class BusyWidget(QWidget): # {{{
def __init__(self, parent):
QWidget.__init__(self, parent)
l = QVBoxLayout()
self.setLayout(l)
l.addStretch(10)
self.pi = ProgressIndicator(self, 128)
l.addWidget(self.pi, alignment=Qt.AlignHCenter)
self.dummy = QLabel('<h2>\xa0')
l.addSpacing(10)
l.addWidget(self.dummy, alignment=Qt.AlignHCenter)
l.addStretch(10)
self.text = _('Calculating differences, please wait...')
def paintEvent(self, ev):
br = ev.region().boundingRect()
QWidget.paintEvent(self, ev)
p = QPainter(self)
p.setClipRect(br)
f = p.font()
f.setBold(True)
f.setPointSize(20)
p.setFont(f)
p.setPen(Qt.SolidLine)
r = QRect(0, self.dummy.geometry().top() + 10, self.geometry().width(), 150)
p.drawText(r, Qt.AlignHCenter | Qt.AlignTop | Qt.TextSingleLine, self.text)
p.end()
# }}}
class Cache(object):
def __init__(self):
self._left, self._right = {}, {}
self.left, self.right = self._left.get, self._right.get
self.set_left, self.set_right = self._left.__setitem__, self._right.__setitem__
def changed_files(list_of_names1, list_of_names2, get_data1, get_data2):
list_of_names1, list_of_names2 = frozenset(list_of_names1), frozenset(list_of_names2)
changed_names = set()
cache = Cache()
common_names = list_of_names1.intersection(list_of_names2)
for name in common_names:
left, right = get_data1(name), get_data2(name)
if len(left) == len(right) and left == right:
continue
cache.set_left(name, left), cache.set_right(name, right)
changed_names.add(name)
removals = list_of_names1 - common_names
adds = set(list_of_names2 - common_names)
adata, rdata = {a:get_data2(a) for a in adds}, {r:get_data1(r) for r in removals}
ahash = {a:hash(d) for a, d in adata.iteritems()}
rhash = {r:hash(d) for r, d in rdata.iteritems()}
renamed_names, removed_names, added_names = {}, set(), set()
for name, rh in rhash.iteritems():
for n, ah in ahash.iteritems():
if ah == rh:
renamed_names[name] = n
adds.discard(n)
break
else:
cache.set_left(name, rdata[name])
removed_names.add(name)
for name in adds:
cache.set_right(name, adata[name])
added_names.add(name)
return cache, changed_names, renamed_names, removed_names, added_names
def get_decoded_raw(name):
from calibre.ebooks.chardet import xml_to_unicode, force_encoding
with open(name, 'rb') as f:
raw = f.read()
syntax = syntax_from_mime(name, guess_type(name))
if syntax is None:
try:
raw = raw.decode('utf-8')
except ValueError:
pass
elif syntax != 'raster_image':
if syntax in {'html', 'xml'}:
raw = xml_to_unicode(raw, verbose=True)[0]
else:
m = re.search(br"coding[:=]\s*([-\w.]+)", raw[:1024], flags=re.I)
if m is not None and m.group(1) != '8bit':
enc = m.group(1)
if enc == b'unicode':
enc = 'utf-8'
else:
enc = force_encoding(raw, verbose=True)
try:
raw = raw.decode(enc)
except (LookupError, ValueError):
pass
return raw, syntax
def file_diff(left, right):
(raw1, syntax1), (raw2, syntax2) = map(get_decoded_raw, (left, right))
if type(raw1) is not type(raw2):
raw1, raw2 = open(left, 'rb').read(), open(right, 'rb').read()
cache = Cache()
cache.set_left(left, raw1), cache.set_right(right, raw2)
changed_names = {} if raw1 == raw2 else {left:right}
return cache, {left:syntax1, right:syntax2}, changed_names, {}, set(), set()
def dir_diff(left, right):
ldata, rdata, lsmap, rsmap = {}, {}, {}, {}
for base, data, smap in ((left, ldata, lsmap), (right, rdata, rsmap)):
for dirpath, dirnames, filenames in os.walk(base):
for filename in filenames:
path = os.path.join(dirpath, filename)
name = os.path.relpath(path, base)
data[name], smap[name] = get_decoded_raw(path)
cache, changed_names, renamed_names, removed_names, added_names = changed_files(
ldata, rdata, ldata.get, rdata.get)
syntax_map = {name:lsmap[name] for name in changed_names}
syntax_map.update({name:lsmap[name] for name in renamed_names})
syntax_map.update({name:rsmap[name] for name in added_names})
syntax_map.update({name:lsmap[name] for name in removed_names})
return cache, syntax_map, changed_names, renamed_names, removed_names, added_names
def container_diff(left, right):
left_names, right_names = set(left.name_path_map), set(right.name_path_map)
if left.cloned or right.cloned:
# Since containers are often clones of each other, as a performance
# optimization, discard identical names that point to the same physical
# file, without needing to read the file's contents.
# First commit dirtied names
for c in (left, right):
Container.commit(c, keep_parsed=True)
samefile_names = {name for name in left_names & right_names if samefile(
left.name_path_map[name], right.name_path_map[name])}
left_names -= samefile_names
right_names -= samefile_names
cache, changed_names, renamed_names, removed_names, added_names = changed_files(
left_names, right_names, left.raw_data, right.raw_data)
def syntax(container, name):
mt = container.mime_map[name]
return syntax_from_mime(name, mt)
syntax_map = {name:syntax(left, name) for name in changed_names}
syntax_map.update({name:syntax(left, name) for name in renamed_names})
syntax_map.update({name:syntax(right, name) for name in added_names})
syntax_map.update({name:syntax(left, name) for name in removed_names})
return cache, syntax_map, changed_names, renamed_names, removed_names, added_names
def ebook_diff(path1, path2):
from calibre.ebooks.oeb.polish.container import get_container
left = get_container(path1, tweak_mode=True)
right = get_container(path2, tweak_mode=True)
return container_diff(left, right)
class Diff(Dialog):
revert_requested = pyqtSignal()
line_activated = pyqtSignal(object, object, object)
def __init__(self, revert_button_msg=None, parent=None, show_open_in_editor=False, show_as_window=False):
self.context = 3
self.beautify = False
self.apply_diff_calls = []
self.show_open_in_editor = show_open_in_editor
self.revert_button_msg = revert_button_msg
Dialog.__init__(self, _('Differences between books'), 'diff-dialog', parent=parent)
self.setWindowFlags(self.windowFlags() | Qt.WindowMinMaxButtonsHint)
if show_as_window:
self.setWindowFlags(Qt.Window)
self.view.line_activated.connect(self.line_activated)
def sizeHint(self):
geom = QApplication.instance().desktop().availableGeometry(self)
return QSize(int(0.9 * geom.width()), int(0.8 * geom.height()))
def setup_ui(self):
self.setWindowIcon(QIcon(I('diff.png')))
self.stacks = st = QStackedLayout(self)
self.busy = BusyWidget(self)
self.w = QWidget(self)
st.addWidget(self.busy), st.addWidget(self.w)
self.setLayout(st)
self.l = l = QGridLayout()
self.w.setLayout(l)
self.view = v = DiffView(self, show_open_in_editor=self.show_open_in_editor)
l.addWidget(v, l.rowCount(), 0, 1, -1)
r = l.rowCount()
self.bp = b = QToolButton(self)
b.setIcon(QIcon(I('back.png')))
b.clicked.connect(partial(self.view.next_change, -1))
b.setToolTip(_('Go to previous change') + ' [p]')
b.setText(_('&Previous change')), b.setToolButtonStyle(Qt.ToolButtonTextBesideIcon)
l.addWidget(b, r, 0)
self.bn = b = QToolButton(self)
b.setIcon(QIcon(I('forward.png')))
b.clicked.connect(partial(self.view.next_change, 1))
b.setToolTip(_('Go to next change') + ' [n]')
b.setText(_('&Next change')), b.setToolButtonStyle(Qt.ToolButtonTextBesideIcon)
l.addWidget(b, r, 1)
self.search = s = HistoryLineEdit2(self)
s.initialize('diff_search_history')
l.addWidget(s, r, 2)
s.setPlaceholderText(_('Search for text'))
s.returnPressed.connect(partial(self.do_search, False))
self.sbn = b = QToolButton(self)
b.setIcon(QIcon(I('arrow-down.png')))
b.clicked.connect(partial(self.do_search, False))
b.setToolTip(_('Find next match'))
b.setText(_('Next &match')), b.setToolButtonStyle(Qt.ToolButtonTextBesideIcon)
l.addWidget(b, r, 3)
self.sbp = b = QToolButton(self)
b.setIcon(QIcon(I('arrow-up.png')))
b.clicked.connect(partial(self.do_search, True))
b.setToolTip(_('Find previous match'))
b.setText(_('P&revious match')), b.setToolButtonStyle(Qt.ToolButtonTextBesideIcon)
l.addWidget(b, r, 4)
self.lb = b = QRadioButton(_('Left panel'), self)
b.setToolTip(_('Perform search in the left panel'))
l.addWidget(b, r, 5)
self.rb = b = QRadioButton(_('Right panel'), self)
b.setToolTip(_('Perform search in the right panel'))
l.addWidget(b, r, 6)
b.setChecked(True)
self.pb = b = QToolButton(self)
b.setIcon(QIcon(I('config.png')))
b.setText(_('&Options')), b.setToolButtonStyle(Qt.ToolButtonTextBesideIcon)
b.setToolTip(_('Change how the differences are displayed'))
b.setPopupMode(b.InstantPopup)
m = QMenu(b)
b.setMenu(m)
cm = self.cm = QMenu(_('Lines of context around each change'))
for i in (3, 5, 10, 50):
cm.addAction(_('Show %d lines of context') % i, partial(self.change_context, i))
cm.addAction(_('Show all text'), partial(self.change_context, None))
self.beautify_action = m.addAction('', self.toggle_beautify)
self.set_beautify_action_text()
m.addMenu(cm)
l.addWidget(b, r, 7)
self.hl = QHBoxLayout()
l.addLayout(self.hl, l.rowCount(), 0, 1, -1)
self.names = QLabel('')
self.hl.addWidget(self.names, r)
self.bb.setStandardButtons(self.bb.Close)
if self.revert_button_msg is not None:
self.rvb = b = self.bb.addButton(self.revert_button_msg, self.bb.ActionRole)
b.setIcon(QIcon(I('edit-undo.png'))), b.setAutoDefault(False)
b.clicked.connect(self.revert_requested)
b.clicked.connect(self.reject)
self.bb.button(self.bb.Close).setDefault(True)
self.hl.addWidget(self.bb, r)
self.view.setFocus(Qt.OtherFocusReason)
def break_cycles(self):
self.view = None
for x in ('revert_requested', 'line_activated'):
try:
getattr(self, x).disconnect()
except:
pass
def do_search(self, reverse):
text = unicode(self.search.text())
if not text.strip():
return
v = self.view.view.left if self.lb.isChecked() else self.view.view.right
v.search(text, reverse=reverse)
def change_context(self, context):
if context == self.context:
return
self.context = context
self.refresh()
def refresh(self):
with self:
self.view.clear()
for args, kwargs in self.apply_diff_calls:
kwargs['context'] = self.context
kwargs['beautify'] = self.beautify
self.view.add_diff(*args, **kwargs)
self.view.finalize()
def toggle_beautify(self):
self.beautify = not self.beautify
self.set_beautify_action_text()
self.refresh()
def set_beautify_action_text(self):
self.beautify_action.setText(
_('Beautify files before comparing them') if not self.beautify else
_('Do not beautify files before comparing'))
def __enter__(self):
self.stacks.setCurrentIndex(0)
self.busy.pi.startAnimation()
QApplication.setOverrideCursor(QCursor(Qt.WaitCursor))
QApplication.processEvents(QEventLoop.ExcludeUserInputEvents | QEventLoop.ExcludeSocketNotifiers)
def __exit__(self, *args):
self.busy.pi.stopAnimation()
self.stacks.setCurrentIndex(1)
QApplication.restoreOverrideCursor()
def set_names(self, names):
if isinstance(names, tuple):
self.names.setText('%s <--> %s' % names)
else:
self.names.setText('')
def ebook_diff(self, path1, path2, names=None):
self.set_names(names)
with self:
identical = self.apply_diff(_('The books are identical'), *ebook_diff(path1, path2))
self.view.finalize()
if identical:
self.reject()
def container_diff(self, left, right, identical_msg=None, names=None):
self.set_names(names)
with self:
identical = self.apply_diff(identical_msg or _('No changes found'), *container_diff(left, right))
self.view.finalize()
if identical:
self.reject()
def file_diff(self, left, right, identical_msg=None):
with self:
identical = self.apply_diff(identical_msg or _('The files are identical'), *file_diff(left, right))
self.view.finalize()
if identical:
self.reject()
def dir_diff(self, left, right, identical_msg=None):
with self:
identical = self.apply_diff(identical_msg or _('The directories are identical'), *dir_diff(left, right))
self.view.finalize()
if identical:
self.reject()
def apply_diff(self, identical_msg, cache, syntax_map, changed_names, renamed_names, removed_names, added_names):
self.view.clear()
self.apply_diff_calls = calls = []
def add(args, kwargs):
self.view.add_diff(*args, **kwargs)
calls.append((args, kwargs))
if len(changed_names) + len(renamed_names) + len(removed_names) + len(added_names) < 1:
info_dialog(self, _('No changes found'), identical_msg, show=True)
return True
kwargs = lambda name: {'context':self.context, 'beautify':self.beautify, 'syntax':syntax_map.get(name, None)}
if isinstance(changed_names, dict):
for name, other_name in sorted(changed_names.iteritems(), key=lambda x:numeric_sort_key(x[0])):
args = (name, other_name, cache.left(name), cache.right(other_name))
add(args, kwargs(name))
else:
for name in sorted(changed_names, key=numeric_sort_key):
args = (name, name, cache.left(name), cache.right(name))
add(args, kwargs(name))
for name in sorted(added_names, key=numeric_sort_key):
args = (_('[%s was added]') % name, name, None, cache.right(name))
add(args, kwargs(name))
for name in sorted(removed_names, key=numeric_sort_key):
args = (name, _('[%s was removed]') % name, cache.left(name), None)
add(args, kwargs(name))
for name, new_name in sorted(renamed_names.iteritems(), key=lambda x:numeric_sort_key(x[0])):
args = (name, new_name, None, None)
add(args, kwargs(name))
def keyPressEvent(self, ev):
if not self.view.handle_key(ev):
if ev.key() in (Qt.Key_Enter, Qt.Key_Return):
return # The enter key is used by the search box, so prevent it closing the dialog
if ev.key() == Qt.Key_Slash:
return self.search.setFocus(Qt.OtherFocusReason)
if ev.matches(QKeySequence.Copy):
text = self.view.view.left.selected_text + self.view.view.right.selected_text
if text:
QApplication.clipboard().setText(text)
return
if ev.matches(QKeySequence.FindNext):
self.sbn.click()
return
if ev.matches(QKeySequence.FindPrevious):
self.sbp.click()
return
return Dialog.keyPressEvent(self, ev)
def compare_books(path1, path2, revert_msg=None, revert_callback=None, parent=None, names=None):
d = Diff(parent=parent, revert_button_msg=revert_msg)
if revert_msg is not None:
d.revert_requested.connect(revert_callback)
QTimer.singleShot(0, partial(d.ebook_diff, path1, path2, names=names))
d.exec_()
try:
d.revert_requested.disconnect()
except:
pass
d.break_cycles()
def main(args=sys.argv):
from calibre.gui2 import Application
left, right = args[-2:]
ext1, ext2 = left.rpartition('.')[-1].lower(), right.rpartition('.')[-1].lower()
if ext1.startswith('original_'):
ext1 = ext1.partition('_')[-1]
if ext2.startswith('original_'):
ext2 = ext2.partition('_')[-2]
if os.path.isdir(left):
attr = 'dir_diff'
elif (ext1, ext2) in {('epub', 'epub'), ('azw3', 'azw3')}:
attr = 'ebook_diff'
else:
attr = 'file_diff'
app = Application([]) # noqa
d = Diff(show_as_window=True)
func = getattr(d, attr)
QTimer.singleShot(0, lambda : func(left, right))
d.exec_()
return 0
if __name__ == '__main__':
main()
|
drxaero/calibre
|
src/calibre/gui2/tweak_book/diff/main.py
|
Python
|
gpl-3.0
| 18,741
|
#!/usr/bin/env python
"""
script to build the latest binaries for each vehicle type, ready to upload
Peter Barker, August 2017
based on build_binaries.sh by Andrew Tridgell, March 2013
AP_FLAKE8_CLEAN
"""
from __future__ import print_function
import datetime
import optparse
import os
import re
import shutil
import time
import string
import subprocess
import sys
import traceback
import gzip
# local imports
import generate_manifest
import gen_stable
import build_binaries_history
import board_list
from board_list import AP_PERIPH_BOARDS
if sys.version_info[0] < 3:
running_python3 = False
else:
running_python3 = True
def is_chibios_build(board):
'''see if a board is using HAL_ChibiOS'''
# cope with both running from Tools/scripts or running from cwd
hwdef_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", "..", "libraries", "AP_HAL_ChibiOS", "hwdef")
if os.path.exists(os.path.join(hwdef_dir, board, "hwdef.dat")):
return True
hwdef_dir = os.path.join("libraries", "AP_HAL_ChibiOS", "hwdef")
if os.path.exists(os.path.join(hwdef_dir, board, "hwdef.dat")):
return True
return False
def get_required_compiler(vehicle, tag, board):
'''return required compiler for a build tag.
return format is the version string that waf configure will detect.
You should setup a link from this name in $HOME/arm-gcc directory pointing at the
appropriate compiler
'''
if not is_chibios_build(board):
# only override compiler for ChibiOS builds
return None
if vehicle == 'Sub' and tag in ['stable', 'beta']:
# sub stable and beta is on the old compiler
return "g++-6.3.1"
# use 10.2.1 compiler for all other builds
return "g++-10.2.1"
class build_binaries(object):
def __init__(self, tags):
self.tags = tags
self.dirty = False
self.board_list = board_list.BoardList()
def progress(self, string):
'''pretty-print progress'''
print("BB: %s" % string)
def run_git(self, args):
'''run git with args git_args; returns git's output'''
cmd_list = ["git"]
cmd_list.extend(args)
return self.run_program("BB-GIT", cmd_list)
def board_branch_bit(self, board):
'''return a fragment which might modify the branch name.
this was previously used to have a master-AVR branch etc
if the board type was apm1 or apm2'''
return None
def board_options(self, board):
'''return board-specific options'''
if board in ["bebop", "disco"]:
return ["--static"]
return []
def run_waf(self, args, compiler=None):
if os.path.exists("waf"):
waf = "./waf"
else:
waf = os.path.join(".", "modules", "waf", "waf-light")
cmd_list = [waf]
cmd_list.extend(args)
env = None
if compiler is not None:
# default to $HOME/arm-gcc, but allow for any path with AP_GCC_HOME environment variable
gcc_home = os.environ.get("AP_GCC_HOME", os.path.join(os.environ["HOME"], "arm-gcc"))
gcc_path = os.path.join(gcc_home, compiler, "bin")
if os.path.exists(gcc_path):
# setup PATH to point at the right compiler, and setup to use ccache
env = os.environ.copy()
env["PATH"] = gcc_path + ":" + env["PATH"]
env["CC"] = "ccache arm-none-eabi-gcc"
env["CXX"] = "ccache arm-none-eabi-g++"
else:
raise Exception("BB-WAF: Missing compiler %s" % gcc_path)
self.run_program("BB-WAF", cmd_list, env=env)
def run_program(self, prefix, cmd_list, show_output=True, env=None):
if show_output:
self.progress("Running (%s)" % " ".join(cmd_list))
p = subprocess.Popen(cmd_list, bufsize=1, stdin=None,
stdout=subprocess.PIPE, close_fds=True,
stderr=subprocess.STDOUT, env=env)
output = ""
while True:
x = p.stdout.readline()
if len(x) == 0:
returncode = os.waitpid(p.pid, 0)
if returncode:
break
# select not available on Windows... probably...
time.sleep(0.1)
continue
if running_python3:
x = bytearray(x)
x = filter(lambda x : chr(x) in string.printable, x)
x = "".join([chr(c) for c in x])
output += x
x = x.rstrip()
if show_output:
print("%s: %s" % (prefix, x))
(_, status) = returncode
if status != 0 and show_output:
self.progress("Process failed (%s)" %
str(returncode))
raise subprocess.CalledProcessError(
returncode, cmd_list)
return output
def run_make(self, args):
cmd_list = ["make"]
cmd_list.extend(args)
self.run_program("BB-MAKE", cmd_list)
def run_git_update_submodules(self):
'''if submodules are present initialise and update them'''
if os.path.exists(os.path.join(self.basedir, ".gitmodules")):
self.run_git(["submodule",
"update",
"--init",
"--recursive",
"-f"])
def checkout(self, vehicle, ctag, cboard=None, cframe=None, submodule_update=True):
'''attempt to check out a git tree. Various permutations are
attempted based on ctag - for examplle, if the board is avr and ctag
is bob we will attempt to checkout bob-AVR'''
if self.dirty:
self.progress("Skipping checkout for dirty build")
return True
self.progress("Trying checkout %s %s %s %s" %
(vehicle, ctag, cboard, cframe))
self.run_git(['stash'])
if ctag == "latest":
vtag = "master"
else:
tagvehicle = vehicle
if tagvehicle == "Rover":
# FIXME: Rover tags in git still named APMrover2 :-(
tagvehicle = "APMrover2"
vtag = "%s-%s" % (tagvehicle, ctag)
branches = []
if cframe is not None:
# try frame specific tag
branches.append("%s-%s" % (vtag, cframe))
if cboard is not None:
bbb = self.board_branch_bit(cboard)
if bbb is not None:
# try board type specific branch extension
branches.append("".join([vtag, bbb]))
branches.append(vtag)
for branch in branches:
try:
self.progress("Trying branch %s" % branch)
self.run_git(["checkout", "-f", branch])
if submodule_update:
self.run_git_update_submodules()
self.run_git(["log", "-1"])
return True
except subprocess.CalledProcessError:
self.progress("Checkout branch %s failed" % branch)
self.progress("Failed to find tag for %s %s %s %s" %
(vehicle, ctag, cboard, cframe))
return False
def skip_board_waf(self, board):
'''check if we should skip this build because we do not support the
board in this release
'''
try:
out = self.run_program('waf', ['./waf', 'configure', '--board=BOARDTEST'], False)
lines = out.split('\n')
needles = ["BOARDTEST' (choose from", "BOARDTEST': choices are"]
for line in lines:
for needle in needles:
idx = line.find(needle)
if idx != -1:
break
if idx != -1:
line = line[idx+len(needle):-1]
line = line.replace("'", "")
line = line.replace(" ", "")
boards = line.split(",")
return board not in boards
except IOError as e:
if e.errno != 2:
raise
self.progress("Skipping unsupported board %s" % (board,))
return True
def skip_frame(self, board, frame):
'''returns true if this board/frame combination should not be built'''
if frame == "heli":
if board in ["bebop", "aerofc-v1", "skyviper-v2450", "CubeSolo", "CubeGreen-solo", 'skyviper-journey']:
self.progress("Skipping heli build for %s" % board)
return True
return False
def first_line_of_filepath(self, filepath):
'''returns the first (text) line from filepath'''
with open(filepath) as fh:
line = fh.readline()
return line
def skip_build(self, buildtag, builddir):
'''check if we should skip this build because we have already built
this version
'''
if os.getenv("FORCE_BUILD", False):
return False
if not os.path.exists(os.path.join(self.basedir, '.gitmodules')):
self.progress("Skipping build without submodules")
return True
bname = os.path.basename(builddir)
ldir = os.path.join(os.path.dirname(os.path.dirname(
os.path.dirname(builddir))), buildtag, bname) # FIXME: WTF
oldversion_filepath = os.path.join(ldir, "git-version.txt")
if not os.path.exists(oldversion_filepath):
self.progress("%s doesn't exist - building" % oldversion_filepath)
return False
oldversion = self.first_line_of_filepath(oldversion_filepath)
newversion = self.run_git(["log", "-1"])
newversion = newversion.splitlines()[0]
oldversion = oldversion.rstrip()
newversion = newversion.rstrip()
self.progress("oldversion=%s newversion=%s" %
(oldversion, newversion,))
if oldversion == newversion:
self.progress("Skipping build - version match (%s)" %
(newversion,))
return True
self.progress("%s needs rebuild" % (ldir,))
return False
def write_string_to_filepath(self, string, filepath):
'''writes the entirety of string to filepath'''
with open(filepath, "w") as x:
x.write(string)
def version_h_path(self, src):
'''return path to version.h'''
if src == 'AP_Periph':
return os.path.join('Tools', src, "version.h")
return os.path.join(src, "version.h")
def addfwversion_gitversion(self, destdir, src):
# create git-version.txt:
gitlog = self.run_git(["log", "-1"])
gitversion_filepath = os.path.join(destdir, "git-version.txt")
gitversion_content = gitlog
versionfile = self.version_h_path(src)
if os.path.exists(versionfile):
content = self.read_string_from_filepath(versionfile)
match = re.search('define.THISFIRMWARE "([^"]+)"', content)
if match is None:
self.progress("Failed to retrieve THISFIRMWARE from version.h")
self.progress("Content: (%s)" % content)
self.progress("Writing version info to %s" %
(gitversion_filepath,))
gitversion_content += "\nAPMVERSION: %s\n" % (match.group(1))
else:
self.progress("%s does not exist" % versionfile)
self.write_string_to_filepath(gitversion_content, gitversion_filepath)
def addfwversion_firmwareversiontxt(self, destdir, src):
# create firmware-version.txt
versionfile = self.version_h_path(src)
if not os.path.exists(versionfile):
self.progress("%s does not exist" % (versionfile,))
return
ss = r".*define +FIRMWARE_VERSION[ ]+(?P<major>\d+)[ ]*,[ ]*" \
r"(?P<minor>\d+)[ ]*,[ ]*(?P<point>\d+)[ ]*,[ ]*" \
r"(?P<type>[A-Z_]+)[ ]*"
content = self.read_string_from_filepath(versionfile)
match = re.search(ss, content)
if match is None:
self.progress("Failed to retrieve FIRMWARE_VERSION from version.h")
self.progress("Content: (%s)" % content)
return
ver = "%d.%d.%d-%s\n" % (int(match.group("major")),
int(match.group("minor")),
int(match.group("point")),
match.group("type"))
firmware_version_filepath = "firmware-version.txt"
self.progress("Writing version (%s) to %s" %
(ver, firmware_version_filepath,))
self.write_string_to_filepath(
ver, os.path.join(destdir, firmware_version_filepath))
def addfwversion(self, destdir, src):
'''write version information into destdir'''
self.addfwversion_gitversion(destdir, src)
self.addfwversion_firmwareversiontxt(destdir, src)
def read_string_from_filepath(self, filepath):
'''returns content of filepath as a string'''
with open(filepath, 'rb') as fh:
content = fh.read()
if running_python3:
return content.decode('ascii')
return content
def string_in_filepath(self, string, filepath):
'''returns true if string exists in the contents of filepath'''
return string in self.read_string_from_filepath(filepath)
def mkpath(self, path):
'''make directory path and all elements leading to it'''
'''distutils.dir_util.mkpath was playing up'''
try:
os.makedirs(path)
except OSError as e:
if e.errno != 17: # EEXIST
raise e
def copyit(self, afile, adir, tag, src):
'''copies afile into various places, adding metadata'''
bname = os.path.basename(adir)
tdir = os.path.join(os.path.dirname(os.path.dirname(
os.path.dirname(adir))), tag, bname)
if tag == "latest":
# we keep a permanent archive of all "latest" builds,
# their path including a build timestamp:
self.mkpath(adir)
self.progress("Copying %s to %s" % (afile, adir,))
shutil.copy(afile, adir)
self.addfwversion(adir, src)
# the most recent build of every tag is kept around:
self.progress("Copying %s to %s" % (afile, tdir))
self.mkpath(tdir)
self.addfwversion(tdir, src)
shutil.copy(afile, tdir)
def touch_filepath(self, filepath):
'''creates a file at filepath, or updates the timestamp on filepath'''
if os.path.exists(filepath):
os.utime(filepath, None)
else:
with open(filepath, "a"):
pass
def build_vehicle(self, tag, vehicle, boards, vehicle_binaries_subdir,
binaryname, frames=[None]):
'''build vehicle binaries'''
self.progress("Building %s %s binaries (cwd=%s)" %
(vehicle, tag, os.getcwd()))
board_count = len(boards)
count = 0
for board in sorted(boards, key=str.lower):
now = datetime.datetime.now()
count += 1
self.progress("[%u/%u] Building board: %s at %s" %
(count, board_count, board, str(now)))
for frame in frames:
if frame is not None:
self.progress("Considering frame %s for board %s" %
(frame, board))
if frame is None:
framesuffix = ""
else:
framesuffix = "-%s" % frame
if not self.checkout(vehicle, tag, board, frame, submodule_update=False):
msg = ("Failed checkout of %s %s %s %s" %
(vehicle, board, tag, frame,))
self.progress(msg)
self.error_strings.append(msg)
continue
self.progress("Building %s %s %s binaries %s" %
(vehicle, tag, board, frame))
ddir = os.path.join(self.binaries,
vehicle_binaries_subdir,
self.hdate_ym,
self.hdate_ymdhm,
"".join([board, framesuffix]))
if self.skip_build(tag, ddir):
continue
if self.skip_frame(board, frame):
continue
# we do the submodule update after the skip_board_waf check to avoid doing it on
# builds we will not be running
self.run_git_update_submodules()
if self.skip_board_waf(board):
continue
if os.path.exists(self.buildroot):
shutil.rmtree(self.buildroot)
self.remove_tmpdir()
githash = self.run_git(["rev-parse", "HEAD"]).rstrip()
t0 = time.time()
self.progress("Configuring for %s in %s" %
(board, self.buildroot))
try:
waf_opts = ["configure",
"--board", board,
"--out", self.buildroot,
"clean"]
gccstring = get_required_compiler(vehicle, tag, board)
if gccstring is not None and gccstring.find("g++-6.3") == -1:
# versions using the old compiler don't have the --assert-cc-version option
waf_opts += ["--assert-cc-version", gccstring]
waf_opts.extend(self.board_options(board))
self.run_waf(waf_opts, compiler=gccstring)
except subprocess.CalledProcessError:
self.progress("waf configure failed")
continue
time_taken_to_configure = time.time() - t0
try:
target = os.path.join("bin",
"".join([binaryname, framesuffix]))
self.run_waf(["build", "--targets", target], compiler=gccstring)
except subprocess.CalledProcessError:
msg = ("Failed build of %s %s%s %s" %
(vehicle, board, framesuffix, tag))
self.progress(msg)
self.error_strings.append(msg)
# record some history about this build
t1 = time.time()
time_taken_to_build = t1-t0
self.history.record_build(githash, tag, vehicle, board, frame, None, t0, time_taken_to_build)
continue
time_taken_to_build = (time.time()-t0) - time_taken_to_configure
time_taken = time.time()-t0
self.progress("Making %s %s %s %s took %u seconds (configure=%u build=%u)" %
(vehicle, tag, board, frame, time_taken, time_taken_to_configure, time_taken_to_build))
bare_path = os.path.join(self.buildroot,
board,
"bin",
"".join([binaryname, framesuffix]))
files_to_copy = []
extensions = [".apj", ".abin", "_with_bl.hex", ".hex"]
if vehicle == 'AP_Periph':
# need bin file for uavcan-gui-tool and MissionPlanner
extensions.append('.bin')
for extension in extensions:
filepath = "".join([bare_path, extension])
if os.path.exists(filepath):
files_to_copy.append(filepath)
if not os.path.exists(bare_path):
raise Exception("No elf file?!")
# only copy the elf if we don't have other files to copy
if len(files_to_copy) == 0:
files_to_copy.append(bare_path)
for path in files_to_copy:
try:
self.copyit(path, ddir, tag, vehicle)
except Exception as e:
self.print_exception_caught(e)
self.progress("Failed to copy %s to %s: %s" % (path, ddir, str(e)))
# why is touching this important? -pb20170816
self.touch_filepath(os.path.join(self.binaries,
vehicle_binaries_subdir, tag))
# record some history about this build
self.history.record_build(githash, tag, vehicle, board, frame, bare_path, t0, time_taken_to_build)
self.checkout(vehicle, "latest")
def get_exception_stacktrace(self, e):
if sys.version_info[0] >= 3:
ret = "%s\n" % e
ret += ''.join(traceback.format_exception(type(e),
e,
tb=e.__traceback__))
return ret
# Python2:
return traceback.format_exc(e)
def print_exception_caught(self, e, send_statustext=True):
self.progress("Exception caught: %s" %
self.get_exception_stacktrace(e))
def AP_Periph_boards(self):
return AP_PERIPH_BOARDS
def build_arducopter(self, tag):
'''build Copter binaries'''
boards = []
boards.extend(["aerofc-v1", "bebop"])
boards.extend(self.board_list.find_autobuild_boards('Copter'))
self.build_vehicle(tag,
"ArduCopter",
boards,
"Copter",
"arducopter",
frames=[None, "heli"])
def build_arduplane(self, tag):
'''build Plane binaries'''
boards = self.board_list.find_autobuild_boards('Plane')[:]
boards.append("disco")
self.build_vehicle(tag,
"ArduPlane",
boards,
"Plane",
"arduplane")
def build_antennatracker(self, tag):
'''build Tracker binaries'''
self.build_vehicle(tag,
"AntennaTracker",
self.board_list.find_autobuild_boards('Tracker')[:],
"AntennaTracker",
"antennatracker")
def build_rover(self, tag):
'''build Rover binaries'''
self.build_vehicle(tag,
"Rover",
self.board_list.find_autobuild_boards('Rover')[:],
"Rover",
"ardurover")
def build_ardusub(self, tag):
'''build Sub binaries'''
self.build_vehicle(tag,
"ArduSub",
self.board_list.find_autobuild_boards('Sub')[:],
"Sub",
"ardusub")
def build_AP_Periph(self, tag):
'''build AP_Periph binaries'''
boards = self.AP_Periph_boards()
self.build_vehicle(tag,
"AP_Periph",
boards,
"AP_Periph",
"AP_Periph")
def build_blimp(self, tag):
'''build Blimp binaries'''
self.build_vehicle(tag,
"Blimp",
self.board_list.find_autobuild_boards('Blimp')[:],
"Blimp",
"blimp")
def generate_manifest(self):
'''generate manigest files for GCS to download'''
self.progress("Generating manifest")
base_url = 'https://firmware.ardupilot.org'
generator = generate_manifest.ManifestGenerator(self.binaries,
base_url)
content = generator.json()
new_json_filepath = os.path.join(self.binaries, "manifest.json.new")
self.write_string_to_filepath(content, new_json_filepath)
# provide a pre-compressed manifest. For reference, a 7M manifest
# "gzip -9"s to 300k in 1 second, "xz -e"s to 80k in 26 seconds
new_json_filepath_gz = os.path.join(self.binaries,
"manifest.json.gz.new")
with gzip.open(new_json_filepath_gz, 'wb') as gf:
if running_python3:
content = bytes(content, 'ascii')
gf.write(content)
json_filepath = os.path.join(self.binaries, "manifest.json")
json_filepath_gz = os.path.join(self.binaries, "manifest.json.gz")
shutil.move(new_json_filepath, json_filepath)
shutil.move(new_json_filepath_gz, json_filepath_gz)
self.progress("Manifest generation successful")
self.progress("Generating stable releases")
gen_stable.make_all_stable(self.binaries)
self.progress("Generate stable releases done")
def validate(self):
'''run pre-run validation checks'''
if "dirty" in self.tags:
if len(self.tags) > 1:
raise ValueError("dirty must be only tag if present (%s)" %
(str(self.tags)))
self.dirty = True
def pollute_env_from_file(self, filepath):
with open(filepath) as f:
for line in f:
try:
(name, value) = str.split(line, "=")
except ValueError as e:
self.progress("%s: split failed: %s" % (filepath, str(e)))
continue
value = value.rstrip()
self.progress("%s: %s=%s" % (filepath, name, value))
os.environ[name] = value
def remove_tmpdir(self):
if os.path.exists(self.tmpdir):
self.progress("Removing (%s)" % (self.tmpdir,))
shutil.rmtree(self.tmpdir)
def buildlogs_dirpath(self):
return os.getenv("BUILDLOGS",
os.path.join(os.getcwd(), "..", "buildlogs"))
def run(self):
self.validate()
self.mkpath(self.buildlogs_dirpath())
binaries_history_filepath = os.path.join(
self.buildlogs_dirpath(), "build_binaries_history.sqlite")
self.history = build_binaries_history.BuildBinariesHistory(binaries_history_filepath)
prefix_bin_dirpath = os.path.join(os.environ.get('HOME'),
"prefix", "bin")
origin_env_path = os.environ.get("PATH")
os.environ["PATH"] = ':'.join([prefix_bin_dirpath, origin_env_path,
"/bin", "/usr/bin"])
if 'BUILD_BINARIES_PATH' in os.environ:
self.tmpdir = os.environ['BUILD_BINARIES_PATH']
else:
self.tmpdir = os.path.join(os.getcwd(), 'build.tmp.binaries')
os.environ["TMPDIR"] = self.tmpdir
print(self.tmpdir)
self.remove_tmpdir()
self.progress("Building in %s" % self.tmpdir)
now = datetime.datetime.now()
self.progress(now)
if not self.dirty:
self.run_git(["checkout", "-f", "master"])
githash = self.run_git(["rev-parse", "HEAD"])
githash = githash.rstrip()
self.progress("git hash: %s" % str(githash))
self.hdate_ym = now.strftime("%Y-%m")
self.hdate_ymdhm = now.strftime("%Y-%m-%d-%H:%m")
self.mkpath(os.path.join("binaries", self.hdate_ym,
self.hdate_ymdhm))
self.binaries = os.path.join(self.buildlogs_dirpath(), "binaries")
self.basedir = os.getcwd()
self.error_strings = []
if os.path.exists("config.mk"):
# FIXME: narrow exception
self.pollute_env_from_file("config.mk")
if not self.dirty:
self.run_git_update_submodules()
self.buildroot = os.path.join(os.environ.get("TMPDIR"),
"binaries.build")
for tag in self.tags:
t0 = time.time()
self.build_arducopter(tag)
self.build_arduplane(tag)
self.build_rover(tag)
self.build_antennatracker(tag)
self.build_ardusub(tag)
self.build_AP_Periph(tag)
self.build_blimp(tag)
self.history.record_run(githash, tag, t0, time.time()-t0)
if os.path.exists(self.tmpdir):
shutil.rmtree(self.tmpdir)
self.generate_manifest()
for error_string in self.error_strings:
self.progress("%s" % error_string)
sys.exit(len(self.error_strings))
if __name__ == '__main__':
parser = optparse.OptionParser("build_binaries.py")
parser.add_option("", "--tags", action="append", type="string",
default=[], help="tags to build")
cmd_opts, cmd_args = parser.parse_args()
tags = cmd_opts.tags
if len(tags) == 0:
# FIXME: wedge this defaulting into parser somehow
tags = ["stable", "beta", "latest"]
bb = build_binaries(tags)
bb.run()
|
ArduPilot/ardupilot
|
Tools/scripts/build_binaries.py
|
Python
|
gpl-3.0
| 29,462
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
from django.core.management import execute_from_command_line
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "worldmap.settings")
execute_from_command_line(sys.argv)
|
waybarrios/worldmap
|
manage.py
|
Python
|
gpl-3.0
| 253
|
from .depth import *
from .camera import *
from .contact import *
from .imagefeature import *
from .arduino import *
|
poppy-project/pypot
|
pypot/sensor/__init__.py
|
Python
|
gpl-3.0
| 117
|
# Copyright (C) 2007-2012 by the Free Software Foundation, Inc.
#
# This file is part of GNU Mailman.
#
# GNU Mailman is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option)
# any later version.
#
# GNU Mailman is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# GNU Mailman. If not, see <http://www.gnu.org/licenses/>.
"""The message storage service."""
from __future__ import absolute_import, unicode_literals
__metaclass__ = type
__all__ = [
'IMessage',
'IMessageStore',
]
from zope.interface import Interface, Attribute
class IMessageStore(Interface):
"""The interface of the global message storage service.
All messages that are stored in the system live in the message storage
service. A message stored in this service must have a Message-ID header.
The store writes an X-Message-ID-Hash header which contains the Base32
encoded SHA1 hash of the message's Message-ID header. Any existing
X-Message-ID-Hash header is overwritten.
Either the Message-ID or the X-Message-ID-Hash header can be used to
uniquely identify this message in the storage service. While it is
possible to see duplicate Message-IDs, this is never correct and the
service is allowed to drop any subsequent colliding messages, or overwrite
earlier messages with later ones.
The combination of the List-Archive header and either the Message-ID or
X-Message-ID-Hash header can be used to retrieve the message from the
internet facing interface for the message store. This can be considered a
globally unique URI to the message.
For example, a message with the following headers:
Message-ID: <87myycy5eh.fsf@uwakimon.sk.tsukuba.ac.jp>
Date: Wed, 04 Jul 2007 16:49:58 +0900
List-Archive: http://archive.example.com/
X-Message-ID-Hash: RXTJ357KFOTJP3NFJA6KMO65X7VQOHJI
the globally unique URI would be:
http://archive.example.com/RXTJ357KFOTJP3NFJA6KMO65X7VQOHJI
"""
def add(message):
"""Add the message to the store.
:param message: An email.message.Message instance containing at least
a unique Message-ID header. The message will be given an
X-Message-ID-Hash header, overriding any existing such header.
:returns: The calculated X-Message-ID-Hash header.
:raises ValueError: if the message is missing a Message-ID header.
The storage service is also allowed to raise this exception if it
find, but disallows collisions.
"""
def get_message_by_id(message_id):
"""Return the message with a matching Message-ID.
:param message_id: The Message-ID header contents to search for.
:returns: The message, or None if no matching message was found.
"""
def get_message_by_hash(message_id_hash):
"""Return the message with the matching X-Message-ID-Hash.
:param message_id_hash: The X-Message-ID-Hash header contents to
search for.
:returns: The message, or None if no matching message was found.
"""
def delete_message(message_id):
"""Remove the given message from the store.
:param message: The Message-ID of the mesage to delete from the store.
:raises LookupError: if there is no such message.
"""
messages = Attribute(
"""An iterator over all messages in this message store.""")
class IMessage(Interface):
"""The representation of an email message."""
message_id = Attribute("""The message's Message-ID header.""")
message_id_hash = Attribute("""The unique SHA1 hash of the message.""")
path = Attribute("""The filesystem path to the message object.""")
|
hcs/mailman
|
src/mailman/interfaces/messages.py
|
Python
|
gpl-3.0
| 4,126
|
#!/usr/bin/env python
from __future__ import absolute_import
from collections import OrderedDict
from linchpin.InventoryFilters.InventoryFilter import InventoryFilter
class Inventory(InventoryFilter):
DEFAULT_HOSTNAMES = ['public_ip']
def get_host_data(self, res, cfgs):
"""
Returns a dict of hostnames or IP addresses for use in an Ansible
inventory file, based on available data. Only a single hostname or IP
address will be returned per instance, so as to avoid duplicate runs of
Ansible on the same host via the generated inventory file.
Each hostname contains mappings of any variable that was defined in the
cfgs section of the PinFile (e.g. __IP__) to the value in the field that
corresponds with that variable in the cfgs.
By default, the hostname will be the public_ip field returned by gcloud
:param topo:
linchpin GCloud resource data
:param cfgs:
map of config options from PinFile
"""
if res['resource_group'] != 'gcloud':
return OrderedDict()
if res['role'] == 'gcloud_gce':
return self.get_gcloud_gce_host_data(res, cfgs)
else:
return OrderedDict()
def get_gcloud_gce_host_data(self, res, cfgs):
host_data = OrderedDict()
var_data = cfgs.get('gcloud', {})
if var_data is None:
var_data = {}
for instance in res['instance_data']:
host = self.get_hostname(instance, var_data,
self.DEFAULT_HOSTNAMES)
hostname_var = host[0]
hostname = host[1]
host_data[hostname] = {}
if '__IP__' not in list(var_data.keys()):
var_data['__IP__'] = hostname_var
host_data[hostname] = {}
self.set_config_values(host_data[hostname], instance, var_data)
return host_data
|
samvarankashyap/linch-pin
|
linchpin/provision/roles/gcloud/files/inventory.py
|
Python
|
gpl-3.0
| 1,959
|
# Author: Nic Wolfe <nic@wolfeden.ca>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
import sickbeard
import kodi
import plex
import emby
import nmj
import nmjv2
import synoindex
import synologynotifier
import pytivo
import growl
import prowl
from . import libnotify
import pushover
import boxcar
import boxcar2
import nma
import pushalot
import pushbullet
import freemobile
import tweet
import trakt
import emailnotify
from sickbeard.common import *
# home theater / nas
kodi_notifier = kodi.KODINotifier()
plex_notifier = plex.PLEXNotifier()
emby_notifier = emby.EMBYNotifier()
nmj_notifier = nmj.NMJNotifier()
nmjv2_notifier = nmjv2.NMJv2Notifier()
synoindex_notifier = synoindex.synoIndexNotifier()
synology_notifier = synologynotifier.synologyNotifier()
pytivo_notifier = pytivo.pyTivoNotifier()
# devices
growl_notifier = growl.GrowlNotifier()
prowl_notifier = prowl.ProwlNotifier()
libnotify_notifier = libnotify.LibnotifyNotifier()
pushover_notifier = pushover.PushoverNotifier()
boxcar_notifier = boxcar.BoxcarNotifier()
boxcar2_notifier = boxcar2.Boxcar2Notifier()
nma_notifier = nma.NMA_Notifier()
pushalot_notifier = pushalot.PushalotNotifier()
pushbullet_notifier = pushbullet.PushbulletNotifier()
freemobile_notifier = freemobile.FreeMobileNotifier()
# social
twitter_notifier = tweet.TwitterNotifier()
trakt_notifier = trakt.TraktNotifier()
email_notifier = emailnotify.EmailNotifier()
notifiers = [
libnotify_notifier, # Libnotify notifier goes first because it doesn't involve blocking on network activity.
kodi_notifier,
plex_notifier,
nmj_notifier,
nmjv2_notifier,
synoindex_notifier,
synology_notifier,
pytivo_notifier,
growl_notifier,
freemobile_notifier,
prowl_notifier,
pushover_notifier,
boxcar_notifier,
boxcar2_notifier,
nma_notifier,
pushalot_notifier,
pushbullet_notifier,
twitter_notifier,
trakt_notifier,
email_notifier,
]
def notify_download(ep_name):
for n in notifiers:
n.notify_download(ep_name)
def notify_subtitle_download(ep_name, lang):
for n in notifiers:
n.notify_subtitle_download(ep_name, lang)
def notify_snatch(ep_name):
for n in notifiers:
n.notify_snatch(ep_name)
def notify_git_update(new_version=""):
for n in notifiers:
n.notify_git_update(new_version)
|
keen99/SickRage
|
sickbeard/notifiers/__init__.py
|
Python
|
gpl-3.0
| 3,001
|
from django.conf import settings
from django.conf.urls.defaults import handler500, handler404, patterns, include, \
url
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
url(r'^admin/', include(admin.site.urls)),
url(r'^jsi18n/(?P<packages>\S+?)/$', 'django.views.i18n.javascript_catalog'),
url(r'^media/cms/(?P<path>.*)$', 'django.views.static.serve',
{'document_root': settings.CMS_MEDIA_ROOT, 'show_indexes': True}),
url(r'^media/(?P<path>.*)$', 'django.views.static.serve',
{'document_root': settings.MEDIA_ROOT, 'show_indexes': True}),
url(r'^', include('cms.test_utils.project.second_cms_urls_for_apphook_tests')),
)
|
hzlf/openbroadcast
|
website/cms/test_utils/project/second_urls_for_apphook_tests.py
|
Python
|
gpl-3.0
| 696
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Addons modules by CLEARCORP S.A.
# Copyright (C) 2009-TODAY CLEARCORP S.A. (<http://clearcorp.co.cr>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import account_move_line
import account_move_reconcile
import cash_flow_type
import cash_flow_distribution
import report
import wizard
|
sysadminmatmoz/odoo-clearcorp
|
TODO-8.0/cash_flow_report/__init__.py
|
Python
|
agpl-3.0
| 1,165
|
"""
Module for code that should run during LMS startup
"""
# pylint: disable=unused-argument
from django.conf import settings
# Force settings to run so that the python path is modified
settings.INSTALLED_APPS # pylint: disable=pointless-statement
from openedx.core.lib.django_startup import autostartup
import edxmako
import logging
from monkey_patch import django_utils_translation
import analytics
log = logging.getLogger(__name__)
def run():
"""
Executed during django startup
"""
# Patch the xml libs.
from safe_lxml import defuse_xml_libs
defuse_xml_libs()
django_utils_translation.patch()
autostartup()
add_mimetypes()
if settings.FEATURES.get('USE_CUSTOM_THEME', False):
enable_theme()
if settings.FEATURES.get('USE_MICROSITES', False):
enable_microsites()
if settings.FEATURES.get('ENABLE_THIRD_PARTY_AUTH', False):
enable_third_party_auth()
# Initialize Segment.io analytics module. Flushes first time a message is received and
# every 50 messages thereafter, or if 10 seconds have passed since last flush
if settings.FEATURES.get('SEGMENT_IO_LMS') and hasattr(settings, 'SEGMENT_IO_LMS_KEY'):
analytics.init(settings.SEGMENT_IO_LMS_KEY, flush_at=50)
def add_mimetypes():
"""
Add extra mimetypes. Used in xblock_resource.
If you add a mimetype here, be sure to also add it in cms/startup.py.
"""
import mimetypes
mimetypes.add_type('application/vnd.ms-fontobject', '.eot')
mimetypes.add_type('application/x-font-opentype', '.otf')
mimetypes.add_type('application/x-font-ttf', '.ttf')
mimetypes.add_type('application/font-woff', '.woff')
def enable_theme():
"""
Enable the settings for a custom theme, whose files should be stored
in ENV_ROOT/themes/THEME_NAME (e.g., edx_all/themes/stanford).
"""
# Workaround for setting THEME_NAME to an empty
# string which is the default due to this ansible
# bug: https://github.com/ansible/ansible/issues/4812
if settings.THEME_NAME == "":
settings.THEME_NAME = None
return
assert settings.FEATURES['USE_CUSTOM_THEME']
settings.FAVICON_PATH = 'themes/{name}/images/favicon.ico'.format(
name=settings.THEME_NAME
)
# Calculate the location of the theme's files
theme_root = settings.ENV_ROOT / "themes" / settings.THEME_NAME
# Include the theme's templates in the template search paths
settings.TEMPLATE_DIRS.insert(0, theme_root / 'templates')
edxmako.paths.add_lookup('main', theme_root / 'templates', prepend=True)
# Namespace the theme's static files to 'themes/<theme_name>' to
# avoid collisions with default edX static files
settings.STATICFILES_DIRS.append(
(u'themes/{}'.format(settings.THEME_NAME), theme_root / 'static')
)
# Include theme locale path for django translations lookup
settings.LOCALE_PATHS = (theme_root / 'conf/locale',) + settings.LOCALE_PATHS
def enable_microsites():
"""
Enable the use of microsites, which are websites that allow
for subdomains for the edX platform, e.g. foo.edx.org
"""
microsites_root = settings.MICROSITE_ROOT_DIR
microsite_config_dict = settings.MICROSITE_CONFIGURATION
for ms_name, ms_config in microsite_config_dict.items():
# Calculate the location of the microsite's files
ms_root = microsites_root / ms_name
ms_config = microsite_config_dict[ms_name]
# pull in configuration information from each
# microsite root
if ms_root.isdir():
# store the path on disk for later use
ms_config['microsite_root'] = ms_root
template_dir = ms_root / 'templates'
ms_config['template_dir'] = template_dir
ms_config['microsite_name'] = ms_name
log.info('Loading microsite {0}'.format(ms_root))
else:
# not sure if we have application logging at this stage of
# startup
log.error('Error loading microsite {0}. Directory does not exist'.format(ms_root))
# remove from our configuration as it is not valid
del microsite_config_dict[ms_name]
# if we have any valid microsites defined, let's wire in the Mako and STATIC_FILES search paths
if microsite_config_dict:
settings.TEMPLATE_DIRS.append(microsites_root)
edxmako.paths.add_lookup('main', microsites_root)
settings.STATICFILES_DIRS.insert(0, microsites_root)
def enable_third_party_auth():
"""
Enable the use of third_party_auth, which allows users to sign in to edX
using other identity providers. For configuration details, see
common/djangoapps/third_party_auth/settings.py.
"""
from third_party_auth import settings as auth_settings
auth_settings.apply_settings(settings)
|
htzy/bigfour
|
lms/startup.py
|
Python
|
agpl-3.0
| 4,878
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Author: Joel Grand-Guillaume
# Copyright 2011-2012 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import statement
|
akretion/bank-statement-reconcile
|
__unported__/account_statement_transactionid_completion/__init__.py
|
Python
|
agpl-3.0
| 964
|
# -*- coding: utf-8 -*-
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html
from . import test_account_payment_transfer_reconcile_batch
|
acsone/bank-payment
|
account_payment_transfer_reconcile_batch/tests/__init__.py
|
Python
|
agpl-3.0
| 150
|
from sympy.core.evalf import PrecisionExhausted, complex_accuracy
from sympy import pi, I, Symbol, Add, Rational, exp, sqrt, sin, cos, \
fibonacci, Integral, oo, E, atan, log, integrate, floor, ceiling, \
factorial, binomial, Sum, zeta, Catalan, Pow, GoldenRatio, sympify, \
sstr, Function, Eq, Mul, Pow, Derivative
from sympy.mpmath.libmp.libmpf import from_float
from sympy.utilities.pytest import raises
x = Symbol('x')
y = Symbol('y')
n = Symbol('n')
def NS(e, n=15, **options):
return sstr(sympify(e).evalf(n, **options), full_prec=True)
def test_evalf_helpers():
assert complex_accuracy((from_float(2.0),None,35,None)) == 35
assert complex_accuracy((from_float(2.0),from_float(10.0),35,100)) == 37
assert complex_accuracy((from_float(2.0),from_float(1000.0),35,100)) == 43
assert complex_accuracy((from_float(2.0),from_float(10.0),100,35)) == 35
assert complex_accuracy((from_float(2.0),from_float(1000.0),100,35)) == 35
def test_evalf_basic():
assert NS('pi',15) == '3.14159265358979'
assert NS('2/3',10) == '0.6666666667'
assert NS('355/113-pi',6) == '2.66764e-7'
assert NS('16*atan(1/5)-4*atan(1/239)', 15) == '3.14159265358979'
def test_cancellation():
assert NS(Add(pi,Rational(1,10**1000),-pi,evaluate=False),15,maxn=1200) == '1.00000000000000e-1000'
def test_evalf_powers():
assert NS('pi**(10**20)',10) == '1.339148777e+49714987269413385435'
assert NS(pi**(10**100),10) == ('4.946362032e+4971498726941338543512682882'
'9089887365167832438044244613405349992494711208'
'95526746555473864642912223')
assert NS('2**(1/10**50)',15) == '1.00000000000000'
assert NS('2**(1/10**50)-1',15) == '6.93147180559945e-51'
# Evaluation of Rump's ill-conditioned polynomial
def test_evalf_rump():
a = 1335*y**6/4+x**2*(11*x**2*y**2-y**6-121*y**4-2)+11*y**8/2+x/(2*y)
assert NS(a, 15, subs={x:77617, y:33096}) == '-0.827396059946821'
def test_evalf_complex():
assert NS('2*sqrt(pi)*I',10) == '3.544907702*I'
assert NS('3+3*I',15) == '3.00000000000000 + 3.00000000000000*I'
assert NS('E+pi*I',15) == '2.71828182845905 + 3.14159265358979*I'
assert NS('pi * (3+4*I)',15) == '9.42477796076938 + 12.5663706143592*I'
assert NS('I*(2+I)',15) == '-1.00000000000000 + 2.00000000000000*I'
#assert NS('(pi+E*I)*(E+pi*I)',15) in ('.0e-15 + 17.25866050002*I', '.0e-17 + 17.25866050002*I', '-.0e-17 + 17.25866050002*I')
assert NS('(pi+E*I)*(E+pi*I)',15,chop=True) == '17.2586605000200*I'
def test_evalf_complex_powers():
assert NS('(E+pi*I)**100000000000000000') == \
'-3.58896782867793e+61850354284995199 + 4.58581754997159e+61850354284995199*I'
# XXX: rewrite if a+a*I simplification introduced in sympy
#assert NS('(pi + pi*I)**2') in ('.0e-15 + 19.7392088021787*I', '.0e-16 + 19.7392088021787*I')
assert NS('(pi + pi*I)**2', chop=True) == '19.7392088021787*I'
assert NS('(pi + 1/10**8 + pi*I)**2') == '6.2831853e-8 + 19.7392088650106*I'
assert NS('(pi + 1/10**12 + pi*I)**2') == '6.283e-12 + 19.7392088021850*I'
#assert NS('(pi + pi*I)**4') == '-389.63636413601 + .0e-14*I'
assert NS('(pi + pi*I)**4', chop=True) == '-389.636364136010'
assert NS('(pi + 1/10**8 + pi*I)**4') == '-389.636366616512 + 2.4805021e-6*I'
assert NS('(pi + 1/10**12 + pi*I)**4') == '-389.636364136258 + 2.481e-10*I'
assert NS('(10000*pi + 10000*pi*I)**4', chop=True) == '-3.89636364136010e+18'
def test_evalf_exponentiation():
assert NS(sqrt(-pi)) == '1.77245385090552*I'
assert NS(Pow(pi*I, Rational(1,2), evaluate=False)) == '1.25331413731550 + 1.25331413731550*I'
assert NS(pi**I) == '0.413292116101594 + 0.910598499212615*I'
assert NS(pi**(E+I/3)) == '20.8438653991931 + 8.36343473930031*I'
assert NS((pi+I/3)**(E+I/3)) == '17.2442906093590 + 13.6839376767037*I'
assert NS(exp(pi)) == '23.1406926327793'
assert NS(exp(pi+E*I)) == '-21.0981542849657 + 9.50576358282422*I'
assert NS(pi**pi) == '36.4621596072079'
assert NS((-pi)**pi) == '-32.9138577418939 - 15.6897116534332*I'
assert NS((-pi)**(-pi)) == '-0.0247567717232697 + 0.0118013091280262*I'
# An example from Smith, "Multiple Precision Complex Arithmetic and Functions"
def test_evalf_complex_cancellation():
A = Rational('63287/100000')
B = Rational('52498/100000')
C = Rational('69301/100000')
D = Rational('83542/100000')
F = Rational('2231321613/2500000000')
# XXX: the number of returned mantissa digits in the real part could
# change with the implementation. What matters is that the returned digits are
# correct.
assert NS((A+B*I)*(C+D*I),6) == '6.44862e-6 + 0.892529*I'
assert NS((A+B*I)*(C+D*I),10) == '6.447099821e-6 + 0.8925286452*I'
assert NS((A+B*I)*(C+D*I) - F*I, 5) in ('6.4471e-6 - .0e-15*I', '6.4471e-6 + .0e-15*I')
def test_evalf_logs():
assert NS("log(3+pi*I)", 15) == '1.46877619736226 + 0.808448792630022*I'
assert NS("log(pi*I)", 15) == '1.14472988584940 + 1.57079632679490*I'
def test_evalf_trig():
assert NS('sin(1)',15) == '0.841470984807897'
assert NS('cos(1)',15) == '0.540302305868140'
assert NS('sin(10**-6)',15) == '9.99999999999833e-7'
assert NS('cos(10**-6)',15) == '0.999999999999500'
assert NS('sin(E*10**100)',15) == '0.409160531722613'
# Some input near roots
assert NS(sin(exp(pi*sqrt(163))*pi), 15) == '-2.35596641936785e-12'
assert NS(sin(pi*10**100 + Rational(7,10**5), evaluate=False), 15, maxn=120) == \
'6.99999999428333e-5'
assert NS(sin(Rational(7,10**5), evaluate=False), 15) == \
'6.99999999428333e-5'
# Check detection of various false identities
def test_evalf_near_integers():
# Binet's formula
f = lambda n: ((1+sqrt(5))**n)/(2**n * sqrt(5))
assert NS(f(5000) - fibonacci(5000), 10, maxn=1500) == '5.156009964e-1046'
# Some near-integer identities from
# http://mathworld.wolfram.com/AlmostInteger.html
assert NS('sin(2017*2**(1/5))',15) == '-1.00000000000000'
assert NS('sin(2017*2**(1/5))',20) == '-0.99999999999999997857'
assert NS('1+sin(2017*2**(1/5))',15) == '2.14322287389390e-17'
assert NS('45 - 613*E/37 + 35/991', 15) == '6.03764498766326e-11'
def test_evalf_ramanujan():
assert NS(exp(pi*sqrt(163)) - 640320**3 - 744, 10) == '-7.499274028e-13'
# A related identity
A = 262537412640768744*exp(-pi*sqrt(163))
B = 196884*exp(-2*pi*sqrt(163))
C = 103378831900730205293632*exp(-3*pi*sqrt(163))
assert NS(1-A-B+C,10) == '1.613679005e-59'
# Input that for various reasons have failed at some point
def test_evalf_bugs():
assert NS(sin(1)+exp(-10**10),10) == NS(sin(1),10)
assert NS(exp(10**10)+sin(1),10) == NS(exp(10**10),10)
assert NS('log(1+1/10**50)',20) == '1.0000000000000000000e-50'
assert NS('log(10**100,10)',10) == '100.0000000'
assert NS('log(2)',10) == '0.6931471806'
assert NS('(sin(x)-x)/x**3', 15, subs={x:'1/10**50'}) == '-0.166666666666667'
assert NS(sin(1)+Rational(1,10**100)*I,15) == '0.841470984807897 + 1.00000000000000e-100*I'
assert x.evalf() == x
assert NS((1+I)**2*I,6) == '-2.00000 + 2.32831e-10*I'
d={n: (-1)**Rational(6,7), y: (-1)**Rational(4,7), x: (-1)**Rational(2,7)}
assert NS((x*(1+y*(1 + n))).subs(d).evalf(),6) == '0.346011 + 0.433884*I'
assert NS(((-I-sqrt(2)*I)**2).evalf()) == '-5.82842712474619'
assert NS((1+I)**2*I,15) == '-2.00000000000000 + 2.16840434497101e-19*I'
#1659 (1/2):
assert NS(pi.evalf(69) - pi) == '-4.43863937855894e-71'
#1659 (2/2): With the bug present, this still only fails if the
# terms are in the order given here. This is not generally the case,
# because the order depends on the hashes of the terms.
assert NS(20 - 5008329267844*n**25 - 477638700*n**37 - 19*n,
subs={n:.01}) == '19.8100000000000'
def test_evalf_integer_parts():
a = floor(log(8)/log(2) - exp(-1000), evaluate=False)
b = floor(log(8)/log(2), evaluate=False)
raises(PrecisionExhausted, "a.evalf()")
assert a.evalf(chop=True) == 3
assert a.evalf(maxn=500) == 2
raises(PrecisionExhausted, "b.evalf()")
raises(PrecisionExhausted, "b.evalf(maxn=500)")
assert b.evalf(chop=True) == 3
assert int(floor(factorial(50)/E,evaluate=False).evalf()) == \
11188719610782480504630258070757734324011354208865721592720336800L
assert int(ceiling(factorial(50)/E,evaluate=False).evalf()) == \
11188719610782480504630258070757734324011354208865721592720336801L
assert int(floor((GoldenRatio**999 / sqrt(5) + Rational(1,2))).evalf(1000)) == fibonacci(999)
assert int(floor((GoldenRatio**1000 / sqrt(5) + Rational(1,2))).evalf(1000)) == fibonacci(1000)
def test_evalf_trig_zero_detection():
a = sin(160*pi, evaluate=False)
t = a.evalf(maxn=100)
assert abs(t) < 1e-100
assert t._prec < 2
assert a.evalf(chop=True) == 0
raises(PrecisionExhausted, "a.evalf(strict=True)")
def test_evalf_divergent_series():
n = Symbol('n', integer=True)
raises(ValueError, 'Sum(1/n, (n, 1, oo)).evalf()')
raises(ValueError, 'Sum(n/(n**2+1), (n, 1, oo)).evalf()')
raises(ValueError, 'Sum((-1)**n, (n, 1, oo)).evalf()')
raises(ValueError, 'Sum((-1)**n, (n, 1, oo)).evalf()')
raises(ValueError, 'Sum(n**2, (n, 1, oo)).evalf()')
raises(ValueError, 'Sum(2**n, (n, 1, oo)).evalf()')
raises(ValueError, 'Sum((-2)**n, (n, 1, oo)).evalf()')
raises(ValueError, 'Sum((2*n+3)/(3*n**2+4), (n,0, oo)).evalf()')
raises(ValueError, 'Sum((0.5*n**3)/(n**4+1),(n,0,oo)).evalf()')
def test_evalf_py_methods():
assert abs(float(pi+1) - 4.1415926535897932) < 1e-10
assert abs(complex(pi+1) - 4.1415926535897932) < 1e-10
assert abs(complex(pi+E*I) - (3.1415926535897931+2.7182818284590451j)) < 1e-10
raises(ValueError, "float(pi+x)")
raises(ValueError, "complex(pi+x)")
def test_evalf_power_subs_bugs():
assert (x**2).evalf(subs={x:0}) == 0
assert sqrt(x).evalf(subs={x:0}) == 0
assert (x**Rational(2,3)).evalf(subs={x:0}) == 0
assert (x**x).evalf(subs={x:0}) == 1
assert (3**x).evalf(subs={x:0}) == 1
assert exp(x).evalf(subs={x:0}) == 1
assert ((2+I)**x).evalf(subs={x:0}) == 1
assert (0**x).evalf(subs={x:0}) == 1
def test_evalf_arguments():
raises(TypeError, 'pi.evalf(method="garbage")')
def test_implemented_function_evalf():
from sympy.utilities.lambdify import implemented_function
f = Function('f')
x = Symbol('x')
f = implemented_function(f, lambda x: x + 1)
assert str(f(x)) == "f(x)"
assert str(f(2)) == "f(2)"
assert f(2).evalf() == 3
assert f(x).evalf() == f(x)
del f._imp_ # XXX: due to caching _imp_ would influence all other tests
def test_evaluate_false():
for no in [[], 0, False, None]:
assert Add(3, 2, evaluate=no).is_Add
assert Mul(3, 2, evaluate=no).is_Mul
assert Pow(3, 2, evaluate=no).is_Pow
assert Pow(y, 2, evaluate=True) - Pow(y, 2, evaluate=True) == 0
def test_evalf_relational():
assert Eq(x/5, y/10).evalf() == Eq(0.2*x, 0.1*y)
def test_issue_2387():
assert not cos(sqrt(0.5 + I)).n().is_Function
def test_issue_2387_bug():
from sympy import I, Expr
assert abs(Expr._from_mpmath(I._to_mpmath(15), 15) - I) < 1.0e-15
|
GbalsaC/bitnamiP
|
venv/lib/python2.7/site-packages/sympy/core/tests/test_evalf.py
|
Python
|
agpl-3.0
| 11,270
|
registry = {}
def register(model, fields, order='pk', filter=False, results=5):
registry[str(model)] = (model, fields, results, order, filter)
class LoopBreak(Exception): pass
def search_for_string(search_string):
search_string = search_string.lower()
matches = []
for key in registry:
model, fields, results, order, filter_by = registry[key]
# partial application didn't seem sane in python ... so:
if filter_by:
if callable(filter_by):
filter_by = filter_by()
objects = model.objects.filter(filter_by)
else:
objects = model.objects.all()
counter = 0
try:
for object in objects.order_by(order):
for field in fields:
try:
searchee = getattr(object, field)
except AttributeError:
pass
if callable(searchee):
searchee = searchee()
if search_string in searchee.lower():
matches.append(object)
counter += 1
if counter >= results:
raise LoopBreak()
except LoopBreak:
pass
return matches
|
UWCS/uwcs-website
|
uwcs_website/search/__init__.py
|
Python
|
agpl-3.0
| 1,310
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Dtbuild3(Package):
"""Simple package which acts as a build dependency"""
homepage = "http://www.example.com"
url = "http://www.example.com/dtbuild3-1.0.tar.gz"
version('1.0', '0123456789abcdef0123456789abcdef')
|
iulian787/spack
|
var/spack/repos/builtin.mock/packages/dtbuild3/package.py
|
Python
|
lgpl-2.1
| 455
|
#!/usr/bin/python3
import argparse as ap
import shared
ACTIONS = dict()
def action(key):
def wrapper(function):
ACTIONS[key] = function
return function
return wrapper
def get_closed_issues(repo, milestone):
issues_and_prs = repo.get_issues(milestone=milestone, state="closed")
issues_only = [i for i in issues_and_prs if i.pull_request is None]
return issues_only
def get_closed_prs(repo, milestone):
issues_and_prs = repo.get_issues(milestone=milestone, state="closed")
prs_only = [i for i in issues_and_prs if i.pull_request is not None]
return prs_only
@action("issues-closed")
def print_closed_issues(repo, milestone):
for issue in get_closed_issues(repo, milestone):
print(issue.title)
@action("prs-merged")
def print_closed_prs(repo, milestone):
for pr in get_closed_prs(repo, milestone):
print(pr.title)
def create_parser():
parser = ap.ArgumentParser()
parser.add_argument("version", type=shared.version_type)
parser.add_argument("what", choices=(ACTIONS.keys()))
shared.update_parser_with_common_stuff(parser)
return parser
if __name__ == "__main__":
parser = create_parser()
args = parser.parse_args()
gh = shared.get_github(args)
repo = shared.get_repo(gh, "OpenSCAP")
milestone = shared.get_milestone(repo, args.version)
ACTIONS[args.what](repo, milestone)
|
mpreisler/openscap
|
release_tools/query-milestones.py
|
Python
|
lgpl-2.1
| 1,404
|
from setuptools import setup
def readme():
with open('README.rst') as f:
return f.read()
setup(name='portopy',
version='0.1',
description='Python API for porto',
long_description=readme(),
url='https://github.com/yandex/porto',
author='marchael',
author_email='marchael@yandex-team.ru',
license='none',
packages=['porto'],
install_requires=[
'protobuf',
],
zip_safe=False)
|
dreamer-dead/porto
|
src/api/python/setup.py
|
Python
|
lgpl-3.0
| 440
|
"""Run Valgrind on all demos."""
# Copyright (C) 2008 Ilmar Wilbers
#
# This file is part of DOLFIN.
#
# DOLFIN is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# DOLFIN is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with DOLFIN. If not, see <http://www.gnu.org/licenses/>.
#
# Modified by Anders Logg 2008
# Modified by Dag Lindbo 2008
# Modified by Johannes Ring 2008
# Modified by Johan Hake 2009
#
# First added: 2008-04-08
# Last changed: 2009-05-19
import sys, os, re
import platform
from instant import get_status_output
if "--only-python" in sys.argv:
print "Skipping C++ only memory tests"
sys.exit()
if platform.system() in ['Darwin', 'Windows']:
print "No support for Valgrind on this platform."
sys.exit(0)
# Demos to run
cppdemos = []
for dpath, dnames, fnames in os.walk(os.path.join(os.curdir, "..", "..", "demo")):
if os.path.basename(dpath) == 'cpp':
if os.path.isfile(os.path.join(dpath, 'Makefile')):
cppdemos.append(dpath)
unit_test_excludes = ['graph']
# Python unit test to run
pythontests = []
for dpath, dnames, fnames in os.walk(os.path.join(os.curdir, "..", "unit")):
if os.path.basename(dpath) == 'python':
if os.path.isfile(os.path.join(dpath, 'test.py')):
pythontests.append(dpath)
pythontests = [test for test in pythontests if not any([exclude in test for exclude in unit_test_excludes])]
unit_test_excludes.append('meshconvert')
# cpp unit test to run
cpptests = []
for dpath, dnames, fnames in os.walk(os.path.join(os.curdir, "..", "unit")):
if os.path.basename(dpath) == 'cpp':
if os.path.isfile(os.path.join(dpath, 'test')):
cpptests.append(dpath)
cpptests = [test for test in cpptests if not any([exclude in test for exclude in unit_test_excludes])]
# Set non-interactive
os.putenv('DOLFIN_NOPLOT', '1')
# Helpful env vars
os.putenv('G_SLICE','always-malloc')
os.putenv('GLIBCXX_FORCE_NEW','1')
os.putenv('G_DEBUG','gc-friendly')
print pythontests
# Demos that need command line arguments are treated seperately
cppdemos.remove('./../../demo/undocumented/quadrature/cpp')
cppdemos.remove('./../../demo/undocumented/method-weights/cpp')
cppdemos.remove('./../../demo/undocumented/stiff/cpp')
# Demos that are too time consuming to Valgrind
cppdemos.remove('./../../demo/pde/cahn-hilliard/cpp')
cppdemos.remove('./../../demo/undocumented/elastodynamics/cpp')
cppdemos.remove('./../../demo/undocumented/reaction/cpp')
cppdemos.remove('./../../demo/undocumented/courtemanche/cpp')
re_def_lost = re.compile("definitely lost: 0 bytes in 0 blocks.")
re_pos_lost = re.compile("possibly lost: 0 bytes in 0 blocks.")
re_reachable = re.compile("still reachable: 0 bytes in 0 blocks.")
re_error = re.compile("0 errors from 0 contexts")
dolfin_supp = os.path.join(os.path.abspath(os.getcwd()), 'dolfin_valgrind.supp')
vg_comm = 'valgrind --error-exitcode=9 --tool=memcheck --leak-check=full --show-reachable=yes --suppressions=%s' % dolfin_supp
def run_and_analyse(path, run_str, prog_type, no_reachable_check = False):
output = get_status_output("cd %s && %s %s" % (path, vg_comm, run_str))
if "No such file or directory" in "".join([str(l) for l in output]):
print "*** FAILED: Unable to run demo"
return [(demo, "C++", output[1])]
if len(re.findall('All heap blocks were freed',output[1])) == 1:
print "OK"
return []
if "LEAK SUMMARY:" in output[1] and "ERROR SUMMARY:" in output[1]:
if re_def_lost.search(output[1]) and \
re_pos_lost.search(output[1]) and \
re_error.search(output[1]) and \
(no_reachable_check or re_reachable.search(output[1])):
print "OK"
return []
print "*** FAILED: Memory error"
return [(path, prog_type, output[1])]
failed = []
# Run C++ unittests
print "----------------------------------------------------------------------"
print "Running Valgrind on all C++ unittests"
print ""
print "Found %d C++ unittests" % len(cpptests)
print ""
for test_path in cpptests:
print "----------------------------------------------------------------------"
print "Running Valgrind on C++ unittest %s" % test_path
print ""
if os.path.isfile(os.path.join(test_path, 'test')):
failed += run_and_analyse(test_path,"./test","C++")
else:
print "*** Warning: missing test"
# Outcommenting the Python unittests due to troubles with valgrind suppresions
#
#print "----------------------------------------------------------------------"
#print "Running Valgrind on all Python unittests"
#print ""
#print "Found %d Python unittests" % len(pythontests)
#print ""
# Run Python unittests
#for test_path in pythontests:
# print "----------------------------------------------------------------------"
# print "Running Valgrind on Python unittest %s" % test_path
# print ""
# if os.path.isfile(os.path.join(test_path, 'test.py')):
# failed += run_and_analyse(test_path,"python test.py","Python",True)
# else:
# print "*** Warning: missing test"
# Run C++ demos
print "----------------------------------------------------------------------"
print "Running Valgrind on all demos (non-interactively)"
print ""
print "Found %d C++ demos" % len(cppdemos)
print ""
for demo_path in cppdemos:
print "----------------------------------------------------------------------"
print "Running Valgrind on C++ demo %s" % demo_path
print ""
demo_name = "./" + demo_path.split("/")[-2] + "-demo"
print demo_name
if os.path.isfile(os.path.join(demo_path, demo_name)):
failed += run_and_analyse(demo_path, demo_name, "C++")
else:
print "*** Warning: missing demo"
# Print output for failed tests
print ""
if len(failed) > 0:
print "%d demo(s) and/or unit test(s) failed memcheck, see memcheck.log for details." % len(failed)
file = open("memcheck.log", "w")
for (test, interface, output) in failed:
file.write("----------------------------------------------------------------------\n")
file.write("%s (%s)\n" % (test, interface))
file.write("\n")
file.write(output)
file.write("\n")
file.write("\n")
else:
print "All demos and unit tests checked for memory leaks and errors: OK"
# Return error code if tests failed
sys.exit(len(failed) != 0)
|
akshmakov/Dolfin-Fijee-Fork
|
test/memory/test.py
|
Python
|
lgpl-3.0
| 6,823
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
"""Various utility functions used by this plugin"""
import subprocess
from os import environ, devnull
from os.path import expanduser
from .constants import PLATFORM
from .window_utils import get_pref
class NodeNotFoundError(OSError):
def __init__(self, original_exception, node_path):
msg = "Node.js was not found in the default path"
OSError.__init__(self, msg + (": %s" % original_exception))
self.node_path = node_path
class NodeRuntimeError(RuntimeError):
def __init__(self, stdout, stderr):
msg = "Node.js encountered a runtime error"
RuntimeError.__init__(self, msg + (": %s\n%s" % (stderr, stdout)))
self.stdout = stdout
self.stderr = stderr
class NodeSyntaxError(RuntimeError):
def __init__(self, stdout, stderr):
msg = "Node.js encountered a runtime syntax error"
RuntimeError.__init__(self, msg + (": %s\n%s" % (stderr, stdout)))
self.stdout = stdout
self.stderr = stderr
def get_node_path():
"""Gets the node.js path specified in this plugin's settings file"""
node = get_pref("node_path").get(PLATFORM)
return expanduser(node)
def run_command(args):
"""Runs a command in a shell and returns the output"""
popen_args = {
"stdout": subprocess.PIPE,
"stderr": subprocess.PIPE,
"env": environ,
}
if PLATFORM == "windows":
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
popen_args["startupinfo"] = startupinfo
popen_args["stdin"] = open(devnull, 'wb')
stdout, stderr = subprocess.Popen(args, **popen_args).communicate()
if stderr:
if b"ExperimentalWarning" in stderr:
# Don't treat node experimental warnings as actual errors.
return stdout
elif b"SyntaxError" in stderr:
raise NodeSyntaxError(
stdout.decode('utf-8'), stderr.decode('utf-8'))
else:
raise NodeRuntimeError(
stdout.decode('utf-8'), stderr.decode('utf-8'))
return stdout
def run_node_command(args):
"""Runs a node command in a shell and returns the output"""
node_path = get_node_path()
try:
stdout = run_command([node_path] + args)
except OSError as err:
if node_path in err.strerror or \
"No such file or directory" in err.strerror or \
"The system cannot find the file specified" in err.strerror:
raise NodeNotFoundError(err, node_path)
else:
raise err
return stdout
|
EnTeQuAk/dotfiles
|
sublime-text-3/Packages/HTML-CSS-JS Prettify/src/py/utils/env_utils.py
|
Python
|
unlicense
| 2,798
|
"""Test the Dyson air quality component."""
import json
from unittest import mock
import asynctest
from libpurecool.dyson_pure_cool import DysonPureCool
from libpurecool.dyson_pure_state_v2 import DysonEnvironmentalSensorV2State
import homeassistant.components.dyson.air_quality as dyson
from homeassistant.components import dyson as dyson_parent
from homeassistant.components.air_quality import DOMAIN as AIQ_DOMAIN, \
ATTR_PM_2_5, ATTR_PM_10, ATTR_NO2
from homeassistant.helpers import discovery
from homeassistant.setup import async_setup_component
def _get_dyson_purecool_device():
"""Return a valid device as provided by the Dyson web services."""
device = mock.Mock(spec=DysonPureCool)
device.serial = 'XX-XXXXX-XX'
device.name = 'Living room'
device.connect = mock.Mock(return_value=True)
device.auto_connect = mock.Mock(return_value=True)
device.environmental_state.particulate_matter_25 = '0014'
device.environmental_state.particulate_matter_10 = '0025'
device.environmental_state.nitrogen_dioxide = '0042'
device.environmental_state.volatile_organic_compounds = '0035'
return device
def _get_config():
"""Return a config dictionary."""
return {dyson_parent.DOMAIN: {
dyson_parent.CONF_USERNAME: 'email',
dyson_parent.CONF_PASSWORD: 'password',
dyson_parent.CONF_LANGUAGE: 'GB',
dyson_parent.CONF_DEVICES: [
{
'device_id': 'XX-XXXXX-XX',
'device_ip': '192.168.0.1'
}
]
}}
@asynctest.patch('libpurecool.dyson.DysonAccount.login', return_value=True)
@asynctest.patch('libpurecool.dyson.DysonAccount.devices',
return_value=[_get_dyson_purecool_device()])
async def test_purecool_aiq_attributes(devices, login, hass):
"""Test state attributes."""
await async_setup_component(hass, dyson_parent.DOMAIN, _get_config())
await hass.async_block_till_done()
fan_state = hass.states.get("air_quality.living_room")
attributes = fan_state.attributes
assert fan_state.state == '14'
assert attributes[ATTR_PM_2_5] == 14
assert attributes[ATTR_PM_10] == 25
assert attributes[ATTR_NO2] == 42
assert attributes[dyson.ATTR_VOC] == 35
@asynctest.patch('libpurecool.dyson.DysonAccount.login', return_value=True)
@asynctest.patch('libpurecool.dyson.DysonAccount.devices',
return_value=[_get_dyson_purecool_device()])
async def test_purecool_aiq_update_state(devices, login, hass):
"""Test state update."""
device = devices.return_value[0]
await async_setup_component(hass, dyson_parent.DOMAIN, _get_config())
await hass.async_block_till_done()
event = {
"msg": "ENVIRONMENTAL-CURRENT-SENSOR-DATA",
"time": "2019-03-29T10:00:01.000Z",
"data": {
"pm10": "0080",
"p10r": "0151",
"hact": "0040",
"va10": "0055",
"p25r": "0161",
"noxl": "0069",
"pm25": "0035",
"sltm": "OFF",
"tact": "2960"
}
}
device.environmental_state = \
DysonEnvironmentalSensorV2State(json.dumps(event))
for call in device.add_message_listener.call_args_list:
callback = call[0][0]
if type(callback.__self__) == dyson.DysonAirSensor:
callback(device.environmental_state)
await hass.async_block_till_done()
fan_state = hass.states.get("air_quality.living_room")
attributes = fan_state.attributes
assert fan_state.state == '35'
assert attributes[ATTR_PM_2_5] == 35
assert attributes[ATTR_PM_10] == 80
assert attributes[ATTR_NO2] == 69
assert attributes[dyson.ATTR_VOC] == 55
@asynctest.patch('libpurecool.dyson.DysonAccount.login', return_value=True)
@asynctest.patch('libpurecool.dyson.DysonAccount.devices',
return_value=[_get_dyson_purecool_device()])
async def test_purecool_component_setup_only_once(devices, login, hass):
"""Test if entities are created only once."""
config = _get_config()
await async_setup_component(hass, dyson_parent.DOMAIN, config)
await hass.async_block_till_done()
discovery.load_platform(hass, AIQ_DOMAIN,
dyson_parent.DOMAIN, {}, config)
await hass.async_block_till_done()
assert len(hass.data[dyson.DYSON_AIQ_DEVICES]) == 1
@asynctest.patch('libpurecool.dyson.DysonAccount.login', return_value=True)
@asynctest.patch('libpurecool.dyson.DysonAccount.devices',
return_value=[_get_dyson_purecool_device()])
async def test_purecool_aiq_without_discovery(devices, login, hass):
"""Test if component correctly returns if discovery not set."""
await async_setup_component(hass, dyson_parent.DOMAIN, _get_config())
await hass.async_block_till_done()
add_entities_mock = mock.MagicMock()
dyson.setup_platform(hass, None, add_entities_mock, None)
assert add_entities_mock.call_count == 0
@asynctest.patch('libpurecool.dyson.DysonAccount.login', return_value=True)
@asynctest.patch('libpurecool.dyson.DysonAccount.devices',
return_value=[_get_dyson_purecool_device()])
async def test_purecool_aiq_empty_environment_state(devices, login, hass):
"""Test device with empty environmental state."""
await async_setup_component(hass, dyson_parent.DOMAIN, _get_config())
await hass.async_block_till_done()
device = hass.data[dyson.DYSON_AIQ_DEVICES][0]
device._device.environmental_state = None
assert device.state is None
assert device.particulate_matter_2_5 is None
assert device.particulate_matter_10 is None
assert device.nitrogen_dioxide is None
assert device.volatile_organic_compounds is None
|
DavidLP/home-assistant
|
tests/components/dyson/test_air_quality.py
|
Python
|
apache-2.0
| 5,733
|
"""Unit test for treadmill.runtime.
"""
import errno
import socket
import unittest
import mock
import treadmill
import treadmill.rulefile
import treadmill.runtime
from treadmill import exc
class RuntimeTest(unittest.TestCase):
"""Tests for treadmill.runtime."""
@mock.patch('socket.socket.bind', mock.Mock())
def test__allocate_sockets(self):
"""Test allocating sockets.
"""
# access protected module _allocate_sockets
# pylint: disable=w0212
socket.socket.bind.side_effect = [
socket.error(errno.EADDRINUSE, 'In use'),
mock.DEFAULT,
mock.DEFAULT,
mock.DEFAULT
]
sockets = treadmill.runtime._allocate_sockets(
'prod', '0.0.0.0', socket.SOCK_STREAM, 3
)
self.assertEqual(3, len(sockets))
@mock.patch('socket.socket.bind', mock.Mock())
def test__allocate_sockets_fail(self):
"""Test allocating sockets when all are taken.
"""
# access protected module _allocate_sockets
# pylint: disable=w0212
socket.socket.bind.side_effect = socket.error(errno.EADDRINUSE,
'In use')
with self.assertRaises(exc.ContainerSetupError):
treadmill.runtime._allocate_sockets(
'prod', '0.0.0.0', socket.SOCK_STREAM, 3
)
@mock.patch('socket.socket', mock.Mock(autospec=True))
@mock.patch('treadmill.runtime._allocate_sockets', mock.Mock())
def test_allocate_network_ports(self):
"""Test network port allocation.
"""
# access protected module _allocate_network_ports
# pylint: disable=w0212
treadmill.runtime._allocate_sockets.side_effect = \
lambda _x, _y, _z, count: [socket.socket()] * count
mock_socket = socket.socket.return_value
mock_socket.getsockname.side_effect = [
('unused', 50001),
('unused', 60001),
('unused', 10000),
('unused', 10001),
('unused', 10002),
('unused', 12345),
('unused', 54321),
]
manifest = {
'type': 'native',
'environment': 'dev',
'endpoints': [
{
'name': 'http',
'port': 8000,
'proto': 'tcp',
}, {
'name': 'ssh',
'port': 0,
'proto': 'tcp',
}, {
'name': 'dns',
'port': 5353,
'proto': 'udp',
}, {
'name': 'port0',
'port': 0,
'proto': 'udp',
}
],
'ephemeral_ports': {'tcp': 3, 'udp': 0},
}
treadmill.runtime.allocate_network_ports(
'1.2.3.4',
manifest
)
# in the updated manifest, make sure that real_port is specificed from
# the ephemeral range as returnd by getsockname.
self.assertEqual(
8000,
manifest['endpoints'][0]['port']
)
self.assertEqual(
50001,
manifest['endpoints'][0]['real_port']
)
self.assertEqual(
60001,
manifest['endpoints'][1]['port']
)
self.assertEqual(
60001,
manifest['endpoints'][1]['real_port']
)
self.assertEqual(
5353,
manifest['endpoints'][2]['port']
)
self.assertEqual(
12345,
manifest['endpoints'][2]['real_port']
)
self.assertEqual(
54321,
manifest['endpoints'][3]['port']
)
self.assertEqual(
54321,
manifest['endpoints'][3]['real_port']
)
self.assertEqual(
[10000, 10001, 10002],
manifest['ephemeral_ports']['tcp']
)
if __name__ == '__main__':
unittest.main()
|
keithhendry/treadmill
|
tests/runtime_test.py
|
Python
|
apache-2.0
| 4,097
|
import cPickle
import gzip
import os, sys, errno
import time
import math
# numpy & theano imports need to be done in this order (only for some numpy installations, not sure why)
import numpy
#import gnumpy as gnp
# we need to explicitly import this in some cases, not sure why this doesn't get imported with numpy itself
import numpy.distutils.__config__
# and only after that can we import theano
import theano
from utils.providers import ListDataProvider
from frontend.label_normalisation import HTSLabelNormalisation, XMLLabelNormalisation
from frontend.silence_remover import SilenceRemover
from frontend.silence_remover import trim_silence
from frontend.min_max_norm import MinMaxNormalisation
from frontend.acoustic_composition import AcousticComposition
from frontend.parameter_generation import ParameterGeneration
from frontend.mean_variance_norm import MeanVarianceNorm
# the new class for label composition and normalisation
from frontend.label_composer import LabelComposer
from frontend.label_modifier import HTSLabelModification
#from frontend.mlpg_fast import MLParameterGenerationFast
#from frontend.mlpg_fast_layer import MLParameterGenerationFastLayer
import configuration
from models.deep_rnn import DeepRecurrentNetwork
from models.sdae import StackedDenoiseAutoEncoder
from utils.compute_distortion import DistortionComputation, IndividualDistortionComp
from utils.generate import generate_wav
from utils.learn_rates import ExpDecreaseLearningRate
from io_funcs.binary_io import BinaryIOCollection
#import matplotlib.pyplot as plt
# our custom logging class that can also plot
#from logplot.logging_plotting import LoggerPlotter, MultipleTimeSeriesPlot, SingleWeightMatrixPlot
from logplot.logging_plotting import LoggerPlotter, MultipleSeriesPlot, SingleWeightMatrixPlot
import logging # as logging
import logging.config
import StringIO
def extract_file_id_list(file_list):
file_id_list = []
for file_name in file_list:
file_id = os.path.basename(os.path.splitext(file_name)[0])
file_id_list.append(file_id)
return file_id_list
def read_file_list(file_name):
logger = logging.getLogger("read_file_list")
file_lists = []
fid = open(file_name)
for line in fid.readlines():
line = line.strip()
if len(line) < 1:
continue
file_lists.append(line)
fid.close()
logger.debug('Read file list from %s' % file_name)
return file_lists
def make_output_file_list(out_dir, in_file_lists):
out_file_lists = []
for in_file_name in in_file_lists:
file_id = os.path.basename(in_file_name)
out_file_name = out_dir + '/' + file_id
out_file_lists.append(out_file_name)
return out_file_lists
def prepare_file_path_list(file_id_list, file_dir, file_extension, new_dir_switch=True):
if not os.path.exists(file_dir) and new_dir_switch:
os.makedirs(file_dir)
file_name_list = []
for file_id in file_id_list:
file_name = file_dir + '/' + file_id + file_extension
file_name_list.append(file_name)
return file_name_list
def visualize_dnn(dnn):
layer_num = len(dnn.params) ## including input and output
plotlogger = logging.getLogger("plotting")
for i in xrange(layer_num):
fig_name = 'Activation weights W' + str(i) + '_' + dnn.params[i].name
fig_title = 'Activation weights of W' + str(i)
xlabel = 'Neuron index of hidden layer ' + str(i)
ylabel = 'Neuron index of hidden layer ' + str(i+1)
if i == 0:
xlabel = 'Input feature index'
if i == layer_num-1:
ylabel = 'Output feature index'
aa = dnn.params[i].get_value(borrow=True).T
print aa.shape, aa.size
if aa.size > aa.shape[0]:
logger.create_plot(fig_name, SingleWeightMatrixPlot)
plotlogger.add_plot_point(fig_name, fig_name, dnn.params[i].get_value(borrow=True).T)
plotlogger.save_plot(fig_name, title=fig_name, xlabel=xlabel, ylabel=ylabel)
def load_covariance(var_file_dict, out_dimension_dict):
var = {}
io_funcs = BinaryIOCollection()
for feature_name in var_file_dict.keys():
var_values, dimension = io_funcs.load_binary_file_frame(var_file_dict[feature_name], 1)
var_values = numpy.reshape(var_values, (out_dimension_dict[feature_name], 1))
var[feature_name] = var_values
return var
def train_DNN(train_xy_file_list, valid_xy_file_list, \
nnets_file_name, n_ins, n_outs, ms_outs, hyper_params, buffer_size, plot=False, var_dict=None,
cmp_mean_vector = None, cmp_std_vector = None, init_dnn_model_file = None):
# get loggers for this function
# this one writes to both console and file
logger = logging.getLogger("main.train_DNN")
logger.debug('Starting train_DNN')
if plot:
# this one takes care of plotting duties
plotlogger = logging.getLogger("plotting")
# create an (empty) plot of training convergence, ready to receive data points
logger.create_plot('training convergence',MultipleSeriesPlot)
try:
assert numpy.sum(ms_outs) == n_outs
except AssertionError:
logger.critical('the summation of multi-stream outputs does not equal to %d' %(n_outs))
raise
####parameters#####
finetune_lr = float(hyper_params['learning_rate'])
training_epochs = int(hyper_params['training_epochs'])
batch_size = int(hyper_params['batch_size'])
l1_reg = float(hyper_params['l1_reg'])
l2_reg = float(hyper_params['l2_reg'])
warmup_epoch = int(hyper_params['warmup_epoch'])
momentum = float(hyper_params['momentum'])
warmup_momentum = float(hyper_params['warmup_momentum'])
hidden_layer_size = hyper_params['hidden_layer_size']
buffer_utt_size = buffer_size
early_stop_epoch = int(hyper_params['early_stop_epochs'])
hidden_activation = hyper_params['hidden_activation']
output_activation = hyper_params['output_activation']
model_type = hyper_params['model_type']
hidden_layer_type = hyper_params['hidden_layer_type']
## use a switch to turn on pretraining
## pretraining may not help too much, if this case, we turn it off to save time
do_pretraining = hyper_params['do_pretraining']
pretraining_epochs = int(hyper_params['pretraining_epochs'])
pretraining_lr = float(hyper_params['pretraining_lr'])
sequential_training = hyper_params['sequential_training']
dropout_rate = hyper_params['dropout_rate']
# sequential_training = True
buffer_size = int(buffer_size / batch_size) * batch_size
###################
(train_x_file_list, train_y_file_list) = train_xy_file_list
(valid_x_file_list, valid_y_file_list) = valid_xy_file_list
logger.debug('Creating training data provider')
train_data_reader = ListDataProvider(x_file_list = train_x_file_list, y_file_list = train_y_file_list,
n_ins = n_ins, n_outs = n_outs, buffer_size = buffer_size, sequential = sequential_training, shuffle = True)
logger.debug('Creating validation data provider')
valid_data_reader = ListDataProvider(x_file_list = valid_x_file_list, y_file_list = valid_y_file_list,
n_ins = n_ins, n_outs = n_outs, buffer_size = buffer_size, sequential = sequential_training, shuffle = False)
shared_train_set_xy, temp_train_set_x, temp_train_set_y = train_data_reader.load_one_partition()
train_set_x, train_set_y = shared_train_set_xy
shared_valid_set_xy, valid_set_x, valid_set_y = valid_data_reader.load_one_partition() #validation data is still read block by block
valid_set_x, valid_set_y = shared_valid_set_xy
train_data_reader.reset()
valid_data_reader.reset()
##temporally we use the training set as pretrain_set_x.
##we need to support any data for pretraining
# pretrain_set_x = train_set_x
# numpy random generator
numpy_rng = numpy.random.RandomState(123)
logger.info('building the model')
dnn_model = None
pretrain_fn = None ## not all the model support pretraining right now
train_fn = None
valid_fn = None
valid_model = None ## valid_fn and valid_model are the same. reserve to computer multi-stream distortion
if model_type == 'DNN':
dnn_model = DeepRecurrentNetwork(n_in= n_ins, hidden_layer_size = hidden_layer_size, n_out = n_outs,
L1_reg = l1_reg, L2_reg = l2_reg, hidden_layer_type = hidden_layer_type, dropout_rate = dropout_rate)
train_fn, valid_fn = dnn_model.build_finetune_functions(
(train_set_x, train_set_y), (valid_set_x, valid_set_y)) #, batch_size=batch_size
elif model_type == 'SDAE':
dnn_model = StackedDenoiseAutoEncoder(n_in= n_ins, hidden_layer_size = hidden_layer_size, n_out = n_outs,
L1_reg = l1_reg, L2_reg = l2_reg, hidden_layer_type = hidden_layer_type, dropout_rate = dropout_rate)
if do_pretraining:
#temporally we use the training set as pretrain_set_x.
#we need to support any data for pretraining
pretrain_set_x = train_set_x
pretraining_fn = dnn_model.pretraining_functions(pretrain_set_x)
train_fn, valid_fn = dnn_model.build_finetune_functions(
(train_set_x, train_set_y), (valid_set_x, valid_set_y)) #, batch_size=batch_size
else:
logger.critical('%s type NN model is not supported!' %(model_type))
raise
## if pretraining is supported more than one model, add the switch here
## be careful to use autoencoder for pretraining here:
if do_pretraining and model_type == 'SDAE':
logger.info('pretraining the %s model' %(model_type))
corruption_level = 0.0
## in SDAE we do layer-wise pretraining using autoencoders
for i in xrange(dnn_model.n_layers):
for epoch in xrange(pretraining_epochs):
sub_start_time = time.clock()
pretrain_loss = []
while (not train_data_reader.is_finish()):
shared_train_set_xy, temp_train_set_x, temp_train_set_y = train_data_reader.load_one_partition()
# if sequential training, the batch size will be the number of frames in an utterance
if sequential_training == True:
batch_size = temp_train_set_x.shape[0]
n_train_batches = temp_train_set_x.shape[0] / batch_size
for index in xrange(n_train_batches):
## send a batch to the shared variable, rather than pass the batch size and batch index to the finetune function
pretrain_set_x.set_value(numpy.asarray(temp_train_set_x[index*batch_size:(index + 1)*batch_size], dtype=theano.config.floatX), borrow=True)
pretrain_loss.append(pretraining_fn[i](corruption=corruption_level,
learning_rate=pretraining_lr))
sub_end_time = time.clock()
logger.info('Pre-training layer %i, epoch %d, cost %s, time spent%.2f' % (i+1, epoch+1, numpy.mean(pretrain_loss), (sub_end_time - sub_start_time)))
train_data_reader.reset()
logger.info('fine-tuning the %s model' %(model_type))
start_time = time.time()
best_dnn_model = dnn_model
best_validation_loss = sys.float_info.max
previous_loss = sys.float_info.max
early_stop = 0
epoch = 0
# finetune_lr = 0.000125
previous_finetune_lr = finetune_lr
print finetune_lr
while (epoch < training_epochs):
epoch = epoch + 1
current_momentum = momentum
current_finetune_lr = finetune_lr
if epoch <= warmup_epoch:
current_finetune_lr = finetune_lr
current_momentum = warmup_momentum
else:
current_finetune_lr = previous_finetune_lr * 0.5
previous_finetune_lr = current_finetune_lr
train_error = []
sub_start_time = time.time()
while (not train_data_reader.is_finish()):
shared_train_set_xy, temp_train_set_x, temp_train_set_y = train_data_reader.load_one_partition()
# train_set_x.set_value(numpy.asarray(temp_train_set_x, dtype=theano.config.floatX), borrow=True)
# train_set_y.set_value(numpy.asarray(temp_train_set_y, dtype=theano.config.floatX), borrow=True)
# if sequential training, the batch size will be the number of frames in an utterance
if sequential_training == True:
batch_size = temp_train_set_x.shape[0]
n_train_batches = temp_train_set_x.shape[0] / batch_size
for index in xrange(n_train_batches):
## send a batch to the shared variable, rather than pass the batch size and batch index to the finetune function
train_set_x.set_value(numpy.asarray(temp_train_set_x[index*batch_size:(index + 1)*batch_size], dtype=theano.config.floatX), borrow=True)
train_set_y.set_value(numpy.asarray(temp_train_set_y[index*batch_size:(index + 1)*batch_size], dtype=theano.config.floatX), borrow=True)
this_train_error = train_fn(current_finetune_lr, current_momentum)
train_error.append(this_train_error)
train_data_reader.reset()
logger.debug('calculating validation loss')
validation_losses = []
while (not valid_data_reader.is_finish()):
shared_valid_set_xy, temp_valid_set_x, temp_valid_set_y = valid_data_reader.load_one_partition()
valid_set_x.set_value(numpy.asarray(temp_valid_set_x, dtype=theano.config.floatX), borrow=True)
valid_set_y.set_value(numpy.asarray(temp_valid_set_y, dtype=theano.config.floatX), borrow=True)
this_valid_loss = valid_fn()
validation_losses.append(this_valid_loss)
valid_data_reader.reset()
this_validation_loss = numpy.mean(validation_losses)
this_train_valid_loss = numpy.mean(numpy.asarray(train_error))
sub_end_time = time.time()
loss_difference = this_validation_loss - previous_loss
logger.info('epoch %i, validation error %f, train error %f time spent %.2f' %(epoch, this_validation_loss, this_train_valid_loss, (sub_end_time - sub_start_time)))
if plot:
plotlogger.add_plot_point('training convergence','validation set',(epoch,this_validation_loss))
plotlogger.add_plot_point('training convergence','training set',(epoch,this_train_valid_loss))
plotlogger.save_plot('training convergence',title='Progress of training and validation error',xlabel='epochs',ylabel='error')
if this_validation_loss < best_validation_loss:
if epoch > 5:
cPickle.dump(best_dnn_model, open(nnets_file_name, 'wb'))
best_dnn_model = dnn_model
best_validation_loss = this_validation_loss
# logger.debug('validation loss decreased, so saving model')
if this_validation_loss >= previous_loss:
logger.debug('validation loss increased')
# dbn = best_dnn_model
early_stop += 1
if epoch > 15 and early_stop > early_stop_epoch:
logger.debug('stopping early')
break
if math.isnan(this_validation_loss):
break
previous_loss = this_validation_loss
end_time = time.time()
# cPickle.dump(best_dnn_model, open(nnets_file_name, 'wb'))
logger.info('overall training time: %.2fm validation error %f' % ((end_time - start_time) / 60., best_validation_loss))
if plot:
plotlogger.save_plot('training convergence',title='Final training and validation error',xlabel='epochs',ylabel='error')
return best_validation_loss
def dnn_generation(valid_file_list, nnets_file_name, n_ins, n_outs, out_file_list):
logger = logging.getLogger("dnn_generation")
logger.debug('Starting dnn_generation')
plotlogger = logging.getLogger("plotting")
dnn_model = cPickle.load(open(nnets_file_name, 'rb'))
file_number = len(valid_file_list)
for i in xrange(file_number): #file_number
logger.info('generating %4d of %4d: %s' % (i+1,file_number,valid_file_list[i]) )
fid_lab = open(valid_file_list[i], 'rb')
features = numpy.fromfile(fid_lab, dtype=numpy.float32)
fid_lab.close()
features = features[:(n_ins * (features.size / n_ins))]
test_set_x = features.reshape((-1, n_ins))
predicted_parameter = dnn_model.parameter_prediction(test_set_x)
### write to cmp file
predicted_parameter = numpy.array(predicted_parameter, 'float32')
temp_parameter = predicted_parameter
fid = open(out_file_list[i], 'wb')
predicted_parameter.tofile(fid)
logger.debug('saved to %s' % out_file_list[i])
fid.close()
def dnn_generation_lstm(valid_file_list, nnets_file_name, n_ins, n_outs, out_file_list):
logger = logging.getLogger("dnn_generation")
logger.debug('Starting dnn_generation')
plotlogger = logging.getLogger("plotting")
dnn_model = cPickle.load(open(nnets_file_name, 'rb'))
visualize_dnn(dnn_model)
file_number = len(valid_file_list)
for i in xrange(file_number): #file_number
logger.info('generating %4d of %4d: %s' % (i+1,file_number,valid_file_list[i]) )
fid_lab = open(valid_file_list[i], 'rb')
features = numpy.fromfile(fid_lab, dtype=numpy.float32)
fid_lab.close()
features = features[:(n_ins * (features.size / n_ins))]
test_set_x = features.reshape((-1, n_ins))
predicted_parameter = dnn_model.parameter_prediction_lstm(test_set_x)
### write to cmp file
predicted_parameter = numpy.array(predicted_parameter, 'float32')
temp_parameter = predicted_parameter
fid = open(out_file_list[i], 'wb')
predicted_parameter.tofile(fid)
logger.debug('saved to %s' % out_file_list[i])
fid.close()
##generate bottleneck layer as festures
def dnn_hidden_generation(valid_file_list, nnets_file_name, n_ins, n_outs, out_file_list):
logger = logging.getLogger("dnn_generation")
logger.debug('Starting dnn_generation')
plotlogger = logging.getLogger("plotting")
dnn_model = cPickle.load(open(nnets_file_name, 'rb'))
file_number = len(valid_file_list)
for i in xrange(file_number):
logger.info('generating %4d of %4d: %s' % (i+1,file_number,valid_file_list[i]) )
fid_lab = open(valid_file_list[i], 'rb')
features = numpy.fromfile(fid_lab, dtype=numpy.float32)
fid_lab.close()
features = features[:(n_ins * (features.size / n_ins))]
features = features.reshape((-1, n_ins))
temp_set_x = features.tolist()
test_set_x = theano.shared(numpy.asarray(temp_set_x, dtype=theano.config.floatX))
predicted_parameter = dnn_model.generate_top_hidden_layer(test_set_x=test_set_x)
### write to cmp file
predicted_parameter = numpy.array(predicted_parameter, 'float32')
temp_parameter = predicted_parameter
fid = open(out_file_list[i], 'wb')
predicted_parameter.tofile(fid)
logger.debug('saved to %s' % out_file_list[i])
fid.close()
def main_function(cfg):
# get a logger for this main function
logger = logging.getLogger("main")
# get another logger to handle plotting duties
plotlogger = logging.getLogger("plotting")
# later, we might do this via a handler that is created, attached and configured
# using the standard config mechanism of the logging module
# but for now we need to do it manually
plotlogger.set_plot_path(cfg.plot_dir)
#### parameter setting########
hidden_layer_size = cfg.hyper_params['hidden_layer_size']
####prepare environment
try:
file_id_list = read_file_list(cfg.file_id_scp)
logger.debug('Loaded file id list from %s' % cfg.file_id_scp)
except IOError:
# this means that open(...) threw an error
logger.critical('Could not load file id list from %s' % cfg.file_id_scp)
raise
###total file number including training, development, and testing
total_file_number = len(file_id_list)
data_dir = cfg.data_dir
nn_cmp_dir = os.path.join(data_dir, 'nn' + cfg.combined_feature_name + '_' + str(cfg.cmp_dim))
nn_cmp_norm_dir = os.path.join(data_dir, 'nn_norm' + cfg.combined_feature_name + '_' + str(cfg.cmp_dim))
model_dir = os.path.join(cfg.work_dir, 'nnets_model')
gen_dir = os.path.join(cfg.work_dir, 'gen')
in_file_list_dict = {}
for feature_name in cfg.in_dir_dict.keys():
in_file_list_dict[feature_name] = prepare_file_path_list(file_id_list, cfg.in_dir_dict[feature_name], cfg.file_extension_dict[feature_name], False)
nn_cmp_file_list = prepare_file_path_list(file_id_list, nn_cmp_dir, cfg.cmp_ext)
nn_cmp_norm_file_list = prepare_file_path_list(file_id_list, nn_cmp_norm_dir, cfg.cmp_ext)
###normalisation information
norm_info_file = os.path.join(data_dir, 'norm_info' + cfg.combined_feature_name + '_' + str(cfg.cmp_dim) + '_' + cfg.output_feature_normalisation + '.dat')
### normalise input full context label
# currently supporting two different forms of lingustic features
# later, we should generalise this
if cfg.label_style == 'HTS':
label_normaliser = HTSLabelNormalisation(question_file_name=cfg.question_file_name, add_frame_features=cfg.add_frame_features, subphone_feats=cfg.subphone_feats)
lab_dim = label_normaliser.dimension + cfg.appended_input_dim
logger.info('Input label dimension is %d' % lab_dim)
suffix=str(lab_dim)
# no longer supported - use new "composed" style labels instead
elif cfg.label_style == 'composed':
# label_normaliser = XMLLabelNormalisation(xpath_file_name=cfg.xpath_file_name)
suffix='composed'
if cfg.process_labels_in_work_dir:
label_data_dir = cfg.work_dir
else:
label_data_dir = data_dir
# the number can be removed
binary_label_dir = os.path.join(label_data_dir, 'binary_label_'+suffix)
nn_label_dir = os.path.join(label_data_dir, 'nn_no_silence_lab_'+suffix)
nn_label_norm_dir = os.path.join(label_data_dir, 'nn_no_silence_lab_norm_'+suffix)
in_label_align_file_list = prepare_file_path_list(file_id_list, cfg.in_label_align_dir, cfg.lab_ext, False)
binary_label_file_list = prepare_file_path_list(file_id_list, binary_label_dir, cfg.lab_ext)
nn_label_file_list = prepare_file_path_list(file_id_list, nn_label_dir, cfg.lab_ext)
nn_label_norm_file_list = prepare_file_path_list(file_id_list, nn_label_norm_dir, cfg.lab_ext)
dur_file_list = prepare_file_path_list(file_id_list, cfg.in_dur_dir, cfg.dur_ext)
lf0_file_list = prepare_file_path_list(file_id_list, cfg.in_lf0_dir, cfg.lf0_ext)
# to do - sanity check the label dimension here?
min_max_normaliser = None
label_norm_file = 'label_norm_%s_%d.dat' %(cfg.label_style, lab_dim)
label_norm_file = os.path.join(label_data_dir, label_norm_file)
if cfg.GenTestList:
try:
test_id_list = read_file_list(cfg.test_id_scp)
logger.debug('Loaded file id list from %s' % cfg.test_id_scp)
except IOError:
# this means that open(...) threw an error
logger.critical('Could not load file id list from %s' % cfg.test_id_scp)
raise
in_label_align_file_list = prepare_file_path_list(test_id_list, cfg.in_label_align_dir, cfg.lab_ext, False)
binary_label_file_list = prepare_file_path_list(test_id_list, binary_label_dir, cfg.lab_ext)
nn_label_file_list = prepare_file_path_list(test_id_list, nn_label_dir, cfg.lab_ext)
nn_label_norm_file_list = prepare_file_path_list(test_id_list, nn_label_norm_dir, cfg.lab_ext)
if cfg.NORMLAB and (cfg.label_style == 'HTS'):
# simple HTS labels
logger.info('preparing label data (input) using standard HTS style labels')
label_normaliser.perform_normalisation(in_label_align_file_list, binary_label_file_list)
remover = SilenceRemover(n_cmp = lab_dim, silence_pattern = cfg.silence_pattern, remove_frame_features = cfg.add_frame_features, subphone_feats = cfg.subphone_feats)
remover.remove_silence(binary_label_file_list, in_label_align_file_list, nn_label_file_list)
min_max_normaliser = MinMaxNormalisation(feature_dimension = lab_dim, min_value = 0.01, max_value = 0.99)
###use only training data to find min-max information, then apply on the whole dataset
if cfg.GenTestList:
min_max_normaliser.load_min_max_values(label_norm_file)
else:
min_max_normaliser.find_min_max_values(nn_label_file_list[0:cfg.train_file_number])
min_max_normaliser.normalise_data(nn_label_file_list, nn_label_norm_file_list)
if cfg.NORMLAB and (cfg.label_style == 'composed'):
# new flexible label preprocessor
logger.info('preparing label data (input) using "composed" style labels')
label_composer = LabelComposer()
label_composer.load_label_configuration(cfg.label_config_file)
logger.info('Loaded label configuration')
# logger.info('%s' % label_composer.configuration.labels )
lab_dim=label_composer.compute_label_dimension()
logger.info('label dimension will be %d' % lab_dim)
if cfg.precompile_xpaths:
label_composer.precompile_xpaths()
# there are now a set of parallel input label files (e.g, one set of HTS and another set of Ossian trees)
# create all the lists of these, ready to pass to the label composer
in_label_align_file_list = {}
for label_style, label_style_required in label_composer.label_styles.iteritems():
if label_style_required:
logger.info('labels of style %s are required - constructing file paths for them' % label_style)
if label_style == 'xpath':
in_label_align_file_list['xpath'] = prepare_file_path_list(file_id_list, cfg.xpath_label_align_dir, cfg.utt_ext, False)
elif label_style == 'hts':
in_label_align_file_list['hts'] = prepare_file_path_list(file_id_list, cfg.hts_label_align_dir, cfg.lab_ext, False)
else:
logger.critical('unsupported label style %s specified in label configuration' % label_style)
raise Exception
# now iterate through the files, one at a time, constructing the labels for them
num_files=len(file_id_list)
logger.info('the label styles required are %s' % label_composer.label_styles)
for i in xrange(num_files):
logger.info('making input label features for %4d of %4d' % (i+1,num_files))
# iterate through the required label styles and open each corresponding label file
# a dictionary of file descriptors, pointing at the required files
required_labels={}
for label_style, label_style_required in label_composer.label_styles.iteritems():
# the files will be a parallel set of files for a single utterance
# e.g., the XML tree and an HTS label file
if label_style_required:
required_labels[label_style] = open(in_label_align_file_list[label_style][i] , 'r')
logger.debug(' opening label file %s' % in_label_align_file_list[label_style][i])
logger.debug('label styles with open files: %s' % required_labels)
label_composer.make_labels(required_labels,out_file_name=binary_label_file_list[i],fill_missing_values=cfg.fill_missing_values,iterate_over_frames=cfg.iterate_over_frames)
# now close all opened files
for fd in required_labels.itervalues():
fd.close()
# silence removal
if cfg.remove_silence_using_binary_labels:
silence_feature = 0 ## use first feature in label -- hardcoded for now
logger.info('Silence removal from label using silence feature: %s'%(label_composer.configuration.labels[silence_feature]))
logger.info('Silence will be removed from CMP files in same way')
## Binary labels have 2 roles: both the thing trimmed and the instructions for trimming:
trim_silence(binary_label_file_list, nn_label_file_list, lab_dim, \
binary_label_file_list, lab_dim, silence_feature)
else:
logger.info('No silence removal done')
# start from the labels we have just produced, not trimmed versions
nn_label_file_list = binary_label_file_list
min_max_normaliser = MinMaxNormalisation(feature_dimension = lab_dim, min_value = 0.01, max_value = 0.99)
###use only training data to find min-max information, then apply on the whole dataset
min_max_normaliser.find_min_max_values(nn_label_file_list[0:cfg.train_file_number])
min_max_normaliser.normalise_data(nn_label_file_list, nn_label_norm_file_list)
if min_max_normaliser != None and not cfg.GenTestList:
### save label normalisation information for unseen testing labels
label_min_vector = min_max_normaliser.min_vector
label_max_vector = min_max_normaliser.max_vector
label_norm_info = numpy.concatenate((label_min_vector, label_max_vector), axis=0)
label_norm_info = numpy.array(label_norm_info, 'float32')
fid = open(label_norm_file, 'wb')
label_norm_info.tofile(fid)
fid.close()
logger.info('saved %s vectors to %s' %(label_min_vector.size, label_norm_file))
### make output duration data
if cfg.MAKEDUR:
logger.info('creating duration (output) features')
feature_type = cfg.dur_feature_type
label_normaliser.prepare_dur_data(in_label_align_file_list, dur_file_list, feature_type)
### make output acoustic data
if cfg.MAKECMP:
logger.info('creating acoustic (output) features')
delta_win = cfg.delta_win #[-0.5, 0.0, 0.5]
acc_win = cfg.acc_win #[1.0, -2.0, 1.0]
acoustic_worker = AcousticComposition(delta_win = delta_win, acc_win = acc_win)
if 'dur' in cfg.in_dir_dict.keys() and cfg.AcousticModel:
acoustic_worker.make_equal_frames(dur_file_list, lf0_file_list, cfg.in_dimension_dict)
acoustic_worker.prepare_nn_data(in_file_list_dict, nn_cmp_file_list, cfg.in_dimension_dict, cfg.out_dimension_dict)
if cfg.remove_silence_using_binary_labels:
## do this to get lab_dim:
label_composer = LabelComposer()
label_composer.load_label_configuration(cfg.label_config_file)
lab_dim=label_composer.compute_label_dimension()
silence_feature = 0 ## use first feature in label -- hardcoded for now
logger.info('Silence removal from CMP using binary label file')
## overwrite the untrimmed audio with the trimmed version:
trim_silence(nn_cmp_file_list, nn_cmp_file_list, cfg.cmp_dim,
binary_label_file_list, lab_dim, silence_feature)
else: ## back off to previous method using HTS labels:
remover = SilenceRemover(n_cmp = cfg.cmp_dim, silence_pattern = cfg.silence_pattern, remove_frame_features = cfg.add_frame_features, subphone_feats = cfg.subphone_feats)
remover.remove_silence(nn_cmp_file_list[0:cfg.train_file_number+cfg.valid_file_number],
in_label_align_file_list[0:cfg.train_file_number+cfg.valid_file_number],
nn_cmp_file_list[0:cfg.train_file_number+cfg.valid_file_number]) # save to itself
### save acoustic normalisation information for normalising the features back
var_dir = os.path.join(data_dir, 'var')
if not os.path.exists(var_dir):
os.makedirs(var_dir)
var_file_dict = {}
for feature_name in cfg.out_dimension_dict.keys():
var_file_dict[feature_name] = os.path.join(var_dir, feature_name + '_' + str(cfg.out_dimension_dict[feature_name]))
### normalise output acoustic data
if cfg.NORMCMP:
logger.info('normalising acoustic (output) features using method %s' % cfg.output_feature_normalisation)
cmp_norm_info = None
if cfg.output_feature_normalisation == 'MVN':
normaliser = MeanVarianceNorm(feature_dimension=cfg.cmp_dim)
###calculate mean and std vectors on the training data, and apply on the whole dataset
global_mean_vector = normaliser.compute_mean(nn_cmp_file_list[0:cfg.train_file_number], 0, cfg.cmp_dim)
global_std_vector = normaliser.compute_std(nn_cmp_file_list[0:cfg.train_file_number], global_mean_vector, 0, cfg.cmp_dim)
normaliser.feature_normalisation(nn_cmp_file_list[0:cfg.train_file_number+cfg.valid_file_number],
nn_cmp_norm_file_list[0:cfg.train_file_number+cfg.valid_file_number])
cmp_norm_info = numpy.concatenate((global_mean_vector, global_std_vector), axis=0)
elif cfg.output_feature_normalisation == 'MINMAX':
min_max_normaliser = MinMaxNormalisation(feature_dimension = cfg.cmp_dim)
global_mean_vector = min_max_normaliser.compute_mean(nn_cmp_file_list[0:cfg.train_file_number])
global_std_vector = min_max_normaliser.compute_std(nn_cmp_file_list[0:cfg.train_file_number], global_mean_vector)
min_max_normaliser = MinMaxNormalisation(feature_dimension = cfg.cmp_dim, min_value = 0.01, max_value = 0.99)
min_max_normaliser.find_min_max_values(nn_cmp_file_list[0:cfg.train_file_number])
min_max_normaliser.normalise_data(nn_cmp_file_list, nn_cmp_norm_file_list)
cmp_min_vector = min_max_normaliser.min_vector
cmp_max_vector = min_max_normaliser.max_vector
cmp_norm_info = numpy.concatenate((cmp_min_vector, cmp_max_vector), axis=0)
else:
logger.critical('Normalisation type %s is not supported!\n' %(cfg.output_feature_normalisation))
raise
cmp_norm_info = numpy.array(cmp_norm_info, 'float32')
fid = open(norm_info_file, 'wb')
cmp_norm_info.tofile(fid)
fid.close()
logger.info('saved %s vectors to %s' %(cfg.output_feature_normalisation, norm_info_file))
feature_index = 0
for feature_name in cfg.out_dimension_dict.keys():
feature_std_vector = numpy.array(global_std_vector[:,feature_index:feature_index+cfg.out_dimension_dict[feature_name]], 'float32')
fid = open(var_file_dict[feature_name], 'w')
feature_std_vector.tofile(fid)
fid.close()
logger.info('saved %s variance vector to %s' %(feature_name, var_file_dict[feature_name]))
feature_index += cfg.out_dimension_dict[feature_name]
train_x_file_list = nn_label_norm_file_list[0:cfg.train_file_number]
train_y_file_list = nn_cmp_norm_file_list[0:cfg.train_file_number]
valid_x_file_list = nn_label_norm_file_list[cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number]
valid_y_file_list = nn_cmp_norm_file_list[cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number]
test_x_file_list = nn_label_norm_file_list[cfg.train_file_number+cfg.valid_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number]
test_y_file_list = nn_cmp_norm_file_list[cfg.train_file_number+cfg.valid_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number]
# we need to know the label dimension before training the DNN
# computing that requires us to look at the labels
#
# currently, there are two ways to do this
if cfg.label_style == 'HTS':
label_normaliser = HTSLabelNormalisation(question_file_name=cfg.question_file_name, add_frame_features=cfg.add_frame_features, subphone_feats=cfg.subphone_feats)
lab_dim = label_normaliser.dimension + cfg.appended_input_dim
elif cfg.label_style == 'composed':
label_composer = LabelComposer()
label_composer.load_label_configuration(cfg.label_config_file)
lab_dim=label_composer.compute_label_dimension()
logger.info('label dimension is %d' % lab_dim)
combined_model_arch = str(len(hidden_layer_size))
for hid_size in hidden_layer_size:
combined_model_arch += '_' + str(hid_size)
nnets_file_name = '%s/%s_%s_%d_%s_%d.%d.train.%d.%f.rnn.model' \
%(model_dir, cfg.combined_model_name, cfg.combined_feature_name, int(cfg.multistream_switch),
combined_model_arch, lab_dim, cfg.cmp_dim, cfg.train_file_number, cfg.hyper_params['learning_rate'])
### DNN model training
if cfg.TRAINDNN:
var_dict = load_covariance(var_file_dict, cfg.out_dimension_dict)
logger.info('training DNN')
fid = open(norm_info_file, 'rb')
cmp_min_max = numpy.fromfile(fid, dtype=numpy.float32)
fid.close()
cmp_min_max = cmp_min_max.reshape((2, -1))
cmp_mean_vector = cmp_min_max[0, ]
cmp_std_vector = cmp_min_max[1, ]
try:
os.makedirs(model_dir)
except OSError as e:
if e.errno == errno.EEXIST:
# not an error - just means directory already exists
pass
else:
logger.critical('Failed to create model directory %s' % model_dir)
logger.critical(' OS error was: %s' % e.strerror)
raise
try:
train_DNN(train_xy_file_list = (train_x_file_list, train_y_file_list), \
valid_xy_file_list = (valid_x_file_list, valid_y_file_list), \
nnets_file_name = nnets_file_name, \
n_ins = lab_dim, n_outs = cfg.cmp_dim, ms_outs = cfg.multistream_outs, \
hyper_params = cfg.hyper_params, buffer_size = cfg.buffer_size, plot = cfg.plot, var_dict = var_dict,
cmp_mean_vector = cmp_mean_vector, cmp_std_vector = cmp_std_vector)
except KeyboardInterrupt:
logger.critical('train_DNN interrupted via keyboard')
# Could 'raise' the exception further, but that causes a deep traceback to be printed
# which we don't care about for a keyboard interrupt. So, just bail out immediately
sys.exit(1)
except:
logger.critical('train_DNN threw an exception')
raise
if cfg.GENBNFEA:
'''
Please only tune on this step when you want to generate bottleneck features from DNN
'''
temp_dir_name = '%s_%s_%d_%d_%d_%d_%s_hidden' \
%(cfg.model_type, cfg.combined_feature_name, \
cfg.train_file_number, lab_dim, cfg.cmp_dim, \
len(hidden_layers_sizes), combined_model_arch)
gen_dir = os.path.join(gen_dir, temp_dir_name)
bottleneck_size = min(hidden_layers_sizes)
bottleneck_index = 0
for i in xrange(len(hidden_layers_sizes)):
if hidden_layers_sizes(i) == bottleneck_size:
bottleneck_index = i
logger.info('generating bottleneck features from DNN')
try:
os.makedirs(gen_dir)
except OSError as e:
if e.errno == errno.EEXIST:
# not an error - just means directory already exists
pass
else:
logger.critical('Failed to create generation directory %s' % gen_dir)
logger.critical(' OS error was: %s' % e.strerror)
raise
gen_file_id_list = file_id_list[0:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number]
test_x_file_list = nn_label_norm_file_list[0:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number]
gen_file_list = prepare_file_path_list(gen_file_id_list, gen_dir, cfg.cmp_ext)
dnn_hidden_generation(test_x_file_list, nnets_file_name, lab_dim, cfg.cmp_dim, gen_file_list, bottleneck_index)
### generate parameters from DNN
temp_dir_name = '%s_%s_%d_%d_%d_%d_%d_%d_%d' \
%(cfg.combined_model_name, cfg.combined_feature_name, int(cfg.do_post_filtering), \
cfg.train_file_number, lab_dim, cfg.cmp_dim, \
len(hidden_layer_size), hidden_layer_size[0], hidden_layer_size[-1])
gen_dir = os.path.join(gen_dir, temp_dir_name)
gen_file_id_list = file_id_list[cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number]
test_x_file_list = nn_label_norm_file_list[cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number]
if cfg.GenTestList:
gen_file_id_list = test_id_list
test_x_file_list = nn_label_norm_file_list
if cfg.DNNGEN:
logger.info('generating from DNN')
try:
os.makedirs(gen_dir)
except OSError as e:
if e.errno == errno.EEXIST:
# not an error - just means directory already exists
pass
else:
logger.critical('Failed to create generation directory %s' % gen_dir)
logger.critical(' OS error was: %s' % e.strerror)
raise
gen_file_list = prepare_file_path_list(gen_file_id_list, gen_dir, cfg.cmp_ext)
dnn_generation(test_x_file_list, nnets_file_name, lab_dim, cfg.cmp_dim, gen_file_list)
logger.debug('denormalising generated output using method %s' % cfg.output_feature_normalisation)
fid = open(norm_info_file, 'rb')
cmp_min_max = numpy.fromfile(fid, dtype=numpy.float32)
fid.close()
cmp_min_max = cmp_min_max.reshape((2, -1))
cmp_min_vector = cmp_min_max[0, ]
cmp_max_vector = cmp_min_max[1, ]
if cfg.output_feature_normalisation == 'MVN':
denormaliser = MeanVarianceNorm(feature_dimension = cfg.cmp_dim)
denormaliser.feature_denormalisation(gen_file_list, gen_file_list, cmp_min_vector, cmp_max_vector)
elif cfg.output_feature_normalisation == 'MINMAX':
denormaliser = MinMaxNormalisation(cfg.cmp_dim, min_value = 0.01, max_value = 0.99, min_vector = cmp_min_vector, max_vector = cmp_max_vector)
denormaliser.denormalise_data(gen_file_list, gen_file_list)
else:
logger.critical('denormalising method %s is not supported!\n' %(cfg.output_feature_normalisation))
raise
if cfg.AcousticModel:
##perform MLPG to smooth parameter trajectory
## lf0 is included, the output features much have vuv.
generator = ParameterGeneration(gen_wav_features = cfg.gen_wav_features)
generator.acoustic_decomposition(gen_file_list, cfg.cmp_dim, cfg.out_dimension_dict, cfg.file_extension_dict, var_file_dict, do_MLPG=cfg.do_MLPG)
if cfg.DurationModel:
### Perform duration normalization(min. state dur set to 1) ###
gen_dur_list = prepare_file_path_list(gen_file_id_list, gen_dir, cfg.dur_ext)
gen_label_list = prepare_file_path_list(gen_file_id_list, gen_dir, cfg.lab_ext)
in_gen_label_align_file_list = prepare_file_path_list(gen_file_id_list, cfg.in_label_align_dir, cfg.lab_ext, False)
generator = ParameterGeneration(gen_wav_features = cfg.gen_wav_features)
generator.duration_decomposition(gen_file_list, cfg.cmp_dim, cfg.out_dimension_dict, cfg.file_extension_dict)
label_modifier = HTSLabelModification(silence_pattern = cfg.silence_pattern)
label_modifier.modify_duration_labels(in_gen_label_align_file_list, gen_dur_list, gen_label_list)
### generate wav
if cfg.GENWAV:
logger.info('reconstructing waveform(s)')
print len(gen_file_id_list)
generate_wav(gen_dir, gen_file_id_list, cfg) # generated speech
# generate_wav(nn_cmp_dir, gen_file_id_list, cfg) # reference copy synthesis speech
### setting back to original conditions before calculating objective scores ###
if cfg.GenTestList:
in_label_align_file_list = prepare_file_path_list(file_id_list, cfg.in_label_align_dir, cfg.lab_ext, False)
binary_label_file_list = prepare_file_path_list(file_id_list, binary_label_dir, cfg.lab_ext)
gen_file_id_list = file_id_list[cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number]
### evaluation: RMSE and CORR for duration
if cfg.CALMCD and cfg.DurationModel:
logger.info('calculating MCD')
ref_data_dir = os.path.join(data_dir, 'ref_data')
ref_dur_list = prepare_file_path_list(gen_file_id_list, ref_data_dir, cfg.dur_ext)
in_gen_label_align_file_list = in_label_align_file_list[cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number]
calculator = IndividualDistortionComp()
valid_file_id_list = file_id_list[cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number]
test_file_id_list = file_id_list[cfg.train_file_number+cfg.valid_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number]
if cfg.remove_silence_using_binary_labels:
untrimmed_reference_data = in_file_list_dict['dur'][cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number]
trim_silence(untrimmed_reference_data, ref_dur_list, cfg.dur_dim, \
untrimmed_test_labels, lab_dim, silence_feature)
else:
remover = SilenceRemover(n_cmp = cfg.dur_dim, silence_pattern = cfg.silence_pattern, remove_frame_features = cfg.add_frame_features)
remover.remove_silence(in_file_list_dict['dur'][cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number], in_gen_label_align_file_list, ref_dur_list)
valid_dur_rmse, valid_dur_corr = calculator.compute_distortion(valid_file_id_list, ref_data_dir, gen_dir, cfg.dur_ext, cfg.dur_dim)
test_dur_rmse, test_dur_corr = calculator.compute_distortion(test_file_id_list , ref_data_dir, gen_dir, cfg.dur_ext, cfg.dur_dim)
logger.info('Develop: DNN -- RMSE: %.3f frames/phoneme; CORR: %.3f; ' \
%(valid_dur_rmse, valid_dur_corr))
logger.info('Test: DNN -- RMSE: %.3f frames/phoneme; CORR: %.3f; ' \
%(test_dur_rmse, test_dur_corr))
### evaluation: calculate distortion
if cfg.CALMCD and cfg.AcousticModel:
logger.info('calculating MCD')
ref_data_dir = os.path.join(data_dir, 'ref_data')
ref_mgc_list = prepare_file_path_list(gen_file_id_list, ref_data_dir, cfg.mgc_ext)
ref_bap_list = prepare_file_path_list(gen_file_id_list, ref_data_dir, cfg.bap_ext)
ref_lf0_list = prepare_file_path_list(gen_file_id_list, ref_data_dir, cfg.lf0_ext)
in_gen_label_align_file_list = in_label_align_file_list[cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number]
calculator = IndividualDistortionComp()
spectral_distortion = 0.0
bap_mse = 0.0
f0_mse = 0.0
vuv_error = 0.0
valid_file_id_list = file_id_list[cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number]
test_file_id_list = file_id_list[cfg.train_file_number+cfg.valid_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number]
if cfg.remove_silence_using_binary_labels:
## get lab_dim:
label_composer = LabelComposer()
label_composer.load_label_configuration(cfg.label_config_file)
lab_dim=label_composer.compute_label_dimension()
## use first feature in label -- hardcoded for now
silence_feature = 0
## Use these to trim silence:
untrimmed_test_labels = binary_label_file_list[cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number]
if cfg.in_dimension_dict.has_key('mgc'):
if cfg.remove_silence_using_binary_labels:
untrimmed_reference_data = in_file_list_dict['mgc'][cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number]
trim_silence(untrimmed_reference_data, ref_mgc_list, cfg.mgc_dim, \
untrimmed_test_labels, lab_dim, silence_feature)
else:
remover = SilenceRemover(n_cmp = cfg.mgc_dim, silence_pattern = cfg.silence_pattern)
remover.remove_silence(in_file_list_dict['mgc'][cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number], in_gen_label_align_file_list, ref_mgc_list)
valid_spectral_distortion = calculator.compute_distortion(valid_file_id_list, ref_data_dir, gen_dir, cfg.mgc_ext, cfg.mgc_dim)
test_spectral_distortion = calculator.compute_distortion(test_file_id_list , ref_data_dir, gen_dir, cfg.mgc_ext, cfg.mgc_dim)
valid_spectral_distortion *= (10 /numpy.log(10)) * numpy.sqrt(2.0) ##MCD
test_spectral_distortion *= (10 /numpy.log(10)) * numpy.sqrt(2.0) ##MCD
if cfg.in_dimension_dict.has_key('bap'):
if cfg.remove_silence_using_binary_labels:
untrimmed_reference_data = in_file_list_dict['bap'][cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number]
trim_silence(untrimmed_reference_data, ref_bap_list, cfg.bap_dim, \
untrimmed_test_labels, lab_dim, silence_feature)
else:
remover = SilenceRemover(n_cmp = cfg.bap_dim, silence_pattern = cfg.silence_pattern)
remover.remove_silence(in_file_list_dict['bap'][cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number], in_gen_label_align_file_list, ref_bap_list)
valid_bap_mse = calculator.compute_distortion(valid_file_id_list, ref_data_dir, gen_dir, cfg.bap_ext, cfg.bap_dim)
test_bap_mse = calculator.compute_distortion(test_file_id_list , ref_data_dir, gen_dir, cfg.bap_ext, cfg.bap_dim)
valid_bap_mse = valid_bap_mse / 10.0 ##Cassia's bap is computed from 10*log|S(w)|. if use HTS/SPTK style, do the same as MGC
test_bap_mse = test_bap_mse / 10.0 ##Cassia's bap is computed from 10*log|S(w)|. if use HTS/SPTK style, do the same as MGC
if cfg.in_dimension_dict.has_key('lf0'):
if cfg.remove_silence_using_binary_labels:
untrimmed_reference_data = in_file_list_dict['lf0'][cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number]
trim_silence(untrimmed_reference_data, ref_lf0_list, cfg.lf0_dim, \
untrimmed_test_labels, lab_dim, silence_feature)
else:
remover = SilenceRemover(n_cmp = cfg.lf0_dim, silence_pattern = cfg.silence_pattern)
remover.remove_silence(in_file_list_dict['lf0'][cfg.train_file_number:cfg.train_file_number+cfg.valid_file_number+cfg.test_file_number], in_gen_label_align_file_list, ref_lf0_list)
valid_f0_mse, valid_f0_corr, valid_vuv_error = calculator.compute_distortion(valid_file_id_list, ref_data_dir, gen_dir, cfg.lf0_ext, cfg.lf0_dim)
test_f0_mse , test_f0_corr, test_vuv_error = calculator.compute_distortion(test_file_id_list , ref_data_dir, gen_dir, cfg.lf0_ext, cfg.lf0_dim)
logger.info('Develop: DNN -- MCD: %.3f dB; BAP: %.3f dB; F0:- RMSE: %.3f Hz; CORR: %.3f; VUV: %.3f%%' \
%(valid_spectral_distortion, valid_bap_mse, valid_f0_mse, valid_f0_corr, valid_vuv_error*100.))
logger.info('Test : DNN -- MCD: %.3f dB; BAP: %.3f dB; F0:- RMSE: %.3f Hz; CORR: %.3f; VUV: %.3f%%' \
%(test_spectral_distortion , test_bap_mse , test_f0_mse , test_f0_corr, test_vuv_error*100.))
if __name__ == '__main__':
# these things should be done even before trying to parse the command line
# create a configuration instance
# and get a short name for this instance
cfg=configuration.cfg
# set up logging to use our custom class
logging.setLoggerClass(LoggerPlotter)
# get a logger for this main function
logger = logging.getLogger("main")
if len(sys.argv) != 2:
logger.critical('usage: run_dnn.sh [config file name]')
sys.exit(1)
config_file = sys.argv[1]
config_file = os.path.abspath(config_file)
cfg.configure(config_file)
if cfg.profile:
logger.info('profiling is activated')
import cProfile, pstats
cProfile.run('main_function(cfg)', 'mainstats')
# create a stream for the profiler to write to
profiling_output = StringIO.StringIO()
p = pstats.Stats('mainstats', stream=profiling_output)
# print stats to that stream
# here we just report the top 10 functions, sorted by total amount of time spent in each
p.strip_dirs().sort_stats('tottime').print_stats(10)
# print the result to the log
logger.info('---Profiling result follows---\n%s' % profiling_output.getvalue() )
profiling_output.close()
logger.info('---End of profiling result---')
else:
main_function(cfg)
# if gnp._boardId is not None:
# import gpu_lock
# gpu_lock.free_lock(gnp._boardId)
sys.exit(0)
|
ronanki/merlin
|
src/work_in_progress/run_sdae.py
|
Python
|
apache-2.0
| 55,019
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Code snippets used in webdocs.
The examples here are written specifically to read well with the accompanying
web docs. Do not rewrite them until you make sure the webdocs still read well
and the rewritten code supports the concept being described. For example, there
are snippets that could be shorter but they are written like this to make a
specific point in the docs.
The code snippets are all organized as self contained functions. Parts of the
function body delimited by [START tag] and [END tag] will be included
automatically in the web docs. The naming convention for the tags is to have as
prefix the PATH_TO_HTML where they are included followed by a descriptive
string. The tags can contain only letters, digits and _.
"""
import apache_beam as beam
from apache_beam.test_pipeline import TestPipeline
from apache_beam.metrics import Metrics
# Quiet some pylint warnings that happen because of the somewhat special
# format for the code snippets.
# pylint:disable=invalid-name
# pylint:disable=expression-not-assigned
# pylint:disable=redefined-outer-name
# pylint:disable=reimported
# pylint:disable=unused-variable
# pylint:disable=wrong-import-order, wrong-import-position
class SnippetUtils(object):
from apache_beam.pipeline import PipelineVisitor
class RenameFiles(PipelineVisitor):
"""RenameFiles will rewire read/write paths for unit testing.
RenameFiles will replace the GCS files specified in the read and
write transforms to local files so the pipeline can be run as a
unit test. This assumes that read and write transforms defined in snippets
have already been replaced by transforms 'DummyReadForTesting' and
'DummyReadForTesting' (see snippets_test.py).
This is as close as we can get to have code snippets that are
executed and are also ready to presented in webdocs.
"""
def __init__(self, renames):
self.renames = renames
def visit_transform(self, transform_node):
if transform_node.full_label.find('DummyReadForTesting') >= 0:
transform_node.transform.fn.file_to_read = self.renames['read']
elif transform_node.full_label.find('DummyWriteForTesting') >= 0:
transform_node.transform.fn.file_to_write = self.renames['write']
def construct_pipeline(renames):
"""A reverse words snippet as an example for constructing a pipeline."""
import re
class ReverseWords(beam.PTransform):
"""A PTransform that reverses individual elements in a PCollection."""
def expand(self, pcoll):
return pcoll | beam.Map(lambda e: e[::-1])
def filter_words(unused_x):
"""Pass through filter to select everything."""
return True
# [START pipelines_constructing_creating]
from apache_beam.utils.pipeline_options import PipelineOptions
p = beam.Pipeline(options=PipelineOptions())
# [END pipelines_constructing_creating]
p = TestPipeline() # Use TestPipeline for testing.
# [START pipelines_constructing_reading]
lines = p | 'ReadMyFile' >> beam.io.ReadFromText('gs://some/inputData.txt')
# [END pipelines_constructing_reading]
# [START pipelines_constructing_applying]
words = lines | beam.FlatMap(lambda x: re.findall(r'[A-Za-z\']+', x))
reversed_words = words | ReverseWords()
# [END pipelines_constructing_applying]
# [START pipelines_constructing_writing]
filtered_words = reversed_words | 'FilterWords' >> beam.Filter(filter_words)
filtered_words | 'WriteMyFile' >> beam.io.WriteToText(
'gs://some/outputData.txt')
# [END pipelines_constructing_writing]
p.visit(SnippetUtils.RenameFiles(renames))
# [START pipelines_constructing_running]
p.run()
# [END pipelines_constructing_running]
def model_pipelines(argv):
"""A wordcount snippet as a simple pipeline example."""
# [START model_pipelines]
import re
import apache_beam as beam
from apache_beam.utils.pipeline_options import PipelineOptions
class MyOptions(PipelineOptions):
@classmethod
def _add_argparse_args(cls, parser):
parser.add_argument('--input',
dest='input',
default='gs://dataflow-samples/shakespeare/kinglear'
'.txt',
help='Input file to process.')
parser.add_argument('--output',
dest='output',
required=True,
help='Output file to write results to.')
pipeline_options = PipelineOptions(argv)
my_options = pipeline_options.view_as(MyOptions)
p = beam.Pipeline(options=pipeline_options)
(p
| beam.io.ReadFromText(my_options.input)
| beam.FlatMap(lambda x: re.findall(r'[A-Za-z\']+', x))
| beam.Map(lambda x: (x, 1))
| beam.combiners.Count.PerKey()
| beam.io.WriteToText(my_options.output))
result = p.run()
# [END model_pipelines]
result.wait_until_finish()
def model_pcollection(argv):
"""Creating a PCollection from data in local memory."""
from apache_beam.utils.pipeline_options import PipelineOptions
class MyOptions(PipelineOptions):
@classmethod
def _add_argparse_args(cls, parser):
parser.add_argument('--output',
dest='output',
required=True,
help='Output file to write results to.')
pipeline_options = PipelineOptions(argv)
my_options = pipeline_options.view_as(MyOptions)
# [START model_pcollection]
p = beam.Pipeline(options=pipeline_options)
(p
| beam.Create([
'To be, or not to be: that is the question: ',
'Whether \'tis nobler in the mind to suffer ',
'The slings and arrows of outrageous fortune, ',
'Or to take arms against a sea of troubles, '])
| beam.io.WriteToText(my_options.output))
result = p.run()
# [END model_pcollection]
result.wait_until_finish()
def pipeline_options_remote(argv):
"""Creating a Pipeline using a PipelineOptions object for remote execution."""
from apache_beam import Pipeline
from apache_beam.utils.pipeline_options import PipelineOptions
# [START pipeline_options_create]
options = PipelineOptions(flags=argv)
# [END pipeline_options_create]
# [START pipeline_options_define_custom]
class MyOptions(PipelineOptions):
@classmethod
def _add_argparse_args(cls, parser):
parser.add_argument('--input')
parser.add_argument('--output')
# [END pipeline_options_define_custom]
from apache_beam.utils.pipeline_options import GoogleCloudOptions
from apache_beam.utils.pipeline_options import StandardOptions
# [START pipeline_options_dataflow_service]
# Create and set your PipelineOptions.
options = PipelineOptions(flags=argv)
# For Cloud execution, set the Cloud Platform project, job_name,
# staging location, temp_location and specify DataflowRunner.
google_cloud_options = options.view_as(GoogleCloudOptions)
google_cloud_options.project = 'my-project-id'
google_cloud_options.job_name = 'myjob'
google_cloud_options.staging_location = 'gs://my-bucket/binaries'
google_cloud_options.temp_location = 'gs://my-bucket/temp'
options.view_as(StandardOptions).runner = 'DataflowRunner'
# Create the Pipeline with the specified options.
p = Pipeline(options=options)
# [END pipeline_options_dataflow_service]
my_options = options.view_as(MyOptions)
my_input = my_options.input
my_output = my_options.output
p = TestPipeline() # Use TestPipeline for testing.
lines = p | beam.io.ReadFromText(my_input)
lines | beam.io.WriteToText(my_output)
p.run()
def pipeline_options_local(argv):
"""Creating a Pipeline using a PipelineOptions object for local execution."""
from apache_beam import Pipeline
from apache_beam.utils.pipeline_options import PipelineOptions
options = PipelineOptions(flags=argv)
# [START pipeline_options_define_custom_with_help_and_default]
class MyOptions(PipelineOptions):
@classmethod
def _add_argparse_args(cls, parser):
parser.add_argument('--input',
help='Input for the pipeline',
default='gs://my-bucket/input')
parser.add_argument('--output',
help='Output for the pipeline',
default='gs://my-bucket/output')
# [END pipeline_options_define_custom_with_help_and_default]
my_options = options.view_as(MyOptions)
my_input = my_options.input
my_output = my_options.output
# [START pipeline_options_local]
# Create and set your Pipeline Options.
options = PipelineOptions()
p = Pipeline(options=options)
# [END pipeline_options_local]
p = TestPipeline() # Use TestPipeline for testing.
lines = p | beam.io.ReadFromText(my_input)
lines | beam.io.WriteToText(my_output)
p.run()
def pipeline_options_command_line(argv):
"""Creating a Pipeline by passing a list of arguments."""
# [START pipeline_options_command_line]
# Use Python argparse module to parse custom arguments
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--input')
parser.add_argument('--output')
known_args, pipeline_args = parser.parse_known_args(argv)
# Create the Pipeline with remaining arguments.
p = beam.Pipeline(argv=pipeline_args)
lines = p | 'ReadFromText' >> beam.io.ReadFromText(known_args.input)
lines | 'WriteToText' >> beam.io.WriteToText(known_args.output)
# [END pipeline_options_command_line]
p.run().wait_until_finish()
def pipeline_logging(lines, output):
"""Logging Pipeline Messages."""
import re
import apache_beam as beam
# [START pipeline_logging]
# import Python logging module.
import logging
class ExtractWordsFn(beam.DoFn):
def process(self, element):
words = re.findall(r'[A-Za-z\']+', element)
for word in words:
yield word
if word.lower() == 'love':
# Log using the root logger at info or higher levels
logging.info('Found : %s', word.lower())
# Remaining WordCount example code ...
# [END pipeline_logging]
p = TestPipeline() # Use TestPipeline for testing.
(p
| beam.Create(lines)
| beam.ParDo(ExtractWordsFn())
| beam.io.WriteToText(output))
p.run()
def pipeline_monitoring(renames):
"""Using monitoring interface snippets."""
import re
import apache_beam as beam
from apache_beam.utils.pipeline_options import PipelineOptions
class WordCountOptions(PipelineOptions):
@classmethod
def _add_argparse_args(cls, parser):
parser.add_argument('--input',
help='Input for the pipeline',
default='gs://my-bucket/input')
parser.add_argument('--output',
help='output for the pipeline',
default='gs://my-bucket/output')
class ExtractWordsFn(beam.DoFn):
def process(self, element):
words = re.findall(r'[A-Za-z\']+', element)
for word in words:
yield word
class FormatCountsFn(beam.DoFn):
def process(self, element):
word, count = element
yield '%s: %s' % (word, count)
# [START pipeline_monitoring_composite]
# The CountWords Composite Transform inside the WordCount pipeline.
class CountWords(beam.PTransform):
def expand(self, pcoll):
return (pcoll
# Convert lines of text into individual words.
| 'ExtractWords' >> beam.ParDo(ExtractWordsFn())
# Count the number of times each word occurs.
| beam.combiners.Count.PerElement()
# Format each word and count into a printable string.
| 'FormatCounts' >> beam.ParDo(FormatCountsFn()))
# [END pipeline_monitoring_composite]
pipeline_options = PipelineOptions()
options = pipeline_options.view_as(WordCountOptions)
p = TestPipeline() # Use TestPipeline for testing.
# [START pipeline_monitoring_execution]
(p
# Read the lines of the input text.
| 'ReadLines' >> beam.io.ReadFromText(options.input)
# Count the words.
| CountWords()
# Write the formatted word counts to output.
| 'WriteCounts' >> beam.io.WriteToText(options.output))
# [END pipeline_monitoring_execution]
p.visit(SnippetUtils.RenameFiles(renames))
p.run()
def examples_wordcount_minimal(renames):
"""MinimalWordCount example snippets."""
import re
import apache_beam as beam
from apache_beam.utils.pipeline_options import GoogleCloudOptions
from apache_beam.utils.pipeline_options import StandardOptions
from apache_beam.utils.pipeline_options import PipelineOptions
# [START examples_wordcount_minimal_options]
options = PipelineOptions()
google_cloud_options = options.view_as(GoogleCloudOptions)
google_cloud_options.project = 'my-project-id'
google_cloud_options.job_name = 'myjob'
google_cloud_options.staging_location = 'gs://your-bucket-name-here/staging'
google_cloud_options.temp_location = 'gs://your-bucket-name-here/temp'
options.view_as(StandardOptions).runner = 'DataflowRunner'
# [END examples_wordcount_minimal_options]
# Run it locally for testing.
options = PipelineOptions()
# [START examples_wordcount_minimal_create]
p = beam.Pipeline(options=options)
# [END examples_wordcount_minimal_create]
(
# [START examples_wordcount_minimal_read]
p | beam.io.ReadFromText(
'gs://dataflow-samples/shakespeare/kinglear.txt')
# [END examples_wordcount_minimal_read]
# [START examples_wordcount_minimal_pardo]
| 'ExtractWords' >> beam.FlatMap(lambda x: re.findall(r'[A-Za-z\']+', x))
# [END examples_wordcount_minimal_pardo]
# [START examples_wordcount_minimal_count]
| beam.combiners.Count.PerElement()
# [END examples_wordcount_minimal_count]
# [START examples_wordcount_minimal_map]
| beam.Map(lambda (word, count): '%s: %s' % (word, count))
# [END examples_wordcount_minimal_map]
# [START examples_wordcount_minimal_write]
| beam.io.WriteToText('gs://my-bucket/counts.txt')
# [END examples_wordcount_minimal_write]
)
p.visit(SnippetUtils.RenameFiles(renames))
# [START examples_wordcount_minimal_run]
result = p.run()
# [END examples_wordcount_minimal_run]
result.wait_until_finish()
def examples_wordcount_wordcount(renames):
"""WordCount example snippets."""
import re
import apache_beam as beam
from apache_beam.utils.pipeline_options import PipelineOptions
argv = []
# [START examples_wordcount_wordcount_options]
class WordCountOptions(PipelineOptions):
@classmethod
def _add_argparse_args(cls, parser):
parser.add_argument('--input',
help='Input for the pipeline',
default='gs://my-bucket/input')
options = PipelineOptions(argv)
p = beam.Pipeline(options=options)
# [END examples_wordcount_wordcount_options]
lines = p | beam.io.ReadFromText(
'gs://dataflow-samples/shakespeare/kinglear.txt')
# [START examples_wordcount_wordcount_composite]
class CountWords(beam.PTransform):
def expand(self, pcoll):
return (pcoll
# Convert lines of text into individual words.
| 'ExtractWords' >> beam.FlatMap(
lambda x: re.findall(r'[A-Za-z\']+', x))
# Count the number of times each word occurs.
| beam.combiners.Count.PerElement())
counts = lines | CountWords()
# [END examples_wordcount_wordcount_composite]
# [START examples_wordcount_wordcount_dofn]
class FormatAsTextFn(beam.DoFn):
def process(self, element):
word, count = element
yield '%s: %s' % (word, count)
formatted = counts | beam.ParDo(FormatAsTextFn())
# [END examples_wordcount_wordcount_dofn]
formatted | beam.io.WriteToText('gs://my-bucket/counts.txt')
p.visit(SnippetUtils.RenameFiles(renames))
p.run().wait_until_finish()
def examples_wordcount_debugging(renames):
"""DebuggingWordCount example snippets."""
import re
import apache_beam as beam
# [START example_wordcount_debugging_logging]
# [START example_wordcount_debugging_aggregators]
import logging
class FilterTextFn(beam.DoFn):
"""A DoFn that filters for a specific key based on a regular expression."""
def __init__(self, pattern):
self.pattern = pattern
# A custom metric can track values in your pipeline as it runs. Create
# custom metrics matched_word and unmatched_words.
self.matched_words = Metrics.counter(self.__class__, 'matched_words')
self.umatched_words = Metrics.counter(self.__class__, 'umatched_words')
def process(self, element):
word, _ = element
if re.match(self.pattern, word):
# Log at INFO level each element we match. When executing this pipeline
# using the Dataflow service, these log lines will appear in the Cloud
# Logging UI.
logging.info('Matched %s', word)
# Add 1 to the custom metric counter matched_words
self.matched_words.inc()
yield element
else:
# Log at the "DEBUG" level each element that is not matched. Different
# log levels can be used to control the verbosity of logging providing
# an effective mechanism to filter less important information. Note
# currently only "INFO" and higher level logs are emitted to the Cloud
# Logger. This log message will not be visible in the Cloud Logger.
logging.debug('Did not match %s', word)
# Add 1 to the custom metric counter umatched_words
self.umatched_words.inc()
# [END example_wordcount_debugging_logging]
# [END example_wordcount_debugging_aggregators]
p = TestPipeline() # Use TestPipeline for testing.
filtered_words = (
p
| beam.io.ReadFromText(
'gs://dataflow-samples/shakespeare/kinglear.txt')
| 'ExtractWords' >> beam.FlatMap(lambda x: re.findall(r'[A-Za-z\']+', x))
| beam.combiners.Count.PerElement()
| 'FilterText' >> beam.ParDo(FilterTextFn('Flourish|stomach')))
# [START example_wordcount_debugging_assert]
beam.assert_that(
filtered_words, beam.equal_to([('Flourish', 3), ('stomach', 1)]))
# [END example_wordcount_debugging_assert]
output = (filtered_words
| 'format' >> beam.Map(lambda (word, c): '%s: %s' % (word, c))
| 'Write' >> beam.io.WriteToText('gs://my-bucket/counts.txt'))
p.visit(SnippetUtils.RenameFiles(renames))
p.run()
def model_custom_source(count):
"""Demonstrates creating a new custom source and using it in a pipeline.
Defines a new source ``CountingSource`` that produces integers starting from 0
up to a given size.
Uses the new source in an example pipeline.
Additionally demonstrates how a source should be implemented using a
``PTransform``. This is the recommended way to develop sources that are to
distributed to a large number of end users.
This method runs two pipelines.
(1) A pipeline that uses ``CountingSource`` directly using the ``df.Read``
transform.
(2) A pipeline that uses a custom ``PTransform`` that wraps
``CountingSource``.
Args:
count: the size of the counting source to be used in the pipeline
demonstrated in this method.
"""
import apache_beam as beam
from apache_beam.io import iobase
from apache_beam.io.range_trackers import OffsetRangeTracker
from apache_beam.transforms.core import PTransform
from apache_beam.utils.pipeline_options import PipelineOptions
# Defining a new source.
# [START model_custom_source_new_source]
class CountingSource(iobase.BoundedSource):
def __init__(self, count):
self._count = count
def estimate_size(self):
return self._count
def get_range_tracker(self, start_position, stop_position):
if start_position is None:
start_position = 0
if stop_position is None:
stop_position = self._count
return OffsetRangeTracker(start_position, stop_position)
def read(self, range_tracker):
for i in range(self._count):
if not range_tracker.try_claim(i):
return
yield i
def split(self, desired_bundle_size, start_position=None,
stop_position=None):
if start_position is None:
start_position = 0
if stop_position is None:
stop_position = self._count
bundle_start = start_position
while bundle_start < self._count:
bundle_stop = max(self._count, bundle_start + desired_bundle_size)
yield iobase.SourceBundle(weight=(bundle_stop - bundle_start),
source=self,
start_position=bundle_start,
stop_position=bundle_stop)
bundle_start = bundle_stop
# [END model_custom_source_new_source]
# Using the source in an example pipeline.
# [START model_custom_source_use_new_source]
p = beam.Pipeline(options=PipelineOptions())
numbers = p | 'ProduceNumbers' >> beam.io.Read(CountingSource(count))
# [END model_custom_source_use_new_source]
lines = numbers | beam.core.Map(lambda number: 'line %d' % number)
beam.assert_that(
lines, beam.equal_to(
['line ' + str(number) for number in range(0, count)]))
p.run().wait_until_finish()
# We recommend users to start Source classes with an underscore to discourage
# using the Source class directly when a PTransform for the source is
# available. We simulate that here by simply extending the previous Source
# class.
class _CountingSource(CountingSource):
pass
# [START model_custom_source_new_ptransform]
class ReadFromCountingSource(PTransform):
def __init__(self, count, **kwargs):
super(ReadFromCountingSource, self).__init__(**kwargs)
self._count = count
def expand(self, pcoll):
return pcoll | iobase.Read(_CountingSource(count))
# [END model_custom_source_new_ptransform]
# [START model_custom_source_use_ptransform]
p = beam.Pipeline(options=PipelineOptions())
numbers = p | 'ProduceNumbers' >> ReadFromCountingSource(count)
# [END model_custom_source_use_ptransform]
lines = numbers | beam.core.Map(lambda number: 'line %d' % number)
beam.assert_that(
lines, beam.equal_to(
['line ' + str(number) for number in range(0, count)]))
p.run().wait_until_finish()
def model_custom_sink(simplekv, KVs, final_table_name_no_ptransform,
final_table_name_with_ptransform):
"""Demonstrates creating a new custom sink and using it in a pipeline.
Defines a new sink ``SimpleKVSink`` that demonstrates writing to a simple
key-value based storage system which has following API.
simplekv.connect(url) -
connects to the storage system and returns an access token which can be
used to perform further operations
simplekv.open_table(access_token, table_name) -
creates a table named 'table_name'. Returns a table object.
simplekv.write_to_table(access_token, table, key, value) -
writes a key-value pair to the given table.
simplekv.rename_table(access_token, old_name, new_name) -
renames the table named 'old_name' to 'new_name'.
Uses the new sink in an example pipeline.
Additionally demonstrates how a sink should be implemented using a
``PTransform``. This is the recommended way to develop sinks that are to be
distributed to a large number of end users.
This method runs two pipelines.
(1) A pipeline that uses ``SimpleKVSink`` directly using the ``df.Write``
transform.
(2) A pipeline that uses a custom ``PTransform`` that wraps
``SimpleKVSink``.
Args:
simplekv: an object that mocks the key-value storage.
KVs: the set of key-value pairs to be written in the example pipeline.
final_table_name_no_ptransform: the prefix of final set of tables to be
created by the example pipeline that uses
``SimpleKVSink`` directly.
final_table_name_with_ptransform: the prefix of final set of tables to be
created by the example pipeline that uses
a ``PTransform`` that wraps
``SimpleKVSink``.
"""
import apache_beam as beam
from apache_beam.io import iobase
from apache_beam.transforms.core import PTransform
from apache_beam.utils.pipeline_options import PipelineOptions
# Defining the new sink.
# [START model_custom_sink_new_sink]
class SimpleKVSink(iobase.Sink):
def __init__(self, url, final_table_name):
self._url = url
self._final_table_name = final_table_name
def initialize_write(self):
access_token = simplekv.connect(self._url)
return access_token
def open_writer(self, access_token, uid):
table_name = 'table' + uid
return SimpleKVWriter(access_token, table_name)
def finalize_write(self, access_token, table_names):
for i, table_name in enumerate(table_names):
simplekv.rename_table(
access_token, table_name, self._final_table_name + str(i))
# [END model_custom_sink_new_sink]
# Defining a writer for the new sink.
# [START model_custom_sink_new_writer]
class SimpleKVWriter(iobase.Writer):
def __init__(self, access_token, table_name):
self._access_token = access_token
self._table_name = table_name
self._table = simplekv.open_table(access_token, table_name)
def write(self, record):
key, value = record
simplekv.write_to_table(self._access_token, self._table, key, value)
def close(self):
return self._table_name
# [END model_custom_sink_new_writer]
final_table_name = final_table_name_no_ptransform
# Using the new sink in an example pipeline.
# [START model_custom_sink_use_new_sink]
p = beam.Pipeline(options=PipelineOptions())
kvs = p | 'CreateKVs' >> beam.Create(KVs)
kvs | 'WriteToSimpleKV' >> beam.io.Write(
SimpleKVSink('http://url_to_simple_kv/', final_table_name))
# [END model_custom_sink_use_new_sink]
p.run().wait_until_finish()
# We recommend users to start Sink class names with an underscore to
# discourage using the Sink class directly when a PTransform for the sink is
# available. We simulate that here by simply extending the previous Sink
# class.
class _SimpleKVSink(SimpleKVSink):
pass
# [START model_custom_sink_new_ptransform]
class WriteToKVSink(PTransform):
def __init__(self, url, final_table_name, **kwargs):
super(WriteToKVSink, self).__init__(**kwargs)
self._url = url
self._final_table_name = final_table_name
def expand(self, pcoll):
return pcoll | iobase.Write(_SimpleKVSink(self._url,
self._final_table_name))
# [END model_custom_sink_new_ptransform]
final_table_name = final_table_name_with_ptransform
# [START model_custom_sink_use_ptransform]
p = beam.Pipeline(options=PipelineOptions())
kvs = p | 'CreateKVs' >> beam.core.Create(KVs)
kvs | 'WriteToSimpleKV' >> WriteToKVSink(
'http://url_to_simple_kv/', final_table_name)
# [END model_custom_sink_use_ptransform]
p.run().wait_until_finish()
def model_textio(renames):
"""Using a Read and Write transform to read/write text files."""
def filter_words(x):
import re
return re.findall(r'[A-Za-z\']+', x)
import apache_beam as beam
from apache_beam.utils.pipeline_options import PipelineOptions
# [START model_textio_read]
p = beam.Pipeline(options=PipelineOptions())
# [START model_pipelineio_read]
lines = p | 'ReadFromText' >> beam.io.ReadFromText('path/to/input-*.csv')
# [END model_pipelineio_read]
# [END model_textio_read]
# [START model_textio_write]
filtered_words = lines | 'FilterWords' >> beam.FlatMap(filter_words)
# [START model_pipelineio_write]
filtered_words | 'WriteToText' >> beam.io.WriteToText(
'/path/to/numbers', file_name_suffix='.csv')
# [END model_pipelineio_write]
# [END model_textio_write]
p.visit(SnippetUtils.RenameFiles(renames))
p.run().wait_until_finish()
def model_textio_compressed(renames, expected):
"""Using a Read Transform to read compressed text files."""
p = TestPipeline()
# [START model_textio_write_compressed]
lines = p | 'ReadFromText' >> beam.io.ReadFromText(
'/path/to/input-*.csv.gz',
compression_type=beam.io.fileio.CompressionTypes.GZIP)
# [END model_textio_write_compressed]
beam.assert_that(lines, beam.equal_to(expected))
p.visit(SnippetUtils.RenameFiles(renames))
p.run().wait_until_finish()
def model_datastoreio():
"""Using a Read and Write transform to read/write to Cloud Datastore."""
import uuid
from google.cloud.proto.datastore.v1 import entity_pb2
from google.cloud.proto.datastore.v1 import query_pb2
import googledatastore
import apache_beam as beam
from apache_beam.utils.pipeline_options import PipelineOptions
from apache_beam.io.gcp.datastore.v1.datastoreio import ReadFromDatastore
from apache_beam.io.gcp.datastore.v1.datastoreio import WriteToDatastore
project = 'my_project'
kind = 'my_kind'
query = query_pb2.Query()
query.kind.add().name = kind
# [START model_datastoreio_read]
p = beam.Pipeline(options=PipelineOptions())
entities = p | 'Read From Datastore' >> ReadFromDatastore(project, query)
# [END model_datastoreio_read]
# [START model_datastoreio_write]
p = beam.Pipeline(options=PipelineOptions())
musicians = p | 'Musicians' >> beam.Create(
['Mozart', 'Chopin', 'Beethoven', 'Vivaldi'])
def to_entity(content):
entity = entity_pb2.Entity()
googledatastore.helper.add_key_path(entity.key, kind, str(uuid.uuid4()))
googledatastore.helper.add_properties(entity, {'content': unicode(content)})
return entity
entities = musicians | 'To Entity' >> beam.Map(to_entity)
entities | 'Write To Datastore' >> WriteToDatastore(project)
# [END model_datastoreio_write]
def model_bigqueryio():
"""Using a Read and Write transform to read/write to BigQuery."""
import apache_beam as beam
from apache_beam.utils.pipeline_options import PipelineOptions
# [START model_bigqueryio_read]
p = beam.Pipeline(options=PipelineOptions())
weather_data = p | 'ReadWeatherStations' >> beam.io.Read(
beam.io.BigQuerySource(
'clouddataflow-readonly:samples.weather_stations'))
# [END model_bigqueryio_read]
# [START model_bigqueryio_query]
p = beam.Pipeline(options=PipelineOptions())
weather_data = p | 'ReadYearAndTemp' >> beam.io.Read(
beam.io.BigQuerySource(
query='SELECT year, mean_temp FROM samples.weather_stations'))
# [END model_bigqueryio_query]
# [START model_bigqueryio_query_standard_sql]
p = beam.Pipeline(options=PipelineOptions())
weather_data = p | 'ReadYearAndTemp' >> beam.io.Read(
beam.io.BigQuerySource(
query='SELECT year, mean_temp FROM `samples.weather_stations`',
use_standard_sql=True))
# [END model_bigqueryio_query_standard_sql]
# [START model_bigqueryio_schema]
schema = 'source:STRING, quote:STRING'
# [END model_bigqueryio_schema]
# [START model_bigqueryio_write]
quotes = p | beam.Create(
[{'source': 'Mahatma Ghandi', 'quote': 'My life is my message.'}])
quotes | 'Write' >> beam.io.Write(
beam.io.BigQuerySink(
'my-project:output.output_table',
schema=schema,
write_disposition=beam.io.BigQueryDisposition.WRITE_TRUNCATE,
create_disposition=beam.io.BigQueryDisposition.CREATE_IF_NEEDED))
# [END model_bigqueryio_write]
def model_composite_transform_example(contents, output_path):
"""Example of a composite transform.
To declare a composite transform, define a subclass of PTransform.
To override the apply method, define a method "apply" that
takes a PCollection as its only parameter and returns a PCollection.
"""
import re
import apache_beam as beam
# [START composite_transform_example]
# [START composite_ptransform_apply_method]
# [START composite_ptransform_declare]
class CountWords(beam.PTransform):
# [END composite_ptransform_declare]
def expand(self, pcoll):
return (pcoll
| beam.FlatMap(lambda x: re.findall(r'\w+', x))
| beam.combiners.Count.PerElement()
| beam.Map(lambda (word, c): '%s: %s' % (word, c)))
# [END composite_ptransform_apply_method]
# [END composite_transform_example]
p = TestPipeline() # Use TestPipeline for testing.
(p
| beam.Create(contents)
| CountWords()
| beam.io.WriteToText(output_path))
p.run()
def model_multiple_pcollections_flatten(contents, output_path):
"""Merging a PCollection with Flatten."""
some_hash_fn = lambda s: ord(s[0])
import apache_beam as beam
p = TestPipeline() # Use TestPipeline for testing.
partition_fn = lambda element, partitions: some_hash_fn(element) % partitions
# Partition into deciles
partitioned = p | beam.Create(contents) | beam.Partition(partition_fn, 3)
pcoll1 = partitioned[0]
pcoll2 = partitioned[1]
pcoll3 = partitioned[2]
# Flatten them back into 1
# A collection of PCollection objects can be represented simply
# as a tuple (or list) of PCollections.
# (The SDK for Python has no separate type to store multiple
# PCollection objects, whether containing the same or different
# types.)
# [START model_multiple_pcollections_flatten]
merged = (
# [START model_multiple_pcollections_tuple]
(pcoll1, pcoll2, pcoll3)
# [END model_multiple_pcollections_tuple]
# A list of tuples can be "piped" directly into a Flatten transform.
| beam.Flatten())
# [END model_multiple_pcollections_flatten]
merged | beam.io.WriteToText(output_path)
p.run()
def model_multiple_pcollections_partition(contents, output_path):
"""Splitting a PCollection with Partition."""
some_hash_fn = lambda s: ord(s[0])
def get_percentile(i):
"""Assume i in [0,100)."""
return i
import apache_beam as beam
p = TestPipeline() # Use TestPipeline for testing.
students = p | beam.Create(contents)
# [START model_multiple_pcollections_partition]
def partition_fn(student, num_partitions):
return int(get_percentile(student) * num_partitions / 100)
by_decile = students | beam.Partition(partition_fn, 10)
# [END model_multiple_pcollections_partition]
# [START model_multiple_pcollections_partition_40th]
fortieth_percentile = by_decile[4]
# [END model_multiple_pcollections_partition_40th]
([by_decile[d] for d in xrange(10) if d != 4] + [fortieth_percentile]
| beam.Flatten()
| beam.io.WriteToText(output_path))
p.run()
def model_group_by_key(contents, output_path):
"""Applying a GroupByKey Transform."""
import re
import apache_beam as beam
p = TestPipeline() # Use TestPipeline for testing.
words_and_counts = (
p
| beam.Create(contents)
| beam.FlatMap(lambda x: re.findall(r'\w+', x))
| 'one word' >> beam.Map(lambda w: (w, 1)))
# GroupByKey accepts a PCollection of (w, 1) and
# outputs a PCollection of (w, (1, 1, ...)).
# (A key/value pair is just a tuple in Python.)
# This is a somewhat forced example, since one could
# simply use beam.combiners.Count.PerElement here.
# [START model_group_by_key_transform]
grouped_words = words_and_counts | beam.GroupByKey()
# [END model_group_by_key_transform]
(grouped_words
| 'count words' >> beam.Map(lambda (word, counts): (word, len(counts)))
| beam.io.WriteToText(output_path))
p.run()
def model_co_group_by_key_tuple(email_list, phone_list, output_path):
"""Applying a CoGroupByKey Transform to a tuple."""
import apache_beam as beam
p = TestPipeline() # Use TestPipeline for testing.
# [START model_group_by_key_cogroupbykey_tuple]
# Each data set is represented by key-value pairs in separate PCollections.
# Both data sets share a common key type (in this example str).
# The email_list contains values such as: ('joe', 'joe@example.com') with
# multiple possible values for each key.
# The phone_list contains values such as: ('mary': '111-222-3333') with
# multiple possible values for each key.
emails = p | 'email' >> beam.Create(email_list)
phones = p | 'phone' >> beam.Create(phone_list)
# The result PCollection contains one key-value element for each key in the
# input PCollections. The key of the pair will be the key from the input and
# the value will be a dictionary with two entries: 'emails' - an iterable of
# all values for the current key in the emails PCollection and 'phones': an
# iterable of all values for the current key in the phones PCollection.
# For instance, if 'emails' contained ('joe', 'joe@example.com') and
# ('joe', 'joe@gmail.com'), then 'result' will contain the element
# ('joe', {'emails': ['joe@example.com', 'joe@gmail.com'], 'phones': ...})
result = {'emails': emails, 'phones': phones} | beam.CoGroupByKey()
def join_info((name, info)):
return '; '.join(['%s' % name,
'%s' % ','.join(info['emails']),
'%s' % ','.join(info['phones'])])
contact_lines = result | beam.Map(join_info)
# [END model_group_by_key_cogroupbykey_tuple]
contact_lines | beam.io.WriteToText(output_path)
p.run()
def model_join_using_side_inputs(
name_list, email_list, phone_list, output_path):
"""Joining PCollections using side inputs."""
import apache_beam as beam
from apache_beam.pvalue import AsIter
p = TestPipeline() # Use TestPipeline for testing.
# [START model_join_using_side_inputs]
# This code performs a join by receiving the set of names as an input and
# passing PCollections that contain emails and phone numbers as side inputs
# instead of using CoGroupByKey.
names = p | 'names' >> beam.Create(name_list)
emails = p | 'email' >> beam.Create(email_list)
phones = p | 'phone' >> beam.Create(phone_list)
def join_info(name, emails, phone_numbers):
filtered_emails = []
for name_in_list, email in emails:
if name_in_list == name:
filtered_emails.append(email)
filtered_phone_numbers = []
for name_in_list, phone_number in phone_numbers:
if name_in_list == name:
filtered_phone_numbers.append(phone_number)
return '; '.join(['%s' % name,
'%s' % ','.join(filtered_emails),
'%s' % ','.join(filtered_phone_numbers)])
contact_lines = names | 'CreateContacts' >> beam.core.Map(
join_info, AsIter(emails), AsIter(phones))
# [END model_join_using_side_inputs]
contact_lines | beam.io.WriteToText(output_path)
p.run()
# [START model_library_transforms_keys]
class Keys(beam.PTransform):
def expand(self, pcoll):
return pcoll | 'Keys' >> beam.Map(lambda (k, v): k)
# [END model_library_transforms_keys]
# pylint: enable=invalid-name
# [START model_library_transforms_count]
class Count(beam.PTransform):
def expand(self, pcoll):
return (
pcoll
| 'PairWithOne' >> beam.Map(lambda v: (v, 1))
| beam.CombinePerKey(sum))
# [END model_library_transforms_count]
|
chamikaramj/incubator-beam
|
sdks/python/apache_beam/examples/snippets/snippets.py
|
Python
|
apache-2.0
| 40,001
|
# coding=utf-8
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes for analyzing and storing the state of Python code blocks."""
from __future__ import unicode_literals
import abc
import collections
import re
from grumpy.compiler import expr
from grumpy.compiler import util
from grumpy.pythonparser import algorithm
from grumpy.pythonparser import ast
from grumpy.pythonparser import source
_non_word_re = re.compile('[^A-Za-z0-9_]')
class Package(object):
"""A Go package import."""
def __init__(self, name, alias=None):
self.name = name
# Use Γ as a separator since it provides readability with a low
# probability of name collisions.
self.alias = alias or 'π_' + name.replace('/', 'Γ').replace('.', 'Γ')
class Loop(object):
"""Represents a for or while loop within a particular block."""
def __init__(self, breakvar):
self.breakvar = breakvar
class Block(object):
"""Represents a Python block such as a function or class definition."""
__metaclass__ = abc.ABCMeta
def __init__(self, parent, name):
self.root = parent.root if parent else self
self.parent = parent
self.name = name
self.free_temps = set()
self.used_temps = set()
self.temp_index = 0
self.label_count = 0
self.checkpoints = set()
self.loop_stack = []
self.is_generator = False
@abc.abstractmethod
def bind_var(self, writer, name, value):
"""Writes Go statements for assigning value to named var in this block.
This is overridden in the different concrete block types since in Python,
binding a variable in, e.g. a function is quite different than binding at
global block.
Args:
writer: The Writer object where statements will be written.
name: The name of the Python variable.
value: A Go expression to assign to the variable.
"""
pass
@abc.abstractmethod
def del_var(self, writer, name):
pass
@abc.abstractmethod
def resolve_name(self, writer, name):
"""Returns a GeneratedExpr object for accessing the named var in this block.
This is overridden in the different concrete block types since name
resolution in Python behaves differently depending on where in what kind of
block its happening within, e.g. local vars are different than globals.
Args:
writer: Writer object where intermediate calculations will be printed.
name: The name of the Python variable.
"""
pass
def genlabel(self, is_checkpoint=False):
self.label_count += 1
if is_checkpoint:
self.checkpoints.add(self.label_count)
return self.label_count
def alloc_temp(self, type_='*πg.Object'):
"""Create a new temporary Go variable having type type_ for this block."""
for v in sorted(self.free_temps, key=lambda k: k.name):
if v.type_ == type_:
self.free_temps.remove(v)
self.used_temps.add(v)
return v
self.temp_index += 1
name = 'πTemp{:03d}'.format(self.temp_index)
v = expr.GeneratedTempVar(self, name, type_)
self.used_temps.add(v)
return v
def free_temp(self, v):
"""Release the GeneratedTempVar v so it can be reused."""
self.used_temps.remove(v)
self.free_temps.add(v)
def push_loop(self, breakvar):
loop = Loop(breakvar)
self.loop_stack.append(loop)
return loop
def pop_loop(self):
self.loop_stack.pop()
def top_loop(self):
return self.loop_stack[-1]
def _resolve_global(self, writer, name):
result = self.alloc_temp()
writer.write_checked_call2(
result, 'πg.ResolveGlobal(πF, {})', self.root.intern(name))
return result
class ModuleBlock(Block):
"""Python block for a module."""
def __init__(self, importer, full_package_name,
filename, src, future_features):
Block.__init__(self, None, '<module>')
self.importer = importer
self.full_package_name = full_package_name
self.filename = filename
self.buffer = source.Buffer(src)
self.strings = set()
self.future_features = future_features
def bind_var(self, writer, name, value):
writer.write_checked_call1(
'πF.Globals().SetItem(πF, {}.ToObject(), {})',
self.intern(name), value)
def del_var(self, writer, name):
writer.write_checked_call1('πg.DelVar(πF, πF.Globals(), {})',
self.intern(name))
def resolve_name(self, writer, name):
return self._resolve_global(writer, name)
def intern(self, s):
if len(s) > 64 or _non_word_re.search(s):
return 'πg.NewStr({})'.format(util.go_str(s))
self.strings.add(s)
return 'ß' + s
class ClassBlock(Block):
"""Python block for a class definition."""
def __init__(self, parent, name, global_vars):
Block.__init__(self, parent, name)
self.global_vars = global_vars
def bind_var(self, writer, name, value):
if name in self.global_vars:
return self.root.bind_var(writer, name, value)
writer.write_checked_call1('πClass.SetItem(πF, {}.ToObject(), {})',
self.root.intern(name), value)
def del_var(self, writer, name):
if name in self.global_vars:
return self.root.del_var(writer, name)
writer.write_checked_call1('πg.DelVar(πF, πClass, {})',
self.root.intern(name))
def resolve_name(self, writer, name):
local = 'nil'
if name not in self.global_vars:
# Only look for a local in an outer block when name hasn't been declared
# global in this block. If it has been declared global then we fallback
# straight to the global dict.
block = self.parent
while not isinstance(block, ModuleBlock):
if isinstance(block, FunctionBlock) and name in block.vars:
var = block.vars[name]
if var.type != Var.TYPE_GLOBAL:
local = util.adjust_local_name(name)
# When it is declared global, prefer it to anything in outer blocks.
break
block = block.parent
result = self.alloc_temp()
writer.write_checked_call2(
result, 'πg.ResolveClass(πF, πClass, {}, {})',
local, self.root.intern(name))
return result
class FunctionBlock(Block):
"""Python block for a function definition."""
def __init__(self, parent, name, block_vars, is_generator):
Block.__init__(self, parent, name)
self.vars = block_vars
self.parent = parent
self.is_generator = is_generator
def bind_var(self, writer, name, value):
if self.vars[name].type == Var.TYPE_GLOBAL:
return self.root.bind_var(writer, name, value)
writer.write('{} = {}'.format(util.adjust_local_name(name), value))
def del_var(self, writer, name):
var = self.vars.get(name)
if not var:
raise util.ParseError(
None, 'cannot delete nonexistent local: {}'.format(name))
if var.type == Var.TYPE_GLOBAL:
return self.root.del_var(writer, name)
adjusted_name = util.adjust_local_name(name)
# Resolve local first to ensure the variable is already bound.
writer.write_checked_call1('πg.CheckLocal(πF, {}, {})',
adjusted_name, util.go_str(name))
writer.write('{} = πg.UnboundLocal'.format(adjusted_name))
def resolve_name(self, writer, name):
block = self
while not isinstance(block, ModuleBlock):
if isinstance(block, FunctionBlock):
var = block.vars.get(name)
if var:
if var.type == Var.TYPE_GLOBAL:
return self._resolve_global(writer, name)
writer.write_checked_call1('πg.CheckLocal(πF, {}, {})',
util.adjust_local_name(name),
util.go_str(name))
return expr.GeneratedLocalVar(name)
block = block.parent
return self._resolve_global(writer, name)
class Var(object):
"""A Python variable used within a particular block."""
TYPE_LOCAL = 0
TYPE_PARAM = 1
TYPE_GLOBAL = 2
def __init__(self, name, var_type, arg_index=None):
self.name = name
self.type = var_type
if var_type == Var.TYPE_LOCAL:
assert arg_index is None
self.init_expr = 'πg.UnboundLocal'
elif var_type == Var.TYPE_PARAM:
assert arg_index is not None
self.init_expr = 'πArgs[{}]'.format(arg_index)
else:
assert arg_index is None
self.init_expr = None
class BlockVisitor(algorithm.Visitor):
"""Visits nodes in a function or class to determine block variables."""
# pylint: disable=invalid-name,missing-docstring
def __init__(self):
self.vars = collections.OrderedDict()
def visit_Assign(self, node):
for target in node.targets:
self._assign_target(target)
self.visit(node.value)
def visit_AugAssign(self, node):
self._assign_target(node.target)
self.visit(node.value)
def visit_ClassDef(self, node):
self._register_local(node.name)
def visit_ExceptHandler(self, node):
if node.name:
self._register_local(node.name.id)
self.generic_visit(node)
def visit_For(self, node):
self._assign_target(node.target)
self.generic_visit(node)
def visit_FunctionDef(self, node):
# The function being defined is local to this block, i.e. is nested within
# another function. Note that further nested symbols are not traversed
# because we don't explicitly visit the function body.
self._register_local(node.name)
def visit_Global(self, node):
for name in node.names:
self._register_global(node, name)
def visit_Import(self, node):
for alias in node.names:
self._register_local(alias.asname or alias.name.split('.')[0])
def visit_ImportFrom(self, node):
for alias in node.names:
self._register_local(alias.asname or alias.name)
def visit_With(self, node):
for item in node.items:
if item.optional_vars:
self._assign_target(item.optional_vars)
self.generic_visit(node)
def _assign_target(self, target):
if isinstance(target, ast.Name):
self._register_local(target.id)
elif isinstance(target, (ast.Tuple, ast.List)):
for elt in target.elts:
self._assign_target(elt)
def _register_global(self, node, name):
var = self.vars.get(name)
if var:
if var.type == Var.TYPE_PARAM:
msg = "name '{}' is parameter and global"
raise util.ParseError(node, msg.format(name))
if var.type == Var.TYPE_LOCAL:
msg = "name '{}' is used prior to global declaration"
raise util.ParseError(node, msg.format(name))
else:
self.vars[name] = Var(name, Var.TYPE_GLOBAL)
def _register_local(self, name):
if not self.vars.get(name):
self.vars[name] = Var(name, Var.TYPE_LOCAL)
class FunctionBlockVisitor(BlockVisitor):
"""Visits function nodes to determine variables and generator state."""
# pylint: disable=invalid-name,missing-docstring
def __init__(self, node):
BlockVisitor.__init__(self)
self.is_generator = False
node_args = node.args
args = [a.arg for a in node_args.args]
if node_args.vararg:
args.append(node_args.vararg.arg)
if node_args.kwarg:
args.append(node_args.kwarg.arg)
for i, name in enumerate(args):
if name in self.vars:
msg = "duplicate argument '{}' in function definition".format(name)
raise util.ParseError(node, msg)
self.vars[name] = Var(name, Var.TYPE_PARAM, arg_index=i)
def visit_Yield(self, unused_node): # pylint: disable=unused-argument
self.is_generator = True
|
google/grumpy
|
compiler/block.py
|
Python
|
apache-2.0
| 12,060
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012, Nachi Ueno, NTT MCL, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import mock
import mox
from oslo.config import cfg
import stubout
import testtools
from quantum import context
from quantum.db import api as db
from quantum.extensions.flavor import (FLAVOR_NETWORK, FLAVOR_ROUTER)
from quantum.openstack.common import uuidutils
from quantum.plugins.metaplugin.meta_quantum_plugin import FlavorNotFound
from quantum.plugins.metaplugin.meta_quantum_plugin import MetaPluginV2
from quantum.tests import base
CONF_FILE = ""
ROOTDIR = os.path.dirname(os.path.dirname(__file__))
ETCDIR = os.path.join(ROOTDIR, 'etc')
META_PATH = "quantum.plugins.metaplugin"
FAKE_PATH = "quantum.tests.unit.metaplugin"
PROXY_PATH = "%s.proxy_quantum_plugin.ProxyPluginV2" % META_PATH
PLUGIN_LIST = """
fake1:%s.fake_plugin.Fake1,fake2:%s.fake_plugin.Fake2,proxy:%s
""".strip() % (FAKE_PATH, FAKE_PATH, PROXY_PATH)
L3_PLUGIN_LIST = """
fake1:%s.fake_plugin.Fake1,fake2:%s.fake_plugin.Fake2
""".strip() % (FAKE_PATH, FAKE_PATH)
def etcdir(*p):
return os.path.join(ETCDIR, *p)
def setup_metaplugin_conf():
cfg.CONF.set_override('auth_url', 'http://localhost:35357/v2.0',
'PROXY')
cfg.CONF.set_override('auth_region', 'RegionOne', 'PROXY')
cfg.CONF.set_override('admin_user', 'quantum', 'PROXY')
cfg.CONF.set_override('admin_password', 'password', 'PROXY')
cfg.CONF.set_override('admin_tenant_name', 'service', 'PROXY')
cfg.CONF.set_override('plugin_list', PLUGIN_LIST, 'META')
cfg.CONF.set_override('l3_plugin_list', L3_PLUGIN_LIST, 'META')
cfg.CONF.set_override('default_flavor', 'fake2', 'META')
cfg.CONF.set_override('default_l3_flavor', 'fake1', 'META')
cfg.CONF.set_override('base_mac', "12:34:56:78:90:ab")
#TODO(nati) remove this after subnet quota change is merged
cfg.CONF.set_override('max_dns_nameservers', 10)
cfg.CONF.set_override('rpc_backend',
'quantum.openstack.common.rpc.impl_fake')
class MetaQuantumPluginV2Test(base.BaseTestCase):
"""Class conisting of MetaQuantumPluginV2 unit tests."""
def setUp(self):
super(MetaQuantumPluginV2Test, self).setUp()
db._ENGINE = None
db._MAKER = None
self.fake_tenant_id = uuidutils.generate_uuid()
self.context = context.get_admin_context()
db.configure_db()
setup_metaplugin_conf()
self.mox = mox.Mox()
self.stubs = stubout.StubOutForTesting()
self.client_cls_p = mock.patch('quantumclient.v2_0.client.Client')
client_cls = self.client_cls_p.start()
self.client_inst = mock.Mock()
client_cls.return_value = self.client_inst
self.client_inst.create_network.return_value = \
{'id': 'fake_id'}
self.client_inst.create_port.return_value = \
{'id': 'fake_id'}
self.client_inst.create_subnet.return_value = \
{'id': 'fake_id'}
self.client_inst.update_network.return_value = \
{'id': 'fake_id'}
self.client_inst.update_port.return_value = \
{'id': 'fake_id'}
self.client_inst.update_subnet.return_value = \
{'id': 'fake_id'}
self.client_inst.delete_network.return_value = True
self.client_inst.delete_port.return_value = True
self.client_inst.delete_subnet.return_value = True
self.plugin = MetaPluginV2(configfile=None)
def _fake_network(self, flavor):
data = {'network': {'name': flavor,
'admin_state_up': True,
'shared': False,
'router:external': [],
'tenant_id': self.fake_tenant_id,
FLAVOR_NETWORK: flavor}}
return data
def _fake_port(self, net_id):
return {'port': {'name': net_id,
'network_id': net_id,
'admin_state_up': True,
'device_id': 'bad_device_id',
'device_owner': 'bad_device_owner',
'admin_state_up': True,
'host_routes': [],
'fixed_ips': [],
'mac_address':
self.plugin._generate_mac(self.context, net_id),
'tenant_id': self.fake_tenant_id}}
def _fake_subnet(self, net_id):
allocation_pools = [{'start': '10.0.0.2',
'end': '10.0.0.254'}]
return {'subnet': {'name': net_id,
'network_id': net_id,
'gateway_ip': '10.0.0.1',
'dns_nameservers': ['10.0.0.2'],
'host_routes': [],
'cidr': '10.0.0.0/24',
'allocation_pools': allocation_pools,
'enable_dhcp': True,
'ip_version': 4}}
def _fake_router(self, flavor):
data = {'router': {'name': flavor, 'admin_state_up': True,
'tenant_id': self.fake_tenant_id,
FLAVOR_ROUTER: flavor,
'external_gateway_info': None}}
return data
def test_create_delete_network(self):
network1 = self._fake_network('fake1')
ret1 = self.plugin.create_network(self.context, network1)
self.assertEqual('fake1', ret1[FLAVOR_NETWORK])
network2 = self._fake_network('fake2')
ret2 = self.plugin.create_network(self.context, network2)
self.assertEqual('fake2', ret2[FLAVOR_NETWORK])
network3 = self._fake_network('proxy')
ret3 = self.plugin.create_network(self.context, network3)
self.assertEqual('proxy', ret3[FLAVOR_NETWORK])
db_ret1 = self.plugin.get_network(self.context, ret1['id'])
self.assertEqual('fake1', db_ret1['name'])
db_ret2 = self.plugin.get_network(self.context, ret2['id'])
self.assertEqual('fake2', db_ret2['name'])
db_ret3 = self.plugin.get_network(self.context, ret3['id'])
self.assertEqual('proxy', db_ret3['name'])
db_ret4 = self.plugin.get_networks(self.context)
self.assertEqual(3, len(db_ret4))
db_ret5 = self.plugin.get_networks(self.context,
{FLAVOR_NETWORK: ['fake1']})
self.assertEqual(1, len(db_ret5))
self.assertEqual('fake1', db_ret5[0]['name'])
self.plugin.delete_network(self.context, ret1['id'])
self.plugin.delete_network(self.context, ret2['id'])
self.plugin.delete_network(self.context, ret3['id'])
def test_create_delete_port(self):
network1 = self._fake_network('fake1')
network_ret1 = self.plugin.create_network(self.context, network1)
network2 = self._fake_network('fake2')
network_ret2 = self.plugin.create_network(self.context, network2)
network3 = self._fake_network('proxy')
network_ret3 = self.plugin.create_network(self.context, network3)
port1 = self._fake_port(network_ret1['id'])
port2 = self._fake_port(network_ret2['id'])
port3 = self._fake_port(network_ret3['id'])
port1_ret = self.plugin.create_port(self.context, port1)
port2_ret = self.plugin.create_port(self.context, port2)
port3_ret = self.plugin.create_port(self.context, port3)
self.assertEqual(network_ret1['id'], port1_ret['network_id'])
self.assertEqual(network_ret2['id'], port2_ret['network_id'])
self.assertEqual(network_ret3['id'], port3_ret['network_id'])
port1['port']['admin_state_up'] = False
port2['port']['admin_state_up'] = False
port3['port']['admin_state_up'] = False
self.plugin.update_port(self.context, port1_ret['id'], port1)
self.plugin.update_port(self.context, port2_ret['id'], port2)
self.plugin.update_port(self.context, port3_ret['id'], port3)
port_in_db1 = self.plugin.get_port(self.context, port1_ret['id'])
port_in_db2 = self.plugin.get_port(self.context, port2_ret['id'])
port_in_db3 = self.plugin.get_port(self.context, port3_ret['id'])
self.assertEqual(False, port_in_db1['admin_state_up'])
self.assertEqual(False, port_in_db2['admin_state_up'])
self.assertEqual(False, port_in_db3['admin_state_up'])
self.plugin.delete_port(self.context, port1_ret['id'])
self.plugin.delete_port(self.context, port2_ret['id'])
self.plugin.delete_port(self.context, port3_ret['id'])
self.plugin.delete_network(self.context, network_ret1['id'])
self.plugin.delete_network(self.context, network_ret2['id'])
self.plugin.delete_network(self.context, network_ret3['id'])
def test_create_delete_subnet(self):
# for this test we need to enable overlapping ips
cfg.CONF.set_default('allow_overlapping_ips', True)
network1 = self._fake_network('fake1')
network_ret1 = self.plugin.create_network(self.context, network1)
network2 = self._fake_network('fake2')
network_ret2 = self.plugin.create_network(self.context, network2)
network3 = self._fake_network('proxy')
network_ret3 = self.plugin.create_network(self.context, network3)
subnet1 = self._fake_subnet(network_ret1['id'])
subnet2 = self._fake_subnet(network_ret2['id'])
subnet3 = self._fake_subnet(network_ret3['id'])
subnet1_ret = self.plugin.create_subnet(self.context, subnet1)
subnet2_ret = self.plugin.create_subnet(self.context, subnet2)
subnet3_ret = self.plugin.create_subnet(self.context, subnet3)
self.assertEqual(network_ret1['id'], subnet1_ret['network_id'])
self.assertEqual(network_ret2['id'], subnet2_ret['network_id'])
self.assertEqual(network_ret3['id'], subnet3_ret['network_id'])
subnet_in_db1 = self.plugin.get_subnet(self.context, subnet1_ret['id'])
subnet_in_db2 = self.plugin.get_subnet(self.context, subnet2_ret['id'])
subnet_in_db3 = self.plugin.get_subnet(self.context, subnet3_ret['id'])
subnet1['subnet']['allocation_pools'].pop()
subnet2['subnet']['allocation_pools'].pop()
subnet3['subnet']['allocation_pools'].pop()
self.plugin.update_subnet(self.context,
subnet1_ret['id'], subnet1)
self.plugin.update_subnet(self.context,
subnet2_ret['id'], subnet2)
self.plugin.update_subnet(self.context,
subnet3_ret['id'], subnet3)
subnet_in_db1 = self.plugin.get_subnet(self.context, subnet1_ret['id'])
subnet_in_db2 = self.plugin.get_subnet(self.context, subnet2_ret['id'])
subnet_in_db3 = self.plugin.get_subnet(self.context, subnet3_ret['id'])
self.assertEqual(4, subnet_in_db1['ip_version'])
self.assertEqual(4, subnet_in_db2['ip_version'])
self.assertEqual(4, subnet_in_db3['ip_version'])
self.plugin.delete_subnet(self.context, subnet1_ret['id'])
self.plugin.delete_subnet(self.context, subnet2_ret['id'])
self.plugin.delete_subnet(self.context, subnet3_ret['id'])
self.plugin.delete_network(self.context, network_ret1['id'])
self.plugin.delete_network(self.context, network_ret2['id'])
self.plugin.delete_network(self.context, network_ret3['id'])
def test_create_delete_router(self):
router1 = self._fake_router('fake1')
router_ret1 = self.plugin.create_router(self.context, router1)
router2 = self._fake_router('fake2')
router_ret2 = self.plugin.create_router(self.context, router2)
self.assertEqual('fake1', router_ret1[FLAVOR_ROUTER])
self.assertEqual('fake2', router_ret2[FLAVOR_ROUTER])
router_in_db1 = self.plugin.get_router(self.context, router_ret1['id'])
router_in_db2 = self.plugin.get_router(self.context, router_ret2['id'])
self.assertEqual('fake1', router_in_db1[FLAVOR_ROUTER])
self.assertEqual('fake2', router_in_db2[FLAVOR_ROUTER])
self.plugin.delete_router(self.context, router_ret1['id'])
self.plugin.delete_router(self.context, router_ret2['id'])
with testtools.ExpectedException(FlavorNotFound):
self.plugin.get_router(self.context, router_ret1['id'])
def test_extension_method(self):
self.assertEqual('fake1', self.plugin.fake_func())
self.assertEqual('fake2', self.plugin.fake_func2())
def test_extension_not_implemented_method(self):
try:
self.plugin.not_implemented()
except AttributeError:
return
except Exception:
self.fail("AttributeError Error is not raised")
self.fail("No Error is not raised")
def tearDown(self):
self.mox.UnsetStubs()
self.stubs.UnsetAll()
self.stubs.SmartUnsetAll()
self.mox.VerifyAll()
db.clear_db()
super(MetaQuantumPluginV2Test, self).tearDown()
|
yamt/neutron
|
quantum/tests/unit/metaplugin/test_metaplugin.py
|
Python
|
apache-2.0
| 13,773
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# This file is subject to the terms and conditions defined in
# file 'LICENSE.md', which is part of this source code package.
#
import base64
import json
from six import string_types
from kubernetes_py.models.unversioned.BaseModel import BaseModel
from kubernetes_py.models.v1.ObjectMeta import ObjectMeta
from kubernetes_py.utils import is_valid_string, is_valid_dict
class Secret(BaseModel):
"""
http://kubernetes.io/docs/api-reference/v1/definitions/#_v1_secret
"""
K8s_ANNOTATION_SERVICE_ACCOUNT_NAME = "kubernetes.io/service-account.name"
K8s_ANNOTATION_SERVICE_ACCOUNT_UID = "kubernetes.io/service-account.uid"
K8s_TYPE_DOCKER_CONFIG = "kubernetes.io/dockerconfigjson"
K8s_TYPE_SERVICE_ACCOUNT = "kubernetes.io/service-account-token"
K8s_TYPE_OPAQUE = "Opaque"
K8s_TYPE_DOCKER_CONFIG_V1 = "kubernetes.io/dockercfg"
K8s_TYPE_BASIC_AUTH = "kubernetes.io/basic-auth"
K8s_TYPE_SSH_AUTH = "kubernetes.io/ssh-auth"
K8s_TYPE_TLS = "kubernetes.io/tls"
def __init__(self, model=None):
super(Secret, self).__init__()
self.kind = "Secret"
self.api_version = "v1"
self._data = {}
self._string_data = None
self._type = None
if model is not None:
self._build_with_model(model)
def _build_with_model(self, model=None):
super(Secret, self).build_with_model(model)
if "data" in model:
d = {}
for k, v in model["data"].items():
d[k] = base64.b64decode(v)
self.data = d
if "stringData" in model:
self.string_data = model["stringData"]
if "type" in model:
self.type = model["type"]
# ------------------------------------------------------------------------------------- add
def add_annotation(self, k=None, v=None):
anns = self.metadata.annotations
if anns is None:
anns = {}
anns.update({k: v})
self.metadata.annotations = anns
return self
def add_label(self, k=None, v=None):
labels = self.metadata.labels
if labels is None:
labels = {}
labels.update({k: v})
self.metadata.labels = labels
return self
# ------------------------------------------------------------------------------------- kind
@property
def kind(self):
return self._kind
@kind.setter
def kind(self, k=None):
if not is_valid_string(k):
raise SyntaxError("Secret: kind: [ {0} ] is invalid.".format(k))
self._kind = k
# ------------------------------------------------------------------------------------- apiVersion
@property
def api_version(self):
return self._api_version
@api_version.setter
def api_version(self, v=None):
if not is_valid_string(v):
raise SyntaxError("Secret: api_version: [ {0} ] is invalid.".format(v))
self._api_version = v
# ------------------------------------------------------------------------------------- labels
@property
def labels(self):
return self.metadata.labels
@labels.setter
def labels(self, labels=None):
if not is_valid_dict(labels):
raise SyntaxError("Secret: labels: [ {0} ] is invalid.".format(labels))
self.metadata.labels = labels
# ------------------------------------------------------------------------------------- metadata
@property
def metadata(self):
return self._metadata
@metadata.setter
def metadata(self, md=None):
if not isinstance(md, ObjectMeta):
raise SyntaxError("Secret: metadata: [ {0} ] is invalid.".format(md))
self._metadata = md
# ------------------------------------------------------------------------------------- data
@property
def data(self):
d = {}
for k, v in self._data.items():
d[k] = base64.b64decode(v)
if isinstance(d[k], bytes):
d[k] = d[k].decode()
elif is_valid_string(d[k]):
d[k] = d[k].decode()
return d
@data.setter
def data(self, data=None):
msg = "Secret: data: [ {0} ] is invalid.".format(data)
if isinstance(data, string_types):
try:
data = json.loads(data)
except ValueError:
raise SyntaxError(msg)
if not is_valid_dict(data):
raise SyntaxError(msg)
for k, v in data.items():
if not is_valid_string(k):
raise SyntaxError(msg)
if not isinstance(v, bytes):
try:
v = bytearray(v, "UTF-8")
except:
raise SyntaxError("Could not convert [ {0} ] to bytes.".format(v))
self._data[k] = base64.b64encode(v)
# ------------------------------------------------------------------------------------- stringData
@property
def string_data(self):
return self._string_data
@string_data.setter
def string_data(self, data=None):
if not is_valid_dict(data):
raise SyntaxError("Secret: string_data: [ {0} ] is invalid.".format(data))
self._string_data = data
# ------------------------------------------------------------------------------------- type
@property
def type(self):
return self._type
@type.setter
def type(self, t=None):
if not is_valid_string(t):
raise SyntaxError("Secret: type: [ {0} ] is invalid.".format(t))
self._type = t
# ------------------------------------------------------------------------------------- dockercfg json
@property
def dockerconfigjson(self):
if ".dockerconfigjson" in self.data:
return self.data[".dockerconfigjson"]
return None
@dockerconfigjson.setter
def dockerconfigjson(self, secret=None):
if not is_valid_dict(secret):
raise SyntaxError("Secret: .dockerconfigjson: [ {} ] is invalid.".format(secret))
self.type = self.K8s_TYPE_DOCKER_CONFIG
s = json.dumps(secret)
utf = s.encode("utf-8")
self.data = {".dockerconfigjson": utf}
# ------------------------------------------------------------------------------------- service account token
def set_service_account_token(self, account_name=None, account_uid=None, token=None, kubecfg_data=None, cacert=None):
for x in [account_name, account_uid, token]:
if not is_valid_string(x):
raise SyntaxError("Secret.set_service_account() account_name: [ {} ] is invalid.".format(x))
if not is_valid_string(account_uid):
raise SyntaxError("Secret.set_service_account() account_uid: [ {} ] is invalid.".format(account_uid))
if not is_valid_string(token):
raise SyntaxError("Secret.set_service_account() token: [ {} ] is invalid.".format(token))
anns = {self.K8s_ANNOTATION_SERVICE_ACCOUNT_NAME: account_name, self.K8s_ANNOTATION_SERVICE_ACCOUNT_UID: account_uid}
self.type = self.K8s_TYPE_SERVICE_ACCOUNT
self.metadata.annotations = anns
self.data = {"token": token}
if is_valid_string(kubecfg_data):
d = self.data
d.update({"kubernetes_py.kubeconfig": kubecfg_data})
self.data = d
if is_valid_string(cacert):
d = self.data
d.update({"ca.crt": cacert})
self.data = d
return self
# ------------------------------------------------------------------------------------- serialize
def serialize(self):
data = super(Secret, self).serialize()
if self.data is not None:
d = {}
for k, v in self.data.items():
if is_valid_string(v):
v = bytearray(source=v, encoding="UTF-8")
d[k] = base64.b64encode(v)
if isinstance(d[k], bytes):
d[k] = d[k].decode()
data["data"] = d
if self.string_data is not None:
data["stringData"] = self.string_data
if self.type is not None:
data["type"] = self.type
return data
|
sebastienc/kubernetes-py
|
kubernetes_py/models/v1/Secret.py
|
Python
|
apache-2.0
| 8,321
|
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from django import test
from common import api
from common import util
from common import validate
from common.test import base
class CommonViewTest(base.ViewTestCase):
def test_redirect_slash(self):
r = self.login_and_get('popular', '/user/popular/overview/')
redirected = self.assertRedirectsPrefix(r, '/user/popular/overview')
self.assertTemplateUsed(redirected, 'actor/templates/overview.html')
def test_confirm(self):
nonce = util.create_nonce('popular', 'entry_remove')
entry = 'stream/popular%40example.com/presence/12345'
path = '/user/popular/overview'
r = self.login_and_get('popular', path, {'entry_remove': entry,
'_nonce': nonce})
r = self.assertRedirectsPrefix(r, '/confirm')
self.assertContains(r, nonce)
self.assertContains(r, entry)
self.assertContains(r, path)
class UtilTestCase(test.TestCase):
def test_get_user_from_topic(self):
topics = [('root@example.com', 'inbox/root@example.com/presence'),
('root@example.com', 'inbox/root@example.com/overview'),
('root@example.com', 'stream/root@example.com/presence/12345'),
(None, 'stream//presence'),
(None, 'stream/something/else'),
('duuom+aasdd@gmail.com', 'crazy/duuom+aasdd@gmail.com/dddfff$$%%///'),
('asdad@asdasd@asdasd', 'multi/asdad@asdasd@asdasd/cllad/asdff')]
for t in topics:
self.assertEqual(util.get_user_from_topic(t[1]), t[0], t[1])
# We're going to import the rest of the test cases into the local
# namespace so that we can run them as
# python manage.py test common.WhateverTest
from common.test.api import *
from common.test.clean import *
from common.test.db import *
from common.test.domain import *
from common.test.monitor import *
from common.test.notification import *
from common.test.patterns import *
from common.test.queue import *
from common.test.sms import *
from common.test.throttle import *
from common.test.validate import *
from common.templatetags.test.avatar import *
from common.templatetags.test.format import *
from common.templatetags.test.presence import *
# This is for legacy compat with older tests
# TODO(termie): remove me when no longer needed
from common.test.base import *
from common.test.util import *
|
lemonad/jaikuengine
|
common/tests.py
|
Python
|
apache-2.0
| 2,923
|
# Written by Bram Cohen
# modified for multitracker operation by John Hoffman
# see LICENSE.txt for license information
from BitTornado.zurllib import urlopen, quote
from urlparse import urlparse, urlunparse
from socket import gethostbyname
from btformats import check_peers
from BitTornado.bencode import bdecode
from threading import Thread, Lock
from cStringIO import StringIO
from traceback import print_exc
from socket import error, gethostbyname
from random import shuffle
from hashlib import sha1
from time import time
try:
from os import getpid
except ImportError:
def getpid():
return 1
try:
True
except:
True = 1
False = 0
mapbase64 = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz.-'
keys = {}
basekeydata = str(getpid()) + repr(time()) + 'tracker'
def add_key(tracker):
key = ''
for i in sha1(basekeydata+tracker).digest()[-6:]:
key += mapbase64[ord(i) & 0x3F]
keys[tracker] = key
def get_key(tracker):
try:
return "&key="+keys[tracker]
except:
add_key(tracker)
return "&key="+keys[tracker]
class fakeflag:
def __init__(self, state=False):
self.state = state
def wait(self):
pass
def isSet(self):
return self.state
class Rerequester:
def __init__( self, port, myid, infohash, trackerlist, config,
sched, externalsched, errorfunc, excfunc, connect,
howmany, amount_left, up, down, upratefunc, downratefunc,
doneflag, unpauseflag = fakeflag(True),
seededfunc = None, force_rapid_update = False ):
self.sched = sched
self.externalsched = externalsched
self.errorfunc = errorfunc
self.excfunc = excfunc
self.connect = connect
self.howmany = howmany
self.amount_left = amount_left
self.up = up
self.down = down
self.upratefunc = upratefunc
self.downratefunc = downratefunc
self.doneflag = doneflag
self.unpauseflag = unpauseflag
self.seededfunc = seededfunc
self.force_rapid_update = force_rapid_update
self.ip = config.get('ip','')
self.minpeers = config['min_peers']
self.maxpeers = config['max_initiate']
self.interval = config['rerequest_interval']
self.timeout = config['http_timeout']
newtrackerlist = []
for tier in trackerlist:
if len(tier)>1:
shuffle(tier)
newtrackerlist += [tier]
self.trackerlist = newtrackerlist
self.lastsuccessful = ''
self.rejectedmessage = 'rejected by tracker - '
self.url = ('info_hash=%s&peer_id=%s' %
(quote(infohash), quote(myid)))
if not config.get('crypto_allowed'):
self.url += "&port="
else:
self.url += "&supportcrypto=1"
if not config.get('crypto_only'):
self.url += "&port="
else:
self.url += "&requirecrypto=1"
if not config.get('crypto_stealth'):
self.url += "&port="
else:
self.url += "&port=0&cryptoport="
self.url += str(port)
seed_id = config.get('dedicated_seed_id')
if seed_id:
self.url += '&seed_id='+quote(seed_id)
if self.seededfunc:
self.url += '&check_seeded=1'
self.last = None
self.trackerid = None
self.announce_interval = 30 * 60
self.last_failed = True
self.never_succeeded = True
self.errorcodes = {}
self.lock = SuccessLock()
self.special = None
self.stopped = False
def start(self):
self.sched(self.c, self.interval/2)
self.d(0)
def c(self):
if self.stopped:
return
if not self.unpauseflag.isSet() and (
self.howmany() < self.minpeers or self.force_rapid_update ):
self.announce(3, self._c)
else:
self._c()
def _c(self):
self.sched(self.c, self.interval)
def d(self, event = 3):
if self.stopped:
return
if not self.unpauseflag.isSet():
self._d()
return
self.announce(event, self._d)
def _d(self):
if self.never_succeeded:
self.sched(self.d, 60) # retry in 60 seconds
elif self.force_rapid_update:
return
else:
self.sched(self.d, self.announce_interval)
def hit(self, event = 3):
if not self.unpauseflag.isSet() and (
self.howmany() < self.minpeers or self.force_rapid_update ):
self.announce(event)
def announce(self, event = 3, callback = lambda: None, specialurl = None):
if specialurl is not None:
s = self.url+'&uploaded=0&downloaded=0&left=1' # don't add to statistics
if self.howmany() >= self.maxpeers:
s += '&numwant=0'
else:
s += '&no_peer_id=1&compact=1'
self.last_failed = True # force true, so will display an error
self.special = specialurl
self.rerequest(s, callback)
return
else:
s = ('%s&uploaded=%s&downloaded=%s&left=%s' %
(self.url, str(self.up()), str(self.down()),
str(self.amount_left())))
if self.last is not None:
s += '&last=' + quote(str(self.last))
if self.trackerid is not None:
s += '&trackerid=' + quote(str(self.trackerid))
if self.howmany() >= self.maxpeers:
s += '&numwant=0'
else:
s += '&no_peer_id=1&compact=1'
if event != 3:
s += '&event=' + ['started', 'completed', 'stopped'][event]
if event == 2:
self.stopped = True
self.rerequest(s, callback)
def snoop(self, peers, callback = lambda: None): # tracker call support
self.rerequest(self.url
+'&event=stopped&port=0&uploaded=0&downloaded=0&left=1&tracker=1&numwant='
+str(peers), callback)
def rerequest(self, s, callback):
if not self.lock.isfinished(): # still waiting for prior cycle to complete??
def retry(self = self, s = s, callback = callback):
self.rerequest(s, callback)
self.sched(retry,5) # retry in 5 seconds
return
self.lock.reset()
rq = Thread(target = self._rerequest, args = [s, callback])
rq.setDaemon(False)
rq.start()
def _rerequest(self, s, callback):
try:
def fail (self = self, callback = callback):
self._fail(callback)
if self.ip:
try:
s += '&ip=' + gethostbyname(self.ip)
except:
self.errorcodes['troublecode'] = 'unable to resolve: '+self.ip
self.externalsched(fail)
self.errorcodes = {}
if self.special is None:
for t in range(len(self.trackerlist)):
for tr in range(len(self.trackerlist[t])):
tracker = self.trackerlist[t][tr]
if self.rerequest_single(tracker, s, callback):
if not self.last_failed and tr != 0:
del self.trackerlist[t][tr]
self.trackerlist[t] = [tracker] + self.trackerlist[t]
return
else:
tracker = self.special
self.special = None
if self.rerequest_single(tracker, s, callback):
return
# no success from any tracker
self.externalsched(fail)
except:
self.exception(callback)
def _fail(self, callback):
if ( (self.upratefunc() < 100 and self.downratefunc() < 100)
or not self.amount_left() ):
for f in ['rejected', 'bad_data', 'troublecode']:
if self.errorcodes.has_key(f):
r = self.errorcodes[f]
break
else:
r = 'Problem connecting to tracker - unspecified error'
self.errorfunc(r)
self.last_failed = True
self.lock.give_up()
self.externalsched(callback)
def rerequest_single(self, t, s, callback):
l = self.lock.set()
rq = Thread(target = self._rerequest_single, args = [t, s+get_key(t), l, callback])
rq.setDaemon(False)
rq.start()
self.lock.wait()
if self.lock.success:
self.lastsuccessful = t
self.last_failed = False
self.never_succeeded = False
return True
if not self.last_failed and self.lastsuccessful == t:
# if the last tracker hit was successful, and you've just tried the tracker
# you'd contacted before, don't go any further, just fail silently.
self.last_failed = True
self.externalsched(callback)
self.lock.give_up()
return True
return False # returns true if it wants rerequest() to exit
def _rerequest_single(self, t, s, l, callback):
try:
closer = [None]
def timedout(self = self, l = l, closer = closer):
if self.lock.trip(l):
self.errorcodes['troublecode'] = 'Problem connecting to tracker - timeout exceeded'
self.lock.unwait(l)
try:
closer[0]()
except:
pass
self.externalsched(timedout, self.timeout)
err = None
try:
url,q = t.split('?',1)
q += '&'+s
except:
url = t
q = s
try:
h = urlopen(url+'?'+q)
closer[0] = h.close
data = h.read()
except (IOError, error), e:
err = 'Problem connecting to tracker - ' + str(e)
except:
err = 'Problem connecting to tracker'
try:
h.close()
except:
pass
if err:
if self.lock.trip(l):
self.errorcodes['troublecode'] = err
self.lock.unwait(l)
return
if data == '':
if self.lock.trip(l):
self.errorcodes['troublecode'] = 'no data from tracker'
self.lock.unwait(l)
return
try:
r = bdecode(data, sloppy=1)
check_peers(r)
except ValueError, e:
if self.lock.trip(l):
self.errorcodes['bad_data'] = 'bad data from tracker - ' + str(e)
self.lock.unwait(l)
return
if r.has_key('failure reason'):
if self.lock.trip(l):
self.errorcodes['rejected'] = self.rejectedmessage + r['failure reason']
self.lock.unwait(l)
return
if self.lock.trip(l, True): # success!
self.lock.unwait(l)
else:
callback = lambda: None # attempt timed out, don't do a callback
# even if the attempt timed out, go ahead and process data
def add(self = self, r = r, callback = callback):
self.postrequest(r, callback)
self.externalsched(add)
except:
self.exception(callback)
def postrequest(self, r, callback):
if r.has_key('warning message'):
self.errorfunc('warning from tracker - ' + r['warning message'])
self.announce_interval = r.get('interval', self.announce_interval)
self.interval = r.get('min interval', self.interval)
self.trackerid = r.get('tracker id', self.trackerid)
self.last = r.get('last')
# ps = len(r['peers']) + self.howmany()
p = r['peers']
peers = []
if type(p) == type(''):
lenpeers = len(p)/6
else:
lenpeers = len(p)
cflags = r.get('crypto_flags')
if type(cflags) != type('') or len(cflags) != lenpeers:
cflags = None
if cflags is None:
cflags = [None for i in xrange(lenpeers)]
else:
cflags = [ord(x) for x in cflags]
if type(p) == type(''):
for x in xrange(0, len(p), 6):
ip = '.'.join([str(ord(i)) for i in p[x:x+4]])
port = (ord(p[x+4]) << 8) | ord(p[x+5])
peers.append(((ip, port), 0, cflags[int(x/6)]))
else:
for i in xrange(len(p)):
x = p[i]
peers.append(((x['ip'].strip(), x['port']),
x.get('peer id',0), cflags[i]))
ps = len(peers) + self.howmany()
if ps < self.maxpeers:
if self.doneflag.isSet():
if r.get('num peers', 1000) - r.get('done peers', 0) > ps * 1.2:
self.last = None
else:
if r.get('num peers', 1000) > ps * 1.2:
self.last = None
if self.seededfunc and r.get('seeded'):
self.seededfunc()
elif peers:
shuffle(peers)
self.connect(peers)
callback()
def exception(self, callback):
data = StringIO()
print_exc(file = data)
def r(s = data.getvalue(), callback = callback):
if self.excfunc:
self.excfunc(s)
else:
print s
callback()
self.externalsched(r)
class SuccessLock:
def __init__(self):
self.lock = Lock()
self.pause = Lock()
self.code = 0L
self.success = False
self.finished = True
def reset(self):
self.success = False
self.finished = False
def set(self):
self.lock.acquire()
if not self.pause.locked():
self.pause.acquire()
self.first = True
self.code += 1L
self.lock.release()
return self.code
def trip(self, code, s = False):
self.lock.acquire()
try:
if code == self.code and not self.finished:
r = self.first
self.first = False
if s:
self.finished = True
self.success = True
return r
finally:
self.lock.release()
def give_up(self):
self.lock.acquire()
self.success = False
self.finished = True
self.lock.release()
def wait(self):
self.pause.acquire()
def unwait(self, code):
if code == self.code and self.pause.locked():
self.pause.release()
def isfinished(self):
self.lock.acquire()
x = self.finished
self.lock.release()
return x
|
Skruf90/FriendlyTorrent
|
src/tornado/BitTornado/BT1/Rerequester.py
|
Python
|
apache-2.0
| 15,296
|
# Copyright 2015 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.config import cfg
from neutron.common import utils
METADATA_PROXY_HANDLER_OPTS = [
cfg.StrOpt('admin_user',
help=_("Admin user")),
cfg.StrOpt('admin_password',
help=_("Admin password"),
secret=True),
cfg.StrOpt('admin_tenant_name',
help=_("Admin tenant name")),
cfg.StrOpt('auth_url',
help=_("Authentication URL")),
cfg.StrOpt('auth_strategy', default='keystone',
help=_("The type of authentication to use")),
cfg.StrOpt('auth_region',
help=_("Authentication region")),
cfg.BoolOpt('auth_insecure',
default=False,
help=_("Turn off verification of the certificate for"
" ssl")),
cfg.StrOpt('auth_ca_cert',
help=_("Certificate Authority public key (CA cert) "
"file for ssl")),
cfg.StrOpt('endpoint_type',
default='adminURL',
help=_("Network service endpoint type to pull from "
"the keystone catalog")),
cfg.StrOpt('nova_metadata_ip', default='127.0.0.1',
help=_("IP address used by Nova metadata server.")),
cfg.IntOpt('nova_metadata_port',
default=8775,
help=_("TCP Port used by Nova metadata server.")),
cfg.StrOpt('metadata_proxy_shared_secret',
default='',
help=_('Shared secret to sign instance-id request'),
secret=True),
cfg.StrOpt('nova_metadata_protocol',
default='http',
choices=['http', 'https'],
help=_("Protocol to access nova metadata, http or https")),
cfg.BoolOpt('nova_metadata_insecure', default=False,
help=_("Allow to perform insecure SSL (https) requests to "
"nova metadata")),
cfg.StrOpt('nova_client_cert',
default='',
help=_("Client certificate for nova metadata api server.")),
cfg.StrOpt('nova_client_priv_key',
default='',
help=_("Private key of client certificate."))
]
UNIX_DOMAIN_METADATA_PROXY_OPTS = [
cfg.StrOpt('metadata_proxy_socket',
default='$state_path/metadata_proxy',
help=_('Location for Metadata Proxy UNIX domain socket')),
cfg.IntOpt('metadata_workers',
default=utils.cpu_count() // 2,
help=_('Number of separate worker processes for metadata '
'server')),
cfg.IntOpt('metadata_backlog',
default=4096,
help=_('Number of backlog requests to configure the '
'metadata server socket with'))
]
|
projectcalico/calico-neutron
|
neutron/agent/metadata/config.py
|
Python
|
apache-2.0
| 3,403
|
"""
Support for ISY994 fans.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/fan.isy994/
"""
import logging
from typing import Callable
from homeassistant.components.fan import (FanEntity, DOMAIN, SPEED_OFF,
SPEED_LOW, SPEED_MEDIUM,
SPEED_HIGH, SUPPORT_SET_SPEED)
from homeassistant.components.isy994 import (ISY994_NODES, ISY994_PROGRAMS,
ISYDevice)
from homeassistant.helpers.typing import ConfigType
_LOGGER = logging.getLogger(__name__)
VALUE_TO_STATE = {
0: SPEED_OFF,
63: SPEED_LOW,
64: SPEED_LOW,
190: SPEED_MEDIUM,
191: SPEED_MEDIUM,
255: SPEED_HIGH,
}
STATE_TO_VALUE = {}
for key in VALUE_TO_STATE:
STATE_TO_VALUE[VALUE_TO_STATE[key]] = key
def setup_platform(hass, config: ConfigType,
add_entities: Callable[[list], None], discovery_info=None):
"""Set up the ISY994 fan platform."""
devices = []
for node in hass.data[ISY994_NODES][DOMAIN]:
devices.append(ISYFanDevice(node))
for name, status, actions in hass.data[ISY994_PROGRAMS][DOMAIN]:
devices.append(ISYFanProgram(name, status, actions))
add_entities(devices)
class ISYFanDevice(ISYDevice, FanEntity):
"""Representation of an ISY994 fan device."""
@property
def speed(self) -> str:
"""Return the current speed."""
return VALUE_TO_STATE.get(self.value)
@property
def is_on(self) -> bool:
"""Get if the fan is on."""
return self.value != 0
def set_speed(self, speed: str) -> None:
"""Send the set speed command to the ISY994 fan device."""
self._node.on(val=STATE_TO_VALUE.get(speed, 255))
def turn_on(self, speed: str = None, **kwargs) -> None:
"""Send the turn on command to the ISY994 fan device."""
self.set_speed(speed)
def turn_off(self, **kwargs) -> None:
"""Send the turn off command to the ISY994 fan device."""
self._node.off()
@property
def speed_list(self) -> list:
"""Get the list of available speeds."""
return [SPEED_OFF, SPEED_LOW, SPEED_MEDIUM, SPEED_HIGH]
@property
def supported_features(self) -> int:
"""Flag supported features."""
return SUPPORT_SET_SPEED
class ISYFanProgram(ISYFanDevice):
"""Representation of an ISY994 fan program."""
def __init__(self, name: str, node, actions) -> None:
"""Initialize the ISY994 fan program."""
super().__init__(node)
self._name = name
self._actions = actions
def turn_off(self, **kwargs) -> None:
"""Send the turn on command to ISY994 fan program."""
if not self._actions.runThen():
_LOGGER.error("Unable to turn off the fan")
def turn_on(self, speed: str = None, **kwargs) -> None:
"""Send the turn off command to ISY994 fan program."""
if not self._actions.runElse():
_LOGGER.error("Unable to turn on the fan")
@property
def supported_features(self) -> int:
"""Flag supported features."""
return 0
|
PetePriority/home-assistant
|
homeassistant/components/isy994/fan.py
|
Python
|
apache-2.0
| 3,213
|
"""Platform for the Garadget cover component."""
import logging
import requests
import voluptuous as vol
from homeassistant.components.cover import PLATFORM_SCHEMA, CoverDevice
from homeassistant.const import (
CONF_ACCESS_TOKEN,
CONF_COVERS,
CONF_DEVICE,
CONF_NAME,
CONF_PASSWORD,
CONF_USERNAME,
STATE_CLOSED,
STATE_OPEN,
)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.event import track_utc_time_change
_LOGGER = logging.getLogger(__name__)
ATTR_AVAILABLE = "available"
ATTR_SENSOR_STRENGTH = "sensor_reflection_rate"
ATTR_SIGNAL_STRENGTH = "wifi_signal_strength"
ATTR_TIME_IN_STATE = "time_in_state"
DEFAULT_NAME = "Garadget"
STATE_CLOSING = "closing"
STATE_OFFLINE = "offline"
STATE_OPENING = "opening"
STATE_STOPPED = "stopped"
STATES_MAP = {
"open": STATE_OPEN,
"opening": STATE_OPENING,
"closed": STATE_CLOSED,
"closing": STATE_CLOSING,
"stopped": STATE_STOPPED,
}
COVER_SCHEMA = vol.Schema(
{
vol.Optional(CONF_ACCESS_TOKEN): cv.string,
vol.Optional(CONF_DEVICE): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_PASSWORD): cv.string,
vol.Optional(CONF_USERNAME): cv.string,
}
)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{vol.Required(CONF_COVERS): cv.schema_with_slug_keys(COVER_SCHEMA)}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Garadget covers."""
covers = []
devices = config.get(CONF_COVERS)
for device_id, device_config in devices.items():
args = {
"name": device_config.get(CONF_NAME),
"device_id": device_config.get(CONF_DEVICE, device_id),
"username": device_config.get(CONF_USERNAME),
"password": device_config.get(CONF_PASSWORD),
"access_token": device_config.get(CONF_ACCESS_TOKEN),
}
covers.append(GaradgetCover(hass, args))
add_entities(covers)
class GaradgetCover(CoverDevice):
"""Representation of a Garadget cover."""
def __init__(self, hass, args):
"""Initialize the cover."""
self.particle_url = "https://api.particle.io"
self.hass = hass
self._name = args["name"]
self.device_id = args["device_id"]
self.access_token = args["access_token"]
self.obtained_token = False
self._username = args["username"]
self._password = args["password"]
self._state = None
self.time_in_state = None
self.signal = None
self.sensor = None
self._unsub_listener_cover = None
self._available = True
if self.access_token is None:
self.access_token = self.get_token()
self._obtained_token = True
try:
if self._name is None:
doorconfig = self._get_variable("doorConfig")
if doorconfig["nme"] is not None:
self._name = doorconfig["nme"]
self.update()
except requests.exceptions.ConnectionError as ex:
_LOGGER.error("Unable to connect to server: %(reason)s", dict(reason=ex))
self._state = STATE_OFFLINE
self._available = False
self._name = DEFAULT_NAME
except KeyError:
_LOGGER.warning(
"Garadget device %(device)s seems to be offline",
dict(device=self.device_id),
)
self._name = DEFAULT_NAME
self._state = STATE_OFFLINE
self._available = False
def __del__(self):
"""Try to remove token."""
if self._obtained_token is True:
if self.access_token is not None:
self.remove_token()
@property
def name(self):
"""Return the name of the cover."""
return self._name
@property
def should_poll(self):
"""No polling needed for a demo cover."""
return True
@property
def available(self):
"""Return True if entity is available."""
return self._available
@property
def device_state_attributes(self):
"""Return the device state attributes."""
data = {}
if self.signal is not None:
data[ATTR_SIGNAL_STRENGTH] = self.signal
if self.time_in_state is not None:
data[ATTR_TIME_IN_STATE] = self.time_in_state
if self.sensor is not None:
data[ATTR_SENSOR_STRENGTH] = self.sensor
if self.access_token is not None:
data[CONF_ACCESS_TOKEN] = self.access_token
return data
@property
def is_closed(self):
"""Return if the cover is closed."""
if self._state is None:
return None
return self._state == STATE_CLOSED
@property
def device_class(self):
"""Return the class of this device, from component DEVICE_CLASSES."""
return "garage"
def get_token(self):
"""Get new token for usage during this session."""
args = {
"grant_type": "password",
"username": self._username,
"password": self._password,
}
url = f"{self.particle_url}/oauth/token"
ret = requests.post(url, auth=("particle", "particle"), data=args, timeout=10)
try:
return ret.json()["access_token"]
except KeyError:
_LOGGER.error("Unable to retrieve access token")
def remove_token(self):
"""Remove authorization token from API."""
url = f"{self.particle_url}/v1/access_tokens/{self.access_token}"
ret = requests.delete(url, auth=(self._username, self._password), timeout=10)
return ret.text
def _start_watcher(self, command):
"""Start watcher."""
_LOGGER.debug("Starting Watcher for command: %s ", command)
if self._unsub_listener_cover is None:
self._unsub_listener_cover = track_utc_time_change(
self.hass, self._check_state
)
def _check_state(self, now):
"""Check the state of the service during an operation."""
self.schedule_update_ha_state(True)
def close_cover(self, **kwargs):
"""Close the cover."""
if self._state not in ["close", "closing"]:
ret = self._put_command("setState", "close")
self._start_watcher("close")
return ret.get("return_value") == 1
def open_cover(self, **kwargs):
"""Open the cover."""
if self._state not in ["open", "opening"]:
ret = self._put_command("setState", "open")
self._start_watcher("open")
return ret.get("return_value") == 1
def stop_cover(self, **kwargs):
"""Stop the door where it is."""
if self._state not in ["stopped"]:
ret = self._put_command("setState", "stop")
self._start_watcher("stop")
return ret["return_value"] == 1
def update(self):
"""Get updated status from API."""
try:
status = self._get_variable("doorStatus")
_LOGGER.debug("Current Status: %s", status["status"])
self._state = STATES_MAP.get(status["status"], None)
self.time_in_state = status["time"]
self.signal = status["signal"]
self.sensor = status["sensor"]
self._available = True
except requests.exceptions.ConnectionError as ex:
_LOGGER.error("Unable to connect to server: %(reason)s", dict(reason=ex))
self._state = STATE_OFFLINE
except KeyError:
_LOGGER.warning(
"Garadget device %(device)s seems to be offline",
dict(device=self.device_id),
)
self._state = STATE_OFFLINE
if self._state not in [STATE_CLOSING, STATE_OPENING]:
if self._unsub_listener_cover is not None:
self._unsub_listener_cover()
self._unsub_listener_cover = None
def _get_variable(self, var):
"""Get latest status."""
url = "{}/v1/devices/{}/{}?access_token={}".format(
self.particle_url, self.device_id, var, self.access_token
)
ret = requests.get(url, timeout=10)
result = {}
for pairs in ret.json()["result"].split("|"):
key = pairs.split("=")
result[key[0]] = key[1]
return result
def _put_command(self, func, arg=None):
"""Send commands to API."""
params = {"access_token": self.access_token}
if arg:
params["command"] = arg
url = f"{self.particle_url}/v1/devices/{self.device_id}/{func}"
ret = requests.post(url, data=params, timeout=10)
return ret.json()
|
leppa/home-assistant
|
homeassistant/components/garadget/cover.py
|
Python
|
apache-2.0
| 8,790
|
#!/usr/bin/env python
# Copyright 2014 ARM Limited
#
# Licensed under the Apache License, Version 2.0
# See LICENSE file for details.
# standard library modules, , ,
import unittest
import os
import tempfile
# internal modules:
from yotta.lib.fsutils import rmRf
from . import cli
Test_Module_JSON = '''{
"name": "git-access-testing",
"version": "0.0.2",
"description": "Git Access Testing",
"author": "autopulated",
"homepage": "https://github.com/autopulated/git-access-testing",
"licenses": [
{
"url": "about:blank",
"type": ""
}
],
"dependencies": {
"testing-dummy": "git@bitbucket.org:autopulated/testing-dummy.git",
"other-testing-dummy": "git@bitbucket.org:autopulated/other-testing-dummy.git#0.0.2"
}
}
'''
class TestCLIOwners(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.test_dir = tempfile.mkdtemp()
with open(os.path.join(cls.test_dir, 'module.json'), 'w') as f:
f.write(Test_Module_JSON)
cls.saved_settings_dir = None
# override the settings directory, so that we can be sure we're not
# logged in
if 'YOTTA_USER_SETTINGS_DIR' in os.environ:
cls.saved_settings_dir = os.environ['YOTTA_USER_SETTINGS_DIR']
# use a directory called tmp_yotta_settings in the working directory:
os.environ['YOTTA_USER_SETTINGS_DIR'] = 'tmp_yotta_settings'
@classmethod
def tearDownClass(cls):
rmRf(cls.test_dir)
cls.test_dir = None
if cls.saved_settings_dir is not None:
os.environ['YOTTA_USER_SETTINGS_DIR'] = cls.saved_settings_dir
cls.saved_settings_dir = None
else:
del os.environ['YOTTA_USER_SETTINGS_DIR']
# you have have to be authenticated to list owners, so currently we only
# test that the commands fail correctly in noninteractive mode:
def test_listOwners(self):
stdout, stderr, statuscode = cli.run(['-n', 'owners', 'ls'], cwd=self.test_dir)
if statuscode != 0:
self.assertTrue((stdout+stderr).find('login required') != -1)
def test_addOwner(self):
stdout, stderr, statuscode = cli.run(['-n', 'owners', 'add', 'friend@example.com'], cwd=self.test_dir)
if statuscode != 0:
self.assertTrue((stdout+stderr).find('login required') != -1)
def test_rmOwner(self):
stdout, stderr, statuscode = cli.run(['-n', 'owners', 'rm', 'friend@example.com'], cwd=self.test_dir)
if statuscode != 0:
self.assertTrue((stdout+stderr).find('login required') != -1)
if __name__ == '__main__':
unittest.main()
|
BlackstoneEngineering/yotta
|
yotta/test/cli/owners.py
|
Python
|
apache-2.0
| 2,645
|
#!/usr/bin/env python
# Copyright (C) 2017 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import re
print "Generate framework fragment related code for leanback"
cls = ['Base', 'BaseRow', 'Browse', 'Details', 'Error', 'Headers',
'Playback', 'Rows', 'Search', 'VerticalGrid', 'Branded',
'GuidedStep', 'Onboarding', 'Video']
for w in cls:
print "copy {}SupportFragment to {}Fragment".format(w, w)
file = open('src/main/java/androidx/leanback/app/{}SupportFragment.java'.format(w), 'r')
content = "// CHECKSTYLE:OFF Generated code\n"
content = content + "/* This file is auto-generated from {}SupportFragment.java. DO NOT MODIFY. */\n\n".format(w)
for line in file:
line = line.replace('IS_FRAMEWORK_FRAGMENT = false', 'IS_FRAMEWORK_FRAGMENT = true');
for w2 in cls:
line = line.replace('{}SupportFragment'.format(w2), '{}Fragment'.format(w2))
line = line.replace('androidx.fragment.app.FragmentActivity', 'android.app.Activity')
line = line.replace('androidx.fragment.app.Fragment', 'android.app.Fragment')
line = line.replace('activity.getSupportFragmentManager()', 'activity.getFragmentManager()')
line = line.replace('FragmentActivity activity', 'Activity activity')
line = line.replace('FragmentActivity#onBackPressed', 'Activity#onBackPressed')
line = line.replace('(FragmentActivity', '(Activity')
line = line.replace('setEnterTransition(enterTransition)', 'setEnterTransition((android.transition.Transition) enterTransition)');
line = line.replace('setSharedElementEnterTransition(sharedElementTransition)', 'setSharedElementEnterTransition((android.transition.Transition) sharedElementTransition)');
line = line.replace('setExitTransition(exitTransition)', 'setExitTransition((android.transition.Transition) exitTransition)');
line = line.replace('requestPermissions(new', 'PermissionHelper.requestPermissions(SearchFragment.this, new');
# replace getContext() with FragmentUtil.getContext(XXXFragment.this), but dont match the case "view.getContext()"
line = re.sub(r'([^\.])getContext\(\)', r'\1FragmentUtil.getContext({}Fragment.this)'.format(w), line);
content = content + line
file.close()
# add deprecated tag to fragment class and inner classes/interfaces
content = re.sub(r'\*\/\n(@.*\n|)(public |abstract public |abstract |)class', '* @deprecated use {@link ' + w + 'SupportFragment}\n */\n@Deprecated\n\\1\\2class', content)
content = re.sub(r'\*\/\n public (static class|interface|final static class|abstract static class)', '* @deprecated use {@link ' + w + 'SupportFragment}\n */\n @Deprecated\n public \\1', content)
outfile = open('src/main/java/androidx/leanback/app/{}Fragment.java'.format(w), 'w')
outfile.write(content)
outfile.close()
print "copy VideoSupportFragmentGlueHost to VideoFragmentGlueHost"
file = open('src/main/java/androidx/leanback/app/VideoSupportFragmentGlueHost.java', 'r')
content = "// CHECKSTYLE:OFF Generated code\n"
content = content + "/* This file is auto-generated from VideoSupportFragmentGlueHost.java. DO NOT MODIFY. */\n\n"
for line in file:
line = line.replace('androidx.fragment.app.Fragment', 'android.app.Fragment')
line = line.replace('VideoSupportFragment', 'VideoFragment')
line = line.replace('PlaybackSupportFragment', 'PlaybackFragment')
content = content + line
file.close()
# add deprecated tag to class
content = re.sub(r'\*\/\npublic class', '* @deprecated use {@link VideoSupportFragmentGlueHost}\n */\n@Deprecated\npublic class', content)
outfile = open('src/main/java/androidx/leanback/app/VideoFragmentGlueHost.java', 'w')
outfile.write(content)
outfile.close()
print "copy PlaybackSupportFragmentGlueHost to PlaybackFragmentGlueHost"
file = open('src/main/java/androidx/leanback/app/PlaybackSupportFragmentGlueHost.java', 'r')
content = "// CHECKSTYLE:OFF Generated code\n"
content = content + "/* This file is auto-generated from {}PlaybackSupportFragmentGlueHost.java. DO NOT MODIFY. */\n\n"
for line in file:
line = line.replace('VideoSupportFragment', 'VideoFragment')
line = line.replace('PlaybackSupportFragment', 'PlaybackFragment')
line = line.replace('androidx.fragment.app.Fragment', 'android.app.Fragment')
content = content + line
file.close()
# add deprecated tag to class
content = re.sub(r'\*\/\npublic class', '* @deprecated use {@link PlaybackSupportFragmentGlueHost}\n */\n@Deprecated\npublic class', content)
outfile = open('src/main/java/androidx/leanback/app/PlaybackFragmentGlueHost.java', 'w')
outfile.write(content)
outfile.close()
print "copy DetailsSupportFragmentBackgroundController to DetailsFragmentBackgroundController"
file = open('src/main/java/androidx/leanback/app/DetailsSupportFragmentBackgroundController.java', 'r')
content = "// CHECKSTYLE:OFF Generated code\n"
content = content + "/* This file is auto-generated from {}DetailsSupportFragmentBackgroundController.java. DO NOT MODIFY. */\n\n"
for line in file:
line = line.replace('VideoSupportFragment', 'VideoFragment')
line = line.replace('DetailsSupportFragment', 'DetailsFragment')
line = line.replace('RowsSupportFragment', 'RowsFragment')
line = line.replace('androidx.fragment.app.Fragment', 'android.app.Fragment')
line = line.replace('mFragment.getContext()', 'FragmentUtil.getContext(mFragment)')
content = content + line
file.close()
# add deprecated tag to class
content = re.sub(r'\*\/\npublic class', '* @deprecated use {@link DetailsSupportFragmentBackgroundController}\n */\n@Deprecated\npublic class', content)
outfile = open('src/main/java/androidx/leanback/app/DetailsFragmentBackgroundController.java', 'w')
outfile.write(content)
outfile.close()
|
androidx/androidx
|
leanback/leanback/generatef.py
|
Python
|
apache-2.0
| 6,367
|
# -*- coding: utf-8 -*-
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
import functools
import amqp.exceptions as amqp_exceptions
from kombu import Connection
from kombu import Exchange
from oslo_serialization import jsonutils
from kombu import Queue
from nailgun.logger import logger
from nailgun.settings import settings
from nailgun.rpc import utils
creds = (
("userid", "guest"),
("password", "guest"),
("hostname", "localhost"),
("port", "5672"),
)
conn_str = 'amqp://{0}:{1}@{2}:{3}//'.format(
*[settings.RABBITMQ.get(*cred) for cred in creds]
)
naily_exchange = Exchange(
'naily',
'topic',
durable=True
)
naily_queue = Queue(
'naily',
exchange=naily_exchange,
routing_key='naily'
)
naily_service_exchange = Exchange(
'naily_service',
'fanout',
durable=False,
auto_delete=True
)
naily_service_queue = Queue(
'naily_service',
exchange=naily_service_exchange
)
nailgun_exchange = Exchange(
'nailgun',
'topic',
durable=True
)
nailgun_queue = Queue(
'nailgun',
exchange=nailgun_exchange,
routing_key='nailgun'
)
def cast(name, message, service=False):
logger.debug(
"RPC cast to orchestrator:\n{0}".format(
jsonutils.dumps(message, indent=4)
)
)
use_queue = naily_queue if not service else naily_service_queue
use_exchange = naily_exchange if not service else naily_service_exchange
with Connection(conn_str) as conn:
with conn.Producer(serializer='json') as producer:
publish = functools.partial(producer.publish, message,
exchange=use_exchange, routing_key=name, declare=[use_queue])
try:
publish()
except amqp_exceptions.PreconditionFailed as e:
logger.warning(six.text_type(e))
# (dshulyak) we should drop both exchanges/queues in order
# for astute to be able to recover temporary queues
utils.delete_entities(
conn, naily_service_exchange, naily_service_queue,
naily_exchange, naily_queue)
publish()
|
huntxu/fuel-web
|
nailgun/nailgun/rpc/__init__.py
|
Python
|
apache-2.0
| 2,731
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test infrastructure for the NPU cascader"""
|
dmlc/tvm
|
tests/python/contrib/test_ethosu/cascader/__init__.py
|
Python
|
apache-2.0
| 832
|
# coding: utf-8
if DefLANG in ("RU", "UA"):
AnsBase_temp = tuple([line.decode("utf-8") for line in (
"\nВсего входов - %d\nВремя последнего входа - %s\nПоследняя роль - %s", # 0
"\nВремя последнего выхода - %s\nПричина выхода - %s", # 1
"\nНики: %s", # 2
"Нет статистики.", # 3
"«%s» сидит здесь - %s.", # 4
"Ты провёл здесь - %s.", # 5
"Здесь нет такого юзера." # 6
)])
else:
AnsBase_temp = (
"\nTotal joins - %d\nThe Last join-time - %s\nThe last role - %s", # 0
"\nThe last leave-time - %s\nExit reason - %s", # 1
"\nNicks: %s", # 2
"No statistics.", # 3
"'%s' spent here - %s.", # 4
"You spent here - %s.", # 5
"No such user here." # 6
)
|
alkorgun/blacksmith-2
|
expansions/user_stats/insc.py
|
Python
|
apache-2.0
| 813
|