code stringlengths 3 1.05M | repo_name stringlengths 5 104 | path stringlengths 4 251 | language stringclasses 1 value | license stringclasses 15 values | size int64 3 1.05M |
|---|---|---|---|---|---|
#!/usr/bin/env python
# encoding: utf-8
import base64
import os
DEBUG = True
LOGIN_URL = '/login/'
XSRF_COOKIES = True
# Secret key for cookies and password salt generation.
# NOTE(review): generated at import time, so every restart (and every worker
# process) gets a different secret, invalidating previously issued cookies --
# confirm this is intended; otherwise load the secret from persistent config.
COOKIE_SECRET = base64.b64encode(os.urandom(32))
CSRF_COOKIE_NAME = "csrftoken"
# Filesystem locations for static assets, templates and cache files.
STATIC_PATH = 'static'
TEMPLATE_PATH = 'templates/'
CACHE_DIRECTORY = '/tmp'
# Disable automatic HTML escaping in templates.
AUTOESCAPE = False
# Extension and application hooks, filled in by project-level settings.
WORKIN_EXTENSIONS = []
INSTALLED_APPS = []
# Session engine settings
SESSION_ENGINE = 'workin.session.backends.redis_session.RedisSessionEngine'
SESSION_ENGINE_KWARGS = {}
# sqlalchemy settings
SQLALCHEMY_ENGINE_URL = None
SQLALCHEMY_ENGINE_KWARGS = {}
# Jinja2 settings
JINJA2_TEMPLATE_DIRS = []
JINJA2_SETTINGS = {"cache_size": 100}
JINJA2_CONTEXT_PROCESSORS = []
# Low level tornado options.
TORNADO_TRANSFORMS = None
TORNADO_DEFAULT_HOST = ""
TORNADO_WSGI_MODE = False
TORNADO_SETTINGS = {}
| knownsec/workin | workin/conf/global_settings.py | Python | bsd-3-clause | 860 |
# Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source
# & Institut Laue - Langevin
# SPDX - License - Identifier: GPL - 3.0 +
from __future__ import absolute_import, print_function
class QuickEditPresenter(object):
    """Presenter for the quick-edit widget.

    Thin mediator that forwards signal connections and plot-range,
    error-bar and subplot operations to the underlying view.
    """

    def __init__(self, view):
        self._view = view

    @property
    def widget(self):
        """The wrapped quick-edit view widget."""
        return self._view

    def connect_autoscale_changed(self, slot):
        """Wire *slot* to the view's autoscale-changed signal."""
        self._view.connect_autoscale_changed(slot)

    def connect_errors_changed(self, slot):
        """Wire *slot* to the view's error-bars-changed signal."""
        self._view.connect_errors_changed(slot)

    def connect_x_range_changed(self, slot):
        """Wire *slot* to the view's x-range-changed signal."""
        self._view.connect_x_range_changed(slot)

    def connect_y_range_changed(self, slot):
        """Wire *slot* to the view's y-range-changed signal."""
        self._view.connect_y_range_changed(slot)

    def connect_plot_selection(self, slot):
        """Wire *slot* to the view's plot-selection signal."""
        self._view.connect_plot_selection(slot)

    def add_subplot(self, name):
        """Register a new subplot while keeping the current selection active."""
        selected = self._view.current_selection()
        self._view.add_subplot(name)
        # Re-select what the user had selected before the insertion.
        self._view.set_index(self._view.find_index(selected))

    def all(self):
        """Return every subplot name, skipping the special entry at index 0."""
        count = self._view.number_of_plots()
        return [self._view.plot_at_index(pos) for pos in range(1, count)]

    def set_plot_x_range(self, range):
        """Push a new x-axis range to the view."""
        self._view.set_plot_x_range(range)

    def set_plot_y_range(self, y_range):
        """Push a new y-axis range to the view."""
        self._view.set_plot_y_range(y_range)

    def set_errors(self, state):
        """Update the error-bar checkbox only when the state actually changes."""
        if self._view.get_errors() != state:
            self._view.set_errors(state)

    def rm_subplot(self, name):
        """Remove the named subplot, restoring the previous selection."""
        selected = self._view.current_selection()
        self._view.rm_subplot(self._view.find_index(name))
        self._view.set_index(self._view.find_index(selected))
| mganeva/mantid | scripts/MultiPlotting/QuickEdit/quickEdit_presenter.py | Python | gpl-3.0 | 1,871 |
from setuptools import setup
# Prefer the reST readme (renders natively on PyPI); fall back to the
# Markdown readme when the reST file is absent.
try:
    with open('readme.rst') as f:
        long_description = f.read()
except IOError:
    with open('readme.md') as f:
        long_description = f.read()
def read_version():
    """Read the package version from ``eralchemy/version.py``.

    The first line of that file is expected to have the form
    ``version = '<x.y.z>'``.  It is executed in an isolated namespace so
    the package (and its dependencies) is never imported at build time.

    :returns: the version string.
    :raises AssertionError: if the executed line does not define ``version``.
    """
    with open('eralchemy/version.py') as f:
        code = f.readlines()[0]
    # Execute into an explicit dict: mutating locals() via exec() inside a
    # function is unreliable in Python 3 (function locals are not a real dict).
    namespace = {}
    exec(code, namespace)
    assert ('version' in namespace)
    return namespace['version']
# Package metadata and build configuration for ERAlchemy.
setup(
    name='ERAlchemy',
    version=read_version(),
    description='Simple entity relation (ER) diagrams generation',
    long_description=long_description,
    # The project's main homepage.
    url='https://github.com/Alexis-benoist/eralchemy',
    # Author details
    author='Alexis Benoist',
    author_email='alexis.benoist@gmail.com',
    # Choose your license
    license='Apache License 2.0',
    # See https://pypi.python.org/pypi?%3Aaction=list_classifiers
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        # that you indicate whether you support Python 2, Python 3 or both.
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Topic :: Scientific/Engineering :: Visualization',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'Topic :: Database',
    ],
    # What does your project relate to?
    keywords='sql relational databases ER diagram render',
    # You can just specify the packages manually here if your project is
    # simple. Or you can use find_packages().
    packages=[
        'eralchemy',
    ],
    # List run-time dependencies here. These will be installed by pip when
    # your project is installed. For an analysis of "install_requires" vs pip's
    # requirements files see:
    # https://packaging.python.org/en/latest/requirements.html
    # NOTE: pygraphviz requires the Graphviz C libraries at install time.
    install_requires=[
        'SQLAlchemy',
        'pygraphviz'
    ],
    # Console entry point: installs the `eralchemy` command.
    entry_points={
        'console_scripts': [
            'eralchemy=eralchemy.main:cli',
        ],
    },
)
| Alexis-benoist/eralchemy | setup.py | Python | apache-2.0 | 2,202 |
# ===========
# pysap - Python library for crafting SAP's network protocols packets
#
# Copyright (C) 2015 by Martin Gallo, Core Security
#
# The library was designed and developed by Martin Gallo from the Security
# Consulting Services team of Core Security.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# ==============
# Standard imports
from random import randint
from socket import error as SocketError
from binascii import unhexlify as unhex
# Custom imports
from pysap.SAPRouter import SAPRoutedStreamSocket
from pysap.SAPDiag import SAPDiag, SAPDiagDP, SAPDiagItem
from pysap.SAPDiagItems import (user_connect_compressed,
user_connect_uncompressed,
support_data as default_support_data,
SAPDiagStep, SAPDiagSupportBits)
class SAPDiagConnection(object):
    """SAP Diag Connection
    This class represents a basic client connection to a Diag server.
    Handles initialization and further interaction by sending/receiving
    packets.
    """
    last_response = None
    """ @ivar: stores the last response received from the server
    @type: L{SAPNI<SAPNI.SAPNI>} """
    initialized = False
    """ @ivar: if the connection was initialized
    @type: C{bool} """
    step = 0
    """ @ivar: number of the last dialog step performed
    @type: C{int} """
    def __init__(self, host, port, terminal=None, compress=False,
                 init=False, route=None, support_data=default_support_data):
        """Creates the connection to the Diag server.
        @param host: remote host to connect to
        @type host: C{string}
        @param port: remote port to connect to
        @type port: C{int}
        @param terminal: terminal name to use when connecting to the server.
            If no terminal name is specified, a random IP address will be
            generated and used instead of the terminal name.
        @type terminal: C{string}
        @param compress: if true, the compression will be enabled for the
            connection. Otherwise, the connection will be established
            without compression.
        @type compress: C{bool}
        @param init: if true, the initialization will be performed after the
            connection is established.
        @type init: C{bool}
        @param route: route to use for connecting through a SAP Router
        @type route: C{string}
        @param support_data: support data bits to use when initializing. It
            identifies the client's capabilities.
        @type support_data: L{SAPDiagItem} or L{SAPDiagSupportBits} or C{string}
        """
        self.host = host
        self.port = port
        self.terminal = terminal or self.get_terminal_name()
        self.route = route
        # Normalize whatever form of support data was given; fall back to the
        # module-level default if it could not be coerced into a SAPDiagItem.
        self.support_data = self.get_support_data_item(support_data) or default_support_data
        # The Diag protocol carries compression as an integer flag (0/1).
        if compress is True:
            self.compress = 1
        else:
            self.compress = 0
        self._connection = None
        if init:
            self.init()
    def connect(self):
        """Creates a L{SAPNIStreamSocket} connection to the host/port. If a route
        was specified, connect to the target Diag server through the SAP Router.
        """
        self._connection = SAPRoutedStreamSocket.get_nisocket(self.host,
                                                              self.port,
                                                              self.route,
                                                              base_cls=SAPDiag)
    def get_terminal_name(self):
        """Generates a random IP address to use as a terminal name. In SAP
        systems that don't implement SAP Note 1497445, the dispatcher logs
        the terminal name as provided by the client, or falls back to
        registering the IP address if the terminal name can't be resolved.
        Using a random IP address as terminal name in unpatched systems will
        make the 'terminal' field of the security audit log unreliable.
        @return: a random dotted-quad IP address string
        @rtype: C{string}
        """
        return '.'.join('%s' % randint(0, 255) for __ in range(4))
    def get_support_data_item(self, support_data):
        """Coerces the given support data into a L{SAPDiagItem}.
        Accepts a hex string (converted to L{SAPDiagSupportBits}), a
        L{SAPDiagSupportBits} instance (wrapped into a SUPPORTDATA item),
        or an already-built L{SAPDiagItem}.
        @return: the support data item, or C{None} if the input could not
            be coerced into any of the accepted forms
        @rtype: L{SAPDiagItem}
        """
        if isinstance(support_data, str):
            support_data = SAPDiagSupportBits(unhex(support_data))
        if isinstance(support_data, SAPDiagSupportBits):
            support_data = SAPDiagItem(item_type="APPL",
                                       item_id="ST_USER",
                                       item_sid="SUPPORTDATA",
                                       item_value=support_data)
        if isinstance(support_data, SAPDiagItem):
            return support_data
        return None
    def init(self):
        """Sends an initialization request. If the socket wasn't created,
        call the L{connect} method. If compression was specified, the
        initialization will be performed using the respective User
        Connect item.
        @return: initialization response (usually login screen)
        @rtype: L{SAPNI<SAPNI.SAPNI>}
        """
        if self._connection is None:
            self.connect()
        # If the connection is compressed, use the respective User Connect item
        if self.compress == 1:
            user_connect = user_connect_compressed
        else:
            user_connect = user_connect_uncompressed
        # The initialization is always performed uncompressed
        self.initialized = True  # XXX: Check that the response was ok
        return self.sr(SAPDiagDP(terminal=self.terminal) /
                       SAPDiag(compress=0, com_flag_TERM_INI=1) /
                       user_connect / self.support_data)
    def send(self, packet):
        """Sends a packet using the L{SAPNIStreamSocket}
        @param packet: packet to send
        @type packet: L{SAPDiag<SAPDiag.SAPDiag>}
        """
        if self._connection is not None:
            self._connection.send(packet)
    def receive(self):
        """Receive a L{SAPNI<SAPNI.SAPNI>} packet using the L{SAPNIStreamSocket}. Response is
        returned and also stored in L{last_response}.
        @return: packet received, or C{None} if not connected
        @rtype: L{SAPNI<SAPNI.SAPNI>}
        """
        if self._connection is not None:
            self.last_response = self._connection.recv()
            return self.last_response
        else:
            return None
    def sr(self, packet):
        """Sends and receive a L{SAPNI<SAPNI.SAPNI>} packet using the L{SAPNIStreamSocket}
        @param packet: packet to send
        @type packet: L{SAPDiag<SAPDiag.SAPDiag>}
        @return: packet received, or C{None} if not connected
        @rtype: L{SAPNI<SAPNI.SAPNI>}
        """
        if self._connection is not None:
            self.send(packet)
            self.last_response = self.receive()
            return self.last_response
        else:
            return None
    def close(self):
        """Send an 'end of connection' packet and closes the socket
        """
        try:
            self.send(SAPDiag(compress=0, com_flag_TERM_EOC=1))
            self._connection.close()
        except SocketError:  # We don't care about socket errors at this time
            pass
    def sr_message(self, msg):
        """Sends and receive a L{SAPDiag<SAPDiag.SAPDiag>} message, prepending the
        Diag header.
        @param msg: items to send
        @type msg: C{list} of L{SAPDiagItem}
        @return: server's response
        @rtype: L{SAPNI<SAPNI.SAPNI>}
        """
        return self.sr(SAPDiag(compress=self.compress, message=msg))
    def send_message(self, msg):
        """Sends a L{SAPDiag<SAPDiag.SAPDiag>} message, prepending the Diag header.
        @param msg: items to send
        @type msg: C{list} of L{SAPDiagItem}
        """
        self.send(SAPDiag(compress=self.compress, message=msg))
    def interact(self, message):
        """Interacts with the SAP Diag server, adding the L{SAPDiagStep} item and
        ending with a 'end of message' item. Requires the connection to have
        been initialized first (returns C{None} otherwise).
        @param message: items to send
        @type message: C{list} of L{SAPDiagItem}
        @return: server's response, or C{None} if not initialized
        @rtype: L{SAPNI<SAPNI.SAPNI>}
        """
        if self.initialized:
            # Each interaction advances the dialog step counter by one.
            self.step = self.step + 1
            message.insert(0, SAPDiagItem(item_type="APPL", item_id="ST_USER",
                                          item_sid=0x26,
                                          item_value=SAPDiagStep(step=self.step)))
            message.append(SAPDiagItem(item_type="EOM"))
            return self.sr_message(message)
        else:
            return None
| Minjung/pysap | pysap/SAPDiagClient.py | Python | gpl-2.0 | 9,026 |
# -*- coding: utf-8 -*-
import math
from itertools import izip, islice
from datetime import timedelta
from geopy import distance, Point as GpPoint
from geoalchemy2.shape import to_shape
def vincenty_distance(point1, point2):
    """Distance in metres between two shapely points (``x`` = lon, ``y`` = lat).

    NOTE(review): this mutates module-level geopy state (the default
    ellipsoid and the ``distance.distance`` alias) on every call.
    """
    # Configure geopy to use Vincenty's formulae on the WGS 84 ellipsoid.
    distance.VincentyDistance.ELLIPSOID = 'WGS 84'
    distance.distance = distance.VincentyDistance
    start = GpPoint(point1.y, point1.x)
    end = GpPoint(point2.y, point2.x)
    return distance.distance(start, end).meters
def compute_speed(distance, delta):
    """Average speed over *delta*, in units of *distance* per second.

    Returns 0.0 for a zero-length interval instead of dividing by zero.
    """
    seconds = delta.total_seconds()
    if not seconds:
        return 0.0
    return distance / seconds
def get_stats(points):
    """Compute distance, elevation and time statistics for an ordered track.

    Consecutive points are paired; each segment's horizontal (2D) distance,
    elevation change and elapsed time classify it as flat, ascending or
    descending. Segments traversed faster than 0.1 m/s also count towards
    the corresponding "active" time buckets.

    :param points: ordered sequence of track points exposing ``geom`` (WKB
        geometry), ``ele`` (elevation in metres, or None when missing) and
        ``time`` (datetime) attributes.
    :returns: dict with 2D/3D/flat/ascent/descent distances (metres), total
        ascent/descent (metres), min/max heights (metres) and
        active/flat/ascent/descent times (``timedelta``).
    """
    total_distance_2d = 0.0
    total_distance_3d = 0.0
    flat_distance = 0.0
    asc_distance = 0.0
    desc_distance = 0.0
    total_descent = 0.0
    total_ascent = 0.0
    active_time = 0.0
    flat_time = 0.0
    asc_time = 0.0
    desc_time = 0.0
    for current_point, next_point in izip(points, islice(points, 1, None)):
        distance_2d = vincenty_distance(
            to_shape(current_point.geom),
            to_shape(next_point.geom)
        )
        # Compare against None explicitly: an elevation of 0.0 (sea level)
        # is valid data but is falsy, so the old truthiness check wrongly
        # treated it as missing.
        if next_point.ele is not None and current_point.ele is not None:
            distance_vertical = next_point.ele - current_point.ele
        else:
            distance_vertical = 0.0
        distance_3d = math.sqrt(
            math.pow(distance_2d, 2) + math.pow(distance_vertical, 2)
        )
        delta_time = next_point.time - current_point.time
        time = delta_time.total_seconds()
        if time > 0.0:
            m = distance_3d / time  # segment speed in m/s
        else:
            m = 0.0
        total_distance_2d += distance_2d
        total_distance_3d += distance_3d
        if m > 0.1:  # faster than 0.1 m/s counts as "active" movement
            active_time += time
        if distance_vertical == 0.0:
            flat_distance += distance_3d
            if m > 0.1:
                flat_time += time
        elif distance_vertical > 0.0:
            asc_distance += distance_3d
            total_ascent += float(distance_vertical)
            if m > 0.1:
                asc_time += time
        elif distance_vertical < 0.0:
            desc_distance += distance_3d
            total_descent += float(distance_vertical)
            if m > 0.1:
                desc_time += time
    # Same fix for the height extremes: only skip genuinely missing values.
    heights = [point.ele for point in points if point.ele is not None]
    if heights:
        max_height = max(heights)
        min_height = min(heights)
    else:
        max_height = 0.0
        min_height = 0.0
    return {
        'distance_2d': total_distance_2d,
        'distance_3d': total_distance_3d,
        'distance_flat': flat_distance,
        'distance_asc': asc_distance,
        'distance_desc': desc_distance,
        'total_descent': total_descent,
        'total_ascent': total_ascent,
        'max_height': max_height,
        'min_height': min_height,
        'active_time': timedelta(seconds=active_time),
        'flat_time': timedelta(seconds=flat_time),
        'asc_time': timedelta(seconds=asc_time),
        'desc_time': timedelta(seconds=desc_time),
    }
| atlefren/mineturer2 | computations.py | Python | mit | 2,979 |
"""
.. _tut_info_objects:
The :class:`Info <mne.Info>` data structure
===========================================
The :class:`Info <mne.Info>` data object is typically created
when data is imported into MNE-Python and contains details such as:
- date, subject information, and other recording details
- the sampling rate
- information about the data channels (name, type, position, etc.)
- digitized points
- sensor–head coordinate transformation matrices
and so forth. See the :class:`the API reference <mne.Info>`
for a complete list of all data fields. Once created, this object is passed
around throughout the data analysis pipeline.
"""
import mne
import os.path as op
###############################################################################
# :class:`mne.Info` behaves as a nested Python dictionary:
# Read the info object from an example recording
info = mne.io.read_info(
    op.join(mne.datasets.sample.data_path(), 'MEG', 'sample',
            'sample_audvis_raw.fif'), verbose=False)
###############################################################################
# List all the fields in the info object
print('Keys in info dictionary:\n', info.keys())
###############################################################################
# Obtain the sampling rate of the data
print(info['sfreq'], 'Hz')
###############################################################################
# List all information about the first data channel
print(info['chs'][0])
###############################################################################
# .. _picking_channels:
#
# Obtaining subsets of channels
# -----------------------------
#
# There are a number of convenience functions to obtain channel indices, given
# an :class:`mne.Info` object.
###############################################################################
# Get channel indices by name
channel_indices = mne.pick_channels(info['ch_names'], ['MEG 0312', 'EEG 005'])
###############################################################################
# Get channel indices by regular expression
channel_indices = mne.pick_channels_regexp(info['ch_names'], 'MEG *')
###############################################################################
# Channel types
# -------------
#
# MNE supports different channel types:
#
# - eeg : For EEG channels with data stored in Volts (V)
# - meg (mag) : For MEG magnetometers channels stored in Tesla (T)
# - meg (grad) : For MEG gradiometers channels stored in Tesla/Meter (T/m)
# - ecg : For ECG channels stored in Volts (V)
# - seeg : For Stereotactic EEG channels in Volts (V).
# - ecog : For Electrocorticography (ECoG) channels in Volts (V).
# - fnirs (HBO) : Functional near-infrared spectroscopy oxyhemoglobin data.
# - fnirs (HBR) : Functional near-infrared spectroscopy deoxyhemoglobin data.
# - emg : For EMG channels stored in Volts (V)
# - bio : For biological channels (AU).
# - stim : For the stimulus (a.k.a. trigger) channels (AU)
# - resp : For the response-trigger channel (AU)
# - chpi : For HPI coil channels (T).
# - exci : Flux excitation channel used to be a stimulus channel.
# - ias : For Internal Active Shielding data (maybe on Triux only).
# - syst : System status channel information (on Triux systems only).
#
# Get channel indices by type
channel_indices = mne.pick_types(info, meg=True)  # MEG only
channel_indices = mne.pick_types(info, meg=False, eeg=True)  # EEG only
###############################################################################
# MEG gradiometers and EEG channels
channel_indices = mne.pick_types(info, meg='grad', eeg=True)
###############################################################################
# Get a dictionary of channel indices, grouped by channel type
# NOTE(review): mne.io.pick is a private module -- confirm these helpers are
# still exported under this path in newer MNE releases.
channel_indices_by_type = mne.io.pick.channel_indices_by_type(info)
print('The first three magnetometers:', channel_indices_by_type['mag'][:3])
###############################################################################
# Obtaining information about channels
# ------------------------------------
# Channel type of a specific channel
channel_type = mne.io.pick.channel_type(info, 75)
print('Channel #75 is of type:', channel_type)
###############################################################################
# Channel types of a collection of channels
meg_channels = mne.pick_types(info, meg=True)[:10]
channel_types = [mne.io.pick.channel_type(info, ch) for ch in meg_channels]
print('First 10 MEG channels are of type:\n', channel_types)
###############################################################################
# Dropping channels from an info structure
# ----------------------------------------
#
# It is possible to limit the info structure to only include a subset of
# channels with the :func:`mne.pick_info` function:
# Only keep EEG channels
eeg_indices = mne.pick_types(info, meg=False, eeg=True)
reduced_info = mne.pick_info(info, eeg_indices)
print(reduced_info)
| mne-tools/mne-tools.github.io | 0.17/_downloads/949f22b6526de1d6872c784fcf713da4/plot_info.py | Python | bsd-3-clause | 4,950 |
"""Test config validators."""
from datetime import date, datetime, timedelta
import enum
import os
from socket import _GLOBAL_DEFAULT_TIMEOUT
from unittest.mock import Mock, patch
import uuid
import pytest
import voluptuous as vol
import homeassistant
from homeassistant.helpers import config_validation as cv, template
def test_boolean():
    """Test boolean validation."""
    schema = vol.Schema(cv.boolean)
    # None of these map unambiguously to a boolean, so validation must fail.
    for value in (
        None,
        "T",
        "negative",
        "lock",
        "tr ue",
        [],
        [1, 2],
        {"one": "two"},
        test_boolean,
    ):
        with pytest.raises(vol.MultipleInvalid):
            schema(value)
    # Truthy spellings (case-insensitive, padded) and non-zero numbers.
    for value in ("true", "On", "1", "YES", " true ", "enable", 1, 50, True, 0.1):
        assert schema(value)
    # Falsy spellings and zero.
    for value in ("false", "Off", "0", "NO", "disable", 0, False):
        assert not schema(value)
def test_latitude():
    """Test latitude validation."""
    schema = vol.Schema(cv.latitude)
    # Latitudes must be numeric and within [-90, 90].
    for value in ("invalid", None, -91, 91, "-91", "91", "123.01A"):
        with pytest.raises(vol.MultipleInvalid):
            schema(value)
    for value in ("-89", 89, "12.34"):
        schema(value)
def test_longitude():
    """Test longitude validation."""
    schema = vol.Schema(cv.longitude)
    # Longitudes must be numeric and within [-180, 180].
    for value in ("invalid", None, -181, 181, "-181", "181", "123.01A"):
        with pytest.raises(vol.MultipleInvalid):
            schema(value)
    for value in ("-179", 179, "12.34"):
        schema(value)
def test_port():
    """Test TCP/UDP network port."""
    schema = vol.Schema(cv.port)
    # Valid ports are integers (or numeric strings) in the range 1-65535.
    for value in ("invalid", None, -1, 0, 80000, "81000"):
        with pytest.raises(vol.MultipleInvalid):
            schema(value)
    for value in ("1000", 21, 24574):
        schema(value)
def test_isfile():
    """Validate that the value is an existing file."""
    schema = vol.Schema(cv.isfile)
    fake_file = "this-file-does-not.exist"
    # Sanity check: the fixture path really must not exist on disk.
    assert not os.path.isfile(fake_file)
    for value in ("invalid", None, -1, 0, 80000, fake_file):
        with pytest.raises(vol.Invalid):
            schema(value)
    # patching methods that allow us to fake a file existing
    # with write access
    with patch("os.path.isfile", Mock(return_value=True)), patch(
        "os.access", Mock(return_value=True)
    ):
        schema("test.txt")
def test_url():
    """Test URL."""
    schema = vol.Schema(cv.url)
    # Malformed schemes ("htp", missing ':') and junk hosts are rejected.
    for value in (
        "invalid",
        None,
        100,
        "htp://ha.io",
        "http//ha.io",
        "http://??,**",
        "https://??,**",
    ):
        with pytest.raises(vol.MultipleInvalid):
            schema(value)
    for value in (
        "http://localhost",
        "https://localhost/test/index.html",
        "http://home-assistant.io",
        "http://home-assistant.io/test/",
        "https://community.home-assistant.io/",
    ):
        assert schema(value)
def test_platform_config():
    """Test platform config validation."""
    # The strict schema requires a 'platform' key ...
    options = ({}, {"hello": "world"})
    for value in options:
        with pytest.raises(vol.MultipleInvalid):
            cv.PLATFORM_SCHEMA(value)
    # ... while the base schema also tolerates extra keys.
    options = ({"platform": "mqtt"}, {"platform": "mqtt", "beer": "yes"})
    for value in options:
        cv.PLATFORM_SCHEMA_BASE(value)
def test_ensure_list():
    """Test ensure_list."""
    schema = vol.Schema(cv.ensure_list)
    # Scalars and dicts are wrapped in a list; None becomes the empty list.
    assert [] == schema(None)
    assert [1] == schema(1)
    assert [1] == schema([1])
    assert ["1"] == schema("1")
    assert ["1"] == schema(["1"])
    assert [{"1": "2"}] == schema({"1": "2"})
def test_entity_id():
    """Test entity ID validation."""
    schema = vol.Schema(cv.entity_id)
    with pytest.raises(vol.MultipleInvalid):
        schema("invalid_entity")
    # Valid entity IDs are normalized to lower case.
    assert schema("sensor.LIGHT") == "sensor.light"
def test_entity_ids():
    """Test entity ID validation."""
    schema = vol.Schema(cv.entity_ids)
    # Any invalid member (even inside a CSV string) fails the whole input.
    options = (
        "invalid_entity",
        "sensor.light,sensor_invalid",
        ["invalid_entity"],
        ["sensor.light", "sensor_invalid"],
        ["sensor.light,sensor_invalid"],
    )
    for value in options:
        with pytest.raises(vol.MultipleInvalid):
            schema(value)
    options = ([], ["sensor.light"], "sensor.light")
    for value in options:
        schema(value)
    # CSV strings are split, stripped and lower-cased.
    assert schema("sensor.LIGHT, light.kitchen ") == ["sensor.light", "light.kitchen"]
def test_entity_domain():
    """Test entity domain validation."""
    schema = vol.Schema(cv.entity_domain("sensor"))
    for value in (
        "invalid_entity",
        "cover.demo",
        "cover.demo,sensor.another_entity",
        "",
    ):
        with pytest.raises(vol.MultipleInvalid):
            schema(value)
    assert schema("sensor.LIGHT") == "sensor.light"
    # The validator also accepts a tuple of allowed domains.
    schema = vol.Schema(cv.entity_domain(("sensor", "binary_sensor")))
    for value in ("invalid_entity", "cover.demo"):
        with pytest.raises(vol.MultipleInvalid):
            schema(value)
    assert schema("sensor.LIGHT") == "sensor.light"
    assert schema("binary_sensor.LIGHT") == "binary_sensor.light"
def test_entities_domain():
    """Test entities domain validation."""
    schema = vol.Schema(cv.entities_domain("sensor"))
    # Every member of the list must belong to the configured domain.
    options = (
        None,
        "",
        "invalid_entity",
        ["sensor.light", "cover.demo"],
        ["sensor.light", "sensor_invalid"],
    )
    for value in options:
        with pytest.raises(vol.MultipleInvalid):
            schema(value)
    options = ("sensor.light", ["SENSOR.light"], ["sensor.light", "sensor.demo"])
    for value in options:
        schema(value)
    assert schema("sensor.LIGHT, sensor.demo ") == ["sensor.light", "sensor.demo"]
    assert schema(["sensor.light", "SENSOR.demo"]) == ["sensor.light", "sensor.demo"]
def test_ensure_list_csv():
    """Test ensure_list_csv."""
    schema = vol.Schema(cv.ensure_list_csv)
    options = (None, 12, [], ["string"], "string1,string2")
    for value in options:
        schema(value)
    # CSV strings are split on commas and whitespace-stripped.
    assert schema("string1, string2 ") == ["string1", "string2"]
def test_event_schema():
    """Test event_schema validation."""
    # Missing 'event' key or non-dict 'event_data' is rejected.
    options = (
        {},
        None,
        {"event_data": {}},
        {"event": "state_changed", "event_data": 1},
    )
    for value in options:
        with pytest.raises(vol.MultipleInvalid):
            cv.EVENT_SCHEMA(value)
    options = (
        {"event": "state_changed"},
        {"event": "state_changed", "event_data": {"hello": "world"}},
    )
    for value in options:
        cv.EVENT_SCHEMA(value)
def test_icon():
    """Test icon validation."""
    schema = vol.Schema(cv.icon)
    # Icons must be strings containing a "<prefix>:<name>" separator.
    for value in (False, "work"):
        with pytest.raises(vol.MultipleInvalid):
            schema(value)
    schema("mdi:work")
    schema("custom:prefix")
def test_time_period():
    """Test time_period validation."""
    schema = vol.Schema(cv.time_period)
    # Malformed clock strings, fractional hour/minute components and dicts
    # with unknown keys are all rejected.
    options = (
        None,
        "",
        "hello:world",
        "12:",
        "12:34:56:78",
        {},
        {"wrong_key": -10},
        "12.5:30",
        "12:30.5",
        "12.5:30:30",
        "12:30.5:30",
    )
    for value in options:
        with pytest.raises(vol.MultipleInvalid):
            schema(value)
    # (input, expected timedelta) pairs covering clock strings (with sign and
    # fractional seconds), bare numbers of seconds, and keyword dicts.
    options = (
        ("8:20", timedelta(hours=8, minutes=20)),
        ("23:59", timedelta(hours=23, minutes=59)),
        ("-8:20", -1 * timedelta(hours=8, minutes=20)),
        ("-1:15", -1 * timedelta(hours=1, minutes=15)),
        ("-23:59:59", -1 * timedelta(hours=23, minutes=59, seconds=59)),
        ("-48:00", -1 * timedelta(days=2)),
        ({"minutes": 5}, timedelta(minutes=5)),
        (1, timedelta(seconds=1)),
        ("5", timedelta(seconds=5)),
        ("180", timedelta(seconds=180)),
        ("00:08:20.5", timedelta(minutes=8, seconds=20, milliseconds=500)),
        ("00:23:59.999", timedelta(minutes=23, seconds=59, milliseconds=999)),
        ("-00:08:20.5", -1 * timedelta(minutes=8, seconds=20, milliseconds=500)),
        (
            "-12:59:59.999",
            -1 * timedelta(hours=12, minutes=59, seconds=59, milliseconds=999),
        ),
        ({"milliseconds": 1.5}, timedelta(milliseconds=1, microseconds=500)),
        ({"seconds": "1.5"}, timedelta(seconds=1, milliseconds=500)),
        ({"minutes": "1.5"}, timedelta(minutes=1, seconds=30)),
        ({"hours": -1.5}, -1 * timedelta(hours=1, minutes=30)),
        ({"days": "-1.5"}, -1 * timedelta(days=1, hours=12)),
    )
    for value, result in options:
        assert schema(value) == result
def test_remove_falsy():
    """Test remove falsy."""
    # Falsy members (0, None, empty containers/strings) are filtered out.
    assert cv.remove_falsy([0, None, 1, "1", {}, [], ""]) == [1, "1"]
def test_service():
    """Test service validation."""
    schema = vol.Schema(cv.service)
    # Services must use the "<domain>.<service>" form.
    with pytest.raises(vol.MultipleInvalid):
        schema("invalid_turn_on")
    schema("homeassistant.turn_on")
def test_service_schema():
    """Test service_schema validation."""
    # Invalid: missing service, both service and service_template set,
    # data without a service, non-dict data, and broken templates.
    options = (
        {},
        None,
        {
            "service": "homeassistant.turn_on",
            "service_template": "homeassistant.turn_on",
        },
        {"data": {"entity_id": "light.kitchen"}},
        {"service": "homeassistant.turn_on", "data": None},
        {
            "service": "homeassistant.turn_on",
            "data_template": {"brightness": "{{ no_end"},
        },
    )
    for value in options:
        with pytest.raises(vol.MultipleInvalid):
            cv.SERVICE_SCHEMA(value)
    options = (
        {"service": "homeassistant.turn_on"},
        {"service": "homeassistant.turn_on", "entity_id": "light.kitchen"},
        {"service": "light.turn_on", "entity_id": "all"},
        {
            "service": "homeassistant.turn_on",
            "entity_id": ["light.kitchen", "light.ceiling"],
        },
    )
    for value in options:
        cv.SERVICE_SCHEMA(value)
def test_slug():
    """Test slug validation."""
    schema = vol.Schema(cv.slug)
    # Spaces are not allowed; integers are coerced into their slug form.
    for value in (None, "hello world"):
        with pytest.raises(vol.MultipleInvalid):
            schema(value)
    for value in (12345, "hello"):
        schema(value)
def test_string(hass):
    """Test string validation."""
    schema = vol.Schema(cv.string)
    with pytest.raises(vol.Invalid):
        schema(None)
    with pytest.raises(vol.Invalid):
        schema([])
    with pytest.raises(vol.Invalid):
        schema({})
    for value in (True, 1, "hello"):
        schema(value)
    # Test template support
    # Rendered templates yield ResultWrapper objects that behave like their
    # native value; cv.string must stringify them back to the template text.
    for text, native in (
        ("[1, 2]", [1, 2]),
        ("{1, 2}", {1, 2}),
        ("(1, 2)", (1, 2)),
        ('{"hello": True}', {"hello": True}),
    ):
        tpl = template.Template(text, hass)
        result = tpl.async_render()
        assert isinstance(result, template.ResultWrapper)
        assert result == native
        assert schema(result) == text
def test_string_with_no_html():
    """Test string with no html validation."""
    schema = vol.Schema(cv.string_with_no_html)
    with pytest.raises(vol.Invalid):
        schema("This has HTML in it <a>Link</a>")
    with pytest.raises(vol.Invalid):
        schema("<b>Bold</b>")
    # Markdown markup is allowed -- only HTML tags are rejected.
    for value in (
        True,
        3,
        "Hello",
        "**Hello**",
        "This has no HTML [Link](https://home-assistant.io)",
    ):
        schema(value)
def test_temperature_unit():
    """Test temperature unit validation."""
    schema = vol.Schema(cv.temperature_unit)
    # Only Celsius ("C") and Fahrenheit ("F") are accepted; Kelvin is not.
    with pytest.raises(vol.MultipleInvalid):
        schema("K")
    schema("C")
    schema("F")
def test_x10_address():
    """Test x10 addr validator.

    Valid X10 addresses are a house code A-P (case-insensitive) followed
    by a unit number 1-16.
    """
    schema = vol.Schema(cv.x10_address)
    # Unit 1 is fine, but "Q" is beyond house code "P".
    with pytest.raises(vol.Invalid):
        schema("Q1")
    # Unit 55 is out of range (and "q" out of the house-code range too).
    with pytest.raises(vol.Invalid):
        schema("q55")
    # Arbitrary text is not an address at all.
    with pytest.raises(vol.Invalid):
        schema("garbage_addr")
    schema("a1")
    schema("C11")
def test_template():
    """Test template validator."""
    schema = vol.Schema(cv.template)
    # Unterminated Jinja expressions/blocks and non-string/number types fail.
    for value in (None, "{{ partial_print }", "{% if True %}Hello", ["test"]):
        with pytest.raises(vol.Invalid):
            schema(value)
    options = (
        1,
        "Hello",
        "{{ beer }}",
        "{% if 1 == 1 %}Hello{% else %}World{% endif %}",
    )
    for value in options:
        schema(value)
def test_dynamic_template():
    """Test dynamic template validator."""
    schema = vol.Schema(cv.dynamic_template)
    # Unlike cv.template, plain strings without any Jinja markup are rejected.
    for value in (
        None,
        1,
        "{{ partial_print }",
        "{% if True %}Hello",
        ["test"],
        "just a string",
    ):
        with pytest.raises(vol.Invalid):
            schema(value)
    options = (
        "{{ beer }}",
        "{% if 1 == 1 %}Hello{% else %}World{% endif %}",
    )
    for value in options:
        schema(value)
def test_template_complex():
    """Test template_complex validator."""
    schema = vol.Schema(cv.template_complex)
    for value in ("{{ partial_print }", "{% if True %}Hello"):
        with pytest.raises(vol.MultipleInvalid):
            schema(value)
    # Templates may appear nested inside dicts and lists.
    options = (
        1,
        "Hello",
        "{{ beer }}",
        "{% if 1 == 1 %}Hello{% else %}World{% endif %}",
        {"test": 1, "test2": "{{ beer }}"},
        ["{{ beer }}", 1],
    )
    for value in options:
        schema(value)
    # ensure the validator didn't mutate the input
    assert options == (
        1,
        "Hello",
        "{{ beer }}",
        "{% if 1 == 1 %}Hello{% else %}World{% endif %}",
        {"test": 1, "test2": "{{ beer }}"},
        ["{{ beer }}", 1],
    )
    # Ensure we don't mutate non-string types that cannot be templates.
    for value in (1, True, None):
        assert schema(value) == value
def test_time_zone():
    """Test time zone validation."""
    schema = vol.Schema(cv.time_zone)
    # Only IANA database names are accepted.
    with pytest.raises(vol.MultipleInvalid):
        schema("America/Do_Not_Exist")
    schema("America/Los_Angeles")
    schema("UTC")
def test_date():
    """Test date validation."""
    schema = vol.Schema(cv.date)
    # Times and full datetimes are not bare dates.
    for value in ["Not a date", "23:42", "2016-11-23T18:59:08"]:
        with pytest.raises(vol.Invalid):
            schema(value)
    schema(datetime.now().date())
    schema("2016-11-23")
def test_time():
    """Test time validation."""
    schema = vol.Schema(cv.time)
    # Dates and full datetimes are not bare times.
    for value in ["Not a time", "2016-11-23", "2016-11-23T18:59:08"]:
        with pytest.raises(vol.Invalid):
            schema(value)
    schema(datetime.now().time())
    schema("23:42:00")
    schema("23:42")
def test_datetime():
    """Test date time validation."""
    schema = vol.Schema(cv.datetime)
    # A bare date object is not a datetime.
    for value in [date.today(), "Wrong DateTime"]:
        with pytest.raises(vol.MultipleInvalid):
            schema(value)
    schema(datetime.now())
    schema("2016-11-23T18:59:08")
def test_multi_select():
    """Test multi select validation.
    Expected behavior:
        - Will not accept any input but a list
        - Will not accept selections outside of configured scope
    """
    schema = vol.Schema(cv.multi_select({"paulus": "Paulus", "robban": "Robban"}))
    # A bare string is not a list of selections.
    with pytest.raises(vol.Invalid):
        schema("robban")
    # "martinhj" is not one of the configured options, so this must fail.
    with pytest.raises(vol.Invalid):
        schema(["paulus", "martinhj"])
    schema(["robban", "paulus"])
def test_multi_select_in_serializer():
    """Test multi_select with custom_serializer."""
    # The serializer exposes both the type and the configured options.
    assert cv.custom_serializer(cv.multi_select({"paulus": "Paulus"})) == {
        "type": "multi_select",
        "options": {"paulus": "Paulus"},
    }
def test_boolean_in_serializer():
    """Test boolean with custom_serializer."""
    serialized = cv.custom_serializer(cv.boolean)
    assert serialized == {"type": "boolean"}
def test_string_in_serializer():
    """Test string with custom_serializer."""
    serialized = cv.custom_serializer(cv.string)
    assert serialized == {"type": "string"}
def test_positive_time_period_dict_in_serializer():
    """Test positive_time_period_dict with custom_serializer."""
    serialized = cv.custom_serializer(cv.positive_time_period_dict)
    assert serialized == {"type": "positive_time_period_dict"}
@pytest.fixture
def schema():
    """Create a schema used for testing deprecation."""
    # Three boolean keys so tests can exercise deprecated key, replacement
    # key, and an unrelated key.
    return vol.Schema(
        {"venus": cv.boolean, "mars": cv.boolean, "jupiter": cv.boolean}
    )
@pytest.fixture
def version(monkeypatch):
    """Patch the version used for testing to 0.5.0."""
    # Tests opt in by requesting this fixture; monkeypatch undoes the
    # change automatically after each test.
    monkeypatch.setattr(homeassistant.const, "__version__", "0.5.0")
def test_deprecated_with_no_optionals(caplog, schema):
    """
    Test deprecation behaves correctly when optional params are None.

    Expected behavior:
        - Outputs the appropriate deprecation warning if key is detected
        - Processes schema without changing any values
        - No warning or difference in output if key is not provided
    """
    deprecated_schema = vol.All(cv.deprecated("mars"), schema)

    # Deprecated key present: exactly one warning, payload unchanged.
    test_data = {"mars": True}
    output = deprecated_schema(test_data.copy())
    assert len(caplog.records) == 1
    # The record may be attributed to this test module or to the helper
    # module itself, depending on how the caller's frame is resolved.
    assert caplog.records[0].name in [
        __name__,
        "homeassistant.helpers.config_validation",
    ]
    assert (
        "The 'mars' option is deprecated, please remove it from your configuration"
    ) in caplog.text
    assert test_data == output

    # Deprecated key absent: no warning and no mutation.
    caplog.clear()
    assert len(caplog.records) == 0
    test_data = {"venus": True}
    output = deprecated_schema(test_data.copy())
    assert len(caplog.records) == 0
    assert test_data == output
def test_deprecated_with_replacement_key(caplog, schema):
    """
    Test deprecation behaves correctly when only a replacement key is provided.

    Expected behavior:
        - Outputs the appropriate deprecation warning if key is detected
        - Processes schema moving the value from key to replacement_key
        - Processes schema changing nothing if only replacement_key provided
        - No warning if only replacement_key provided
        - No warning or difference in output if neither key nor
          replacement_key are provided
    """
    deprecated_schema = vol.All(
        cv.deprecated("mars", replacement_key="jupiter"), schema
    )

    # Deprecated key present: warn and migrate value to 'jupiter'.
    test_data = {"mars": True}
    output = deprecated_schema(test_data.copy())
    assert len(caplog.records) == 1
    assert (
        "The 'mars' option is deprecated, please replace it with 'jupiter'"
    ) in caplog.text
    assert {"jupiter": True} == output

    # Replacement key only: no warning, data passes through unchanged.
    caplog.clear()
    assert len(caplog.records) == 0
    test_data = {"jupiter": True}
    output = deprecated_schema(test_data.copy())
    assert len(caplog.records) == 0
    assert test_data == output

    # Neither key present: no warning, unchanged.
    test_data = {"venus": True}
    output = deprecated_schema(test_data.copy())
    assert len(caplog.records) == 0
    assert test_data == output
def test_deprecated_with_default(caplog, schema):
    """
    Test deprecation behaves correctly with a default value.

    This is likely a scenario that would never occur.

    Expected behavior:
        - Behaves identically as when the default value was not present
    """
    deprecated_schema = vol.All(cv.deprecated("mars", default=False), schema)

    # Deprecated key present: one warning, attributed to this test module.
    test_data = {"mars": True}
    output = deprecated_schema(test_data.copy())
    assert len(caplog.records) == 1
    assert caplog.records[0].name == __name__
    assert (
        "The 'mars' option is deprecated, please remove it from your configuration"
    ) in caplog.text
    assert test_data == output

    # Key absent: no warning and no mutation (default is NOT injected here
    # because no replacement_key was configured).
    caplog.clear()
    assert len(caplog.records) == 0
    test_data = {"venus": True}
    output = deprecated_schema(test_data.copy())
    assert len(caplog.records) == 0
    assert test_data == output
def test_deprecated_with_replacement_key_and_default(caplog, schema):
    """
    Test deprecation with a replacement key and default.

    Expected behavior:
        - Outputs the appropriate deprecation warning if key is detected
        - Processes schema moving the value from key to replacement_key
        - Processes schema changing nothing if only replacement_key provided
        - No warning if only replacement_key provided
        - No warning if neither key nor replacement_key are provided
            - Adds replacement_key with default value in this case
    """
    deprecated_schema = vol.All(
        cv.deprecated("mars", replacement_key="jupiter", default=False), schema
    )

    # Deprecated key present: warn and migrate value to 'jupiter'.
    test_data = {"mars": True}
    output = deprecated_schema(test_data.copy())
    assert len(caplog.records) == 1
    assert (
        "The 'mars' option is deprecated, please replace it with 'jupiter'"
    ) in caplog.text
    assert {"jupiter": True} == output

    # Replacement key only: no warning, unchanged.
    caplog.clear()
    assert len(caplog.records) == 0
    test_data = {"jupiter": True}
    output = deprecated_schema(test_data.copy())
    assert len(caplog.records) == 0
    assert test_data == output

    # Neither key present: no warning, but the default is injected under
    # the replacement key.
    test_data = {"venus": True}
    output = deprecated_schema(test_data.copy())
    assert len(caplog.records) == 0
    assert {"venus": True, "jupiter": False} == output

    # Same behavior when the voluptuous defaults fire *before* the
    # deprecation validator runs.
    deprecated_schema_with_default = vol.All(
        vol.Schema(
            {
                "venus": cv.boolean,
                vol.Optional("mars", default=False): cv.boolean,
                vol.Optional("jupiter", default=False): cv.boolean,
            }
        ),
        cv.deprecated("mars", replacement_key="jupiter", default=False),
    )

    test_data = {"mars": True}
    output = deprecated_schema_with_default(test_data.copy())
    assert len(caplog.records) == 1
    assert (
        "The 'mars' option is deprecated, please replace it with 'jupiter'"
    ) in caplog.text
    assert {"jupiter": True} == output
def test_deprecated_cant_find_module():
    """Test if the current module cannot be inspected."""
    # Simulate inspect failing to resolve the calling module; building the
    # validator must not blow up (this used to raise).
    with patch("inspect.getmodule", return_value=None):
        cv.deprecated("mars", replacement_key="jupiter", default=False)
def test_key_dependency():
    """Test key_dependency validator."""
    schema = vol.Schema(cv.key_dependency("beer", "soda"))

    # BUG FIX: `options` was previously a dict, so iterating it yielded the
    # *key string* "beer" instead of a dict payload -- the validator then
    # failed for the wrong reason (input not a dict) rather than exercising
    # the missing-dependency path. Use a tuple of dicts like the valid loop.
    options = ({"beer": None},)
    for value in options:
        with pytest.raises(vol.MultipleInvalid):
            schema(value)

    # Dependency satisfied, dependency key alone, and empty dict all pass.
    options = ({"beer": None, "soda": None}, {"soda": None}, {})
    for value in options:
        schema(value)
def test_has_at_most_one_key():
    """Test has_at_most_one_key validator."""
    schema = vol.Schema(cv.has_at_most_one_key("beer", "soda"))

    # Non-dicts and dicts with both keys are rejected.
    invalid_inputs = (None, [], {"beer": None, "soda": None})
    for invalid in invalid_inputs:
        with pytest.raises(vol.MultipleInvalid):
            schema(invalid)

    # Zero or one of the keys is fine.
    valid_inputs = ({}, {"beer": None}, {"soda": None})
    for valid in valid_inputs:
        schema(valid)
def test_has_at_least_one_key():
    """Test has_at_least_one_key validator."""
    schema = vol.Schema(cv.has_at_least_one_key("beer", "soda"))

    # Non-dicts, empty dicts and dicts without either key are rejected.
    invalid_inputs = (None, [], {}, {"wine": None})
    for invalid in invalid_inputs:
        with pytest.raises(vol.MultipleInvalid):
            schema(invalid)

    # Either key on its own satisfies the validator.
    for valid in ({"beer": None}, {"soda": None}):
        schema(valid)
def test_enum():
    """Test enum validator."""

    class SampleEnum(enum.Enum):
        """Test enum."""

        value1 = "Value 1"
        value2 = "Value 2"

    schema = vol.Schema(cv.enum(SampleEnum))

    # A name that is not a member must be rejected.
    with pytest.raises(vol.Invalid):
        schema("value3")
def test_socket_timeout():  # pylint: disable=invalid-name
    """Test socket timeout validator."""
    schema = vol.Schema(cv.socket_timeout)

    # Zero and negative timeouts are rejected.
    for bad in (0.0, -1):
        with pytest.raises(vol.Invalid):
            schema(bad)

    # None maps to the global default; positive ints coerce to float.
    assert schema(None) == _GLOBAL_DEFAULT_TIMEOUT
    assert schema(1) == 1.0
def test_matches_regex():
    """Test matches_regex validator."""
    schema = vol.Schema(cv.matches_regex(".*uiae.*"))

    # Non-strings and non-matching strings are rejected.
    for bad in (1.0, " nrtd "):
        with pytest.raises(vol.Invalid):
            schema(bad)

    # A matching string is returned unchanged.
    test_str = "This is a test including uiae."
    assert schema(test_str) == test_str
def test_is_regex():
    """Test the is_regex validator."""
    schema = vol.Schema(cv.is_regex)

    # An unbalanced pattern and a non-string are both rejected.
    for bad in ("(", {"a dict": "is not a regex"}):
        with pytest.raises(vol.Invalid):
            schema(bad)

    # A syntactically valid pattern passes.
    schema(".*")
def test_comp_entity_ids():
    """Test config validation for component entity IDs."""
    schema = vol.Schema(cv.comp_entity_ids)

    # "all" in any casing, single IDs, lists of IDs, and the empty list
    # are all accepted.
    valid_inputs = (
        "ALL",
        "all",
        "AlL",
        "light.kitchen",
        ["light.kitchen"],
        ["light.kitchen", "light.ceiling"],
        [],
    )
    for valid in valid_inputs:
        schema(valid)

    # Malformed IDs, wildcards and empty strings are rejected.
    invalid_inputs = (["light.kitchen", "not-entity-id"], "*", "")
    for invalid in invalid_inputs:
        with pytest.raises(vol.Invalid):
            schema(invalid)
def test_uuid4_hex(caplog):
    """Test uuid validation."""
    schema = vol.Schema(cv.uuid4_hex)

    # Non-hex strings and non-strings are rejected.
    for bad in ("Not a hex string", "0", 0):
        with pytest.raises(vol.Invalid):
            schema(bad)

    # Version nibble (13th char) must be 4.
    with pytest.raises(vol.Invalid):
        schema("a03d31b22eee1acc9b90eec40be6ed23")

    # Variant nibble (17th char) must be 8-a.
    with pytest.raises(vol.Invalid):
        schema("a03d31b22eee4acc7b90eec40be6ed23")

    # A genuine uuid4 hex validates and is normalized to lowercase.
    _hex = uuid.uuid4().hex
    assert schema(_hex) == _hex
    assert schema(_hex.upper()) == _hex
def test_key_value_schemas():
    """Test key value schemas."""
    # Dispatch on the value of "mode": each mode selects its own sub-schema.
    schema = vol.Schema(
        cv.key_value_schemas(
            "mode",
            {
                "number": vol.Schema({"mode": "number", "data": int}),
                "string": vol.Schema({"mode": "string", "data": str}),
            },
        )
    )

    # Non-dict input gets a friendly top-level message.
    with pytest.raises(vol.Invalid) as excinfo:
        schema(True)
    assert str(excinfo.value) == "Expected a dictionary"

    # Missing or unknown mode lists the valid options in the error.
    for mode in None, "invalid":
        with pytest.raises(vol.Invalid) as excinfo:
            schema({"mode": mode})
        assert (
            str(excinfo.value)
            == f"Unexpected value for mode: '{mode}'. Expected number, string"
        )

    # Once dispatched, the selected sub-schema produces its own errors.
    with pytest.raises(vol.Invalid) as excinfo:
        schema({"mode": "number", "data": "string-value"})
    assert str(excinfo.value) == "expected int for dictionary value @ data['data']"

    with pytest.raises(vol.Invalid) as excinfo:
        schema({"mode": "string", "data": 1})
    assert str(excinfo.value) == "expected str for dictionary value @ data['data']"

    # Matching mode/data combinations validate cleanly.
    for mode, data in (("number", 1), ("string", "hello")):
        schema({"mode": mode, "data": data})
def test_script(caplog):
    """Test script validation is user friendly."""
    # Each invalid action payload should surface a human-readable message
    # rather than a raw voluptuous traceback.
    for data, msg in (
        ({"delay": "{{ invalid"}, "should be format 'HH:MM'"),
        ({"wait_template": "{{ invalid"}, "invalid template"),
        ({"condition": "invalid"}, "Unexpected value for condition: 'invalid'"),
        ({"event": None}, "string value is None for dictionary value @ data['event']"),
        (
            {"device_id": None},
            "string value is None for dictionary value @ data['device_id']",
        ),
        (
            {"scene": "light.kitchen"},
            "Entity ID 'light.kitchen' does not belong to domain 'scene'",
        ),
    ):
        with pytest.raises(vol.Invalid) as excinfo:
            cv.script_action(data)

        assert msg in str(excinfo.value)
def test_whitespace():
    """Test whitespace validation."""
    schema = vol.Schema(cv.whitespace)

    # BUG FIX: the tuple previously contained `"" "T"` -- two adjacent string
    # literals (a missing comma) that Python concatenates into just "T", so
    # the empty string was never part of the test data. Keep "T" explicitly;
    # the empty string itself *passes* whitespace validation, so it must not
    # be added to this invalid list.
    for value in (
        None,
        "T",
        "negative",
        "lock",
        "tr ue",
        [],
        [1, 2],
        {"one": "two"},
    ):
        with pytest.raises(vol.MultipleInvalid):
            schema(value)

    # Pure-whitespace strings validate truthily.
    for value in (" ", "  "):
        assert schema(value)
| turbokongen/home-assistant | tests/helpers/test_config_validation.py | Python | apache-2.0 | 27,470 |
#TODO: This file will generate a database and popuplate it will fake data
import json
from app import app, db
from app import models
# Create the tables, then populate them from the bundled JSON fixture.
db.create_all()

# BUG FIX: the JSON file handle was previously opened with open() and never
# closed; a with-block guarantees it is released even if parsing fails.
with open('default_services.json') as json_data:
    data = json.load(json_data)

# One Service row per JSON entry; echo each record for operator visibility.
for service in data:
    s = models.Service()
    s.serviceName = service['name']
    s.description = service['description']
    s.metaData = service['metadata']
    s.type = service['type']
    db.session.add(s)
    print("=====")
    print("Creating New Service")
    print(service['name'])
    print(service['description'])
    print(service['metadata'])
    print(service['type'])
    print("=====\n")

# Persist all queued inserts in a single transaction.
db.session.commit()
| byu-osl/city-issue-tracker | generate_db.py | Python | gpl-2.0 | 726 |
import braintree
from braintree.resource import Resource
# NEXT_MAJOR_VERSION - rename to GooglePayCard
class AndroidPayCard(Resource):
    """
    A class representing Braintree Android Pay card objects.
    """
    def __init__(self, gateway, attributes):
        Resource.__init__(self, gateway, attributes)

        # Mirror the gateway's "expired" flag under the conventional
        # is_expired name when it is present in the response.
        if hasattr(self, 'expired'):
            self.is_expired = self.expired

        # Inflate raw subscription dicts into Subscription resources.
        if "subscriptions" in attributes:
            self.subscriptions = [braintree.subscription.Subscription(gateway, subscription) for subscription in self.subscriptions]

    @property
    def expiration_date(self):
        # "MM/YYYY" assembled from the separate month/year fields.
        return self.expiration_month + "/" + self.expiration_year

    @property
    def last_4(self):
        # Last four digits of the device-specific virtual card number.
        return self.virtual_card_last_4

    @property
    def card_type(self):
        # Card brand of the underlying virtual card.
        return self.virtual_card_type
| braintree/braintree_python | braintree/android_pay_card.py | Python | mit | 836 |
"""
AUTHOR: Dr. Andrew David Burbanks, 2005.
This software is Copyright (C) 2004-2008 Bristol University
and is released under the GNU General Public License version 2.
MODULE: Powers
PURPOSE:
At present, this is a nasty mechanism for switching between powers
representations.
NOTES:
A program making use of powers imports this class in order to ensure a
single powers representation is in use in the program. The need for
this arose when implementing hash for storage of disparate powers
representations in the same dictionary, or comparison based on hash
values.
There must be cleaner ways to accomplish this!
"""
from PowersBase import PowersBase

# Named switch replacing the previous magic "if 0:"; flip to True to use the
# dense tuple-based powers representation instead of the sparse one. Exactly
# one representation is exported as Powers for the whole program.
USE_TUPLE_POWERS = False

if USE_TUPLE_POWERS:
    from TuplePowers import TuplePowers as Powers
else:
    from SparsePowers import SparsePowers as Powers
| Peter-Collins/NormalForm | src/py/Powers.py | Python | gpl-2.0 | 776 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from uthportal.tasks.course import CourseTask
class ce342(CourseTask):
    # Prototype document for this course; the scraper fills announcements
    # from the e-class RSS feed referenced below.
    document_prototype = {
        'code': 'ce342',
        'announcements': {
            # No standalone course site; announcements come from e-class only.
            'link_site': '',
            'link_eclass': 'http://eclass.uth.gr/eclass/modules/announcements/rss.php?c=MHX256'
        },
        'info': {
            'name': u'Ψηφιακή Επεξεργασία Σήματος',
            'code_site': u'HY342',
            'code_eclass': u'MHX256',
            'link_site': '',
            'link_eclass': 'http://eclass.uth.gr/eclass/courses/MHX256/'
        }
    }
| kkanellis/uthportal-server | uthportal/library/inf/courses/ce342.py | Python | gpl-3.0 | 621 |
import json
import logging
import re
logger = logging.getLogger(__name__)
csRegStr4Hash = '\B(?P<Type>[\@\#\$\%\&])(?P<Text>\S+)'
def FindHashes(srcStr, RegStr):
    """Find all marker-prefixed tokens (e.g. @user, #tag) in srcStr.

    Returns a list of single-entry dicts mapping the marker character
    (group 1 of RegStr) to the word that follows it (group 2), or None
    when nothing matches.
    """
    # The previous implementation built an unused copy of the pattern and
    # tracked an unused match counter via enumerate(); a comprehension over
    # finditer is equivalent and clearer.
    found = [
        {match.group(1): match.group(2)}
        for match in re.finditer(RegStr, srcStr, re.IGNORECASE | re.UNICODE)
    ]
    return found if found else None
def Split_CMD(SrcMessage):
    """Parse a raw chat message into a command descriptor dict, or None."""
    # Management-channel commands: prefix -> canonical command name.
    mcc_prefixes = (('mcc?', 'lst'), ('mcc+', 'add'), ('mcc-', 'del'))

    if SrcMessage.startswith('mcc'):
        for prefix, cmd in mcc_prefixes:
            if SrcMessage.startswith(prefix):
                return {"cmd": cmd}

    if SrcMessage.startswith('sms:'):
        # 'hashes' is a list of marker->word dicts, or None when absent.
        return {"cmd": "sms", 'hashes': FindHashes(SrcMessage, csRegStr4Hash)}

    return None
class RtmEventHandler(object):
    """Dispatches Slack RTM events to the matching bot response."""

    def __init__(self, slack_clients, msg_writer):
        self.clients = slack_clients
        self.msg_writer = msg_writer
        # Scratch store for debugging event history (dumped by the '?' command).
        self.log = {};

    def handle(self, event):
        # Events without a 'type' field are silently ignored.
        if 'type' in event:
            self._handle_by_type(event['type'], event)

    def _handle_by_type(self, event_type, event):
        # See https://api.slack.com/rtm for a full list of events
        if event_type == 'error':
            # error
            self.msg_writer.write_error(event['channel'], json.dumps(event))
        elif event_type == 'message':
            # message was sent to channel
            self._handle_message(event)
        elif event_type == 'channel_joined':
            # you joined a channel
            self.msg_writer.write_help_message(event['channel'])
        elif event_type == 'group_joined':
            # you joined a private group
            self.msg_writer.write_help_message(event['channel'])
        else:
            # All other event types are intentionally ignored.
            pass

    def _handle_message(self, event):
        # Filter out messages from the bot itself, and from non-users (eg. webhooks)
        #if event['team'] not in self.log:
        #    self.log[str(event['team'])] ={}
        #self.log[str(event['team'])][str(event['user'])] = event
        if ('user' in event) and (not self.clients.is_message_from_me(event['user'])):
            msg_txt = event['text']
            # Only respond when explicitly mentioned or in a direct message.
            if self.clients.is_bot_mention(msg_txt) or self._is_direct_message(event['channel']):
                # e.g. user typed: "@pybot tell me a joke!"
                # Structured commands (mcc*/sms:) take priority over keywords.
                MCC = Split_CMD(msg_txt)
                if MCC:
                    self.msg_writer.send_message(event['channel'], 'This is my message! '+json.dumps(MCC))
                elif '?' == msg_txt:
                    # Debug dump of the triggering event and the event log.
                    self.msg_writer.send_message(event['channel'], json.dumps(event)+'\n'+json.dumps(self.log))
                elif 'help' in msg_txt:
                    self.msg_writer.write_help_message(event['channel'])
                elif re.search('hi|hey|hello|howdy', msg_txt):
                    self.msg_writer.write_greeting(event['channel'], event['user'])
                elif 'joke' in msg_txt:
                    self.msg_writer.write_joke(event['channel'])
                elif 'attachment' in msg_txt:
                    self.msg_writer.demo_attachment(event['channel'])
                elif 'echo' in msg_txt:
                    self.msg_writer.send_message(event['channel'], msg_txt)
                else:
                    # Unrecognized input: prompt the user with usage hints.
                    self.msg_writer.write_prompt(event['channel'])

    def _is_direct_message(self, channel):
        """Check if channel is a direct message channel

        Args:
            channel (str): Channel in which a message was received
        """
        # Slack DM channel IDs always start with 'D'.
        return channel.startswith('D')
| SpyDeX/BeepMiBot | bot/event_handler.py | Python | mit | 3,806 |
# -*- encoding: utf-8 -*-
#
# Copyright 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from ceilometer.api import app
from ceilometer import service
import sys
def main():
    """Prepare the Ceilometer service and run the API server until stopped."""
    service.prepare_service()
    api_server = app.build_server()
    api_server.serve_forever()


if __name__ == '__main__':
    sys.exit(main())
| luogangyi/Ceilometer-oVirt | ceilometer/cmd/api.py | Python | apache-2.0 | 835 |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import collections
import importlib
import logging
import os
import smtplib
from email.mime.application import MIMEApplication
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.utils import formatdate
from typing import Iterable, List, Union
from airflow.configuration import conf
from airflow.exceptions import AirflowConfigException
log = logging.getLogger(__name__)
def send_email(to, subject, html_content,
               files=None, dryrun=False, cc=None, bcc=None,
               mime_subtype='mixed', mime_charset='utf-8', **kwargs):
    """
    Send email using backend specified in EMAIL_BACKEND.

    The backend is resolved dynamically from the "email.EMAIL_BACKEND"
    configuration value (a dotted "package.module.callable" path), so
    deployments can plug in their own implementation. The ``to``
    recipients are normalized to a single comma-separated string before
    being handed to the backend.
    """
    path, attr = conf.get('email', 'EMAIL_BACKEND').rsplit('.', 1)
    module = importlib.import_module(path)
    backend = getattr(module, attr)
    to = get_email_address_list(to)
    to = ", ".join(to)

    return backend(to, subject, html_content, files=files,
                   dryrun=dryrun, cc=cc, bcc=bcc,
                   mime_subtype=mime_subtype, mime_charset=mime_charset, **kwargs)
def send_email_smtp(to, subject, html_content, files=None,
                    dryrun=False, cc=None, bcc=None,
                    mime_subtype='mixed', mime_charset='utf-8',
                    **kwargs):
    """
    Send an email with html content

    >>> send_email('test@example.com', 'foo', '<b>Foo</b> bar', ['/dev/null'], dryrun=True)
    """
    smtp_mail_from = conf.get('smtp', 'SMTP_MAIL_FROM')

    to = get_email_address_list(to)

    # Assemble the multipart envelope: headers first, then the HTML body,
    # then one application part per attachment.
    msg = MIMEMultipart(mime_subtype)
    msg['Subject'] = subject
    msg['From'] = smtp_mail_from
    msg['To'] = ", ".join(to)
    recipients = to
    if cc:
        cc = get_email_address_list(cc)
        msg['CC'] = ", ".join(cc)
        recipients = recipients + cc

    if bcc:
        # don't add bcc in header
        bcc = get_email_address_list(bcc)
        recipients = recipients + bcc

    msg['Date'] = formatdate(localtime=True)
    mime_text = MIMEText(html_content, 'html', mime_charset)
    msg.attach(mime_text)

    # Each attachment also gets a Content-ID so the HTML body can embed it
    # inline via cid:<basename>.
    for fname in files or []:
        basename = os.path.basename(fname)
        with open(fname, "rb") as file:
            part = MIMEApplication(
                file.read(),
                Name=basename
            )
            part['Content-Disposition'] = 'attachment; filename="%s"' % basename
            part['Content-ID'] = '<%s>' % basename
            msg.attach(part)

    send_mime_email(smtp_mail_from, recipients, msg, dryrun)
def send_mime_email(e_from, e_to, mime_msg, dryrun=False):
    """
    Send MIME email.

    Connection parameters come from the [smtp] config section; when
    ``dryrun`` is true nothing is sent at all.
    """
    smtp_host = conf.get('smtp', 'SMTP_HOST')
    smtp_port = conf.getint('smtp', 'SMTP_PORT')
    smtp_starttls = conf.getboolean('smtp', 'SMTP_STARTTLS')
    smtp_ssl = conf.getboolean('smtp', 'SMTP_SSL')
    smtp_user = None
    smtp_password = None

    # Credentials are optional: fall back to unauthenticated SMTP when the
    # user/password keys are absent from the config.
    try:
        smtp_user = conf.get('smtp', 'SMTP_USER')
        smtp_password = conf.get('smtp', 'SMTP_PASSWORD')
    except AirflowConfigException:
        log.debug("No user/password found for SMTP, so logging in with no authentication.")

    if not dryrun:
        # Implicit TLS (SMTPS) vs. plain connection upgraded via STARTTLS.
        conn = smtplib.SMTP_SSL(smtp_host, smtp_port) if smtp_ssl else smtplib.SMTP(smtp_host, smtp_port)
        if smtp_starttls:
            conn.starttls()
        if smtp_user and smtp_password:
            conn.login(smtp_user, smtp_password)
        log.info("Sent an alert email to %s", e_to)
        conn.sendmail(e_from, e_to, mime_msg.as_string())
        conn.quit()
def get_email_address_list(addresses: Union[str, Iterable[str]]) -> List[str]:
    """
    Normalize *addresses* into a list of address strings.

    A single string may contain several addresses separated by commas or
    semicolons; any other iterable of strings is returned as a list.
    Raises TypeError for unsupported input.
    """
    if isinstance(addresses, str):
        # Split on the first recognized delimiter; otherwise one address.
        for separator in (",", ";"):
            if separator in addresses:
                return [chunk.strip() for chunk in addresses.split(separator)]
        return [addresses]

    if isinstance(addresses, collections.abc.Iterable):
        if not all(isinstance(item, str) for item in addresses):
            raise TypeError("The items in your iterable must be strings.")
        return list(addresses)

    received_type = type(addresses).__name__
    raise TypeError("Unexpected argument type: Received '{}'.".format(received_type))
def _get_email_list_from_str(addresses: str) -> List[str]:
delimiters = [",", ";"]
for delimiter in delimiters:
if delimiter in addresses:
return [address.strip() for address in addresses.split(delimiter)]
return [addresses]
| wileeam/airflow | airflow/utils/email.py | Python | apache-2.0 | 5,108 |
'''
Created on Dec 3, 2014
@author: gearsad
'''
import sys
from roverpylot import rover
from bot_update_t import bot_update_t
from bot_control_command_t import bot_control_command_t
import lcm
# Try to start OpenCV for video
try:
import cv
except:
cv = None
class LCMRover(rover.Rover):
'''
A rover using LCM for control and camera feed upstream
'''
def Initialize(self, botname):
'''
Init the rover and store the name
'''
self.__botname = botname
self.__lcm = lcm.LCM("udpm://239.255.76.67:7667?ttl=1")
self.__controlSubscription = self.__lcm.subscribe("ARNerve_Bot_Control_" + self.__botname, self.UpdateBotControlHandler)
self.__lightsOn = 0
self.__infraredOn = 0
def processVideo(self, jpegbytes):
#try:
camUpdate = bot_update_t()
camUpdate.name = self.__botname
camUpdate.numBytes_cameraFrameJpeg = len(jpegbytes)
camUpdate.cameraFrameJpeg = jpegbytes
# Get the battery health as well
battery = self.getBatteryPercentage()
camUpdate.batteryPercentage = battery
self.__lcm.publish("ARNerve_Bot_Update_" + self.__botname, camUpdate.encode())
#except:
# print "Exception", sys.exc_info()[0]
# pass
def Update(self):
'''
Update the LCM
'''
self.__lcm.handle()
def Disconnect(self):
self.lc.unsubscribe(self.__controlSubscription)
def UpdateBotControlHandler(self, channel, data):
'''
Get the updated bot parameters and send them to the bot.
'''
controlParams = bot_control_command_t.decode(data)
# Check if it is the right bot.
if self.__botname != controlParams.name:
return
self.setTreads(controlParams.botTreadVelLeft, controlParams.botTreadVelright)
print "Setting the treads to {0}, {1}".format(controlParams.botTreadVelLeft, controlParams.botTreadVelright)
if self.__lightsOn != controlParams.isLightsOn:
if controlParams.isLightsOn != 0:
self.turnLightsOn()
else:
self.turnLightsOff()
self.__lightsOn = controlParams.isLightsOn
if self.__infraredOn != controlParams.isInfraredOn:
if controlParams.isInfraredOn != 0:
self.turnInfraredOn()
else:
self.turnInfraredOff()
self.__infraredOn = controlParams.isInfraredOn
| GearsAD/semisorted_arnerve | arnerve_bot/arnerve_bot/LCMRover.py | Python | mit | 2,720 |
"""Copyright 2020 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
__init__.py for datasets package
"""
from .msrvtt import MSRVTTDataset

# Module-level singleton instance shared by consumers of this package.
msrvtt_dataset = MSRVTTDataset()
#!/usr/bin/env python
from setuptools import setup, find_packages
# Package metadata; sources live under src/, tests are excluded from the
# distribution.
setup (
    name = "Nark",
    version = "0.1",
    description="Various python utilities",
    author="Douglas Linder",
    author_email="", # Removed to limit spam harvesting.
    url="",
    package_dir = {'': 'src'},
    packages = find_packages("src", exclude="tests"),
    zip_safe = True
)
| shadowmint/python-nark | setup.py | Python | apache-2.0 | 347 |
import controllers
import models | smartforceplus/SmartForceplus | openerp/addons/tag_website_landing_pages/__init__.py | Python | agpl-3.0 | 33 |
# show how product information is collected from an API
# first import packages
import urllib2
import json
import pandas as pd
# this is your api information
my_api_key = "XXXXXXXXXXX"
# build the api url, passing in your api key
url = "http://api.shopstyle.com/api/v2/"
ties = "{}products?pid={}&cat=mens-ties&limit=100".format(url, my_api_key)
# open the connection to the api endpoint
jsonResponse = urllib2.urlopen(ties)
data = json.load(jsonResponse)
# parse the response to find out how many pages of results there are
total = data['metadata']['total']
limit = data['metadata']['limit']
offset = data['metadata']['offset']
pages = (total / limit)
print "{} total, {} per page. {} pages to process".format(total, limit, pages)
# tmp = pd.DataFrame(data['products'])
# set up an empty dictionary
dfs = {}
# connect with api again, page by page and save the results to the dictionary
for page in range(pages+1):
allTies = "{}products?pid={}&cat=mens-ties&limit=100&offset={}&sort=popular".format(url, mykeys.apiKey, (page*50))
jsonResponse = urllib2.urlopen(allTies)
data = json.load(jsonResponse)
dfs[page] = pd.DataFrame(data['products'])
# convert the dictionary to a pandas data frame object
df = pd.concat(dfs, ignore_index=True)
# Cleaning records, removing duplicates
df = df.drop_duplicates('id')
df['priceLabel'] = df['priceLabel'].str.replace('$', '')
df['priceLabel'] = df['priceLabel'].astype(float)
# continue cleaning up the data, split data into columns as necesary
def breakId(x, y = 0):
    """Return x["id"] when present, otherwise the default y.

    The bare "except:" was narrowed so that only expected lookup failures
    (missing key, non-subscriptable value) are swallowed; unrelated errors
    are no longer hidden.
    """
    try:
        y = x["id"]
    except (TypeError, KeyError, IndexError):
        pass
    return y
def breakName(x, y=""):
    """Return x["name"] when present, otherwise the default y.

    Bare "except:" narrowed to the lookup failures we actually expect.
    """
    try:
        y = x["name"]
    except (TypeError, KeyError, IndexError):
        pass
    return y
# Flatten the nested brand dict into flat id/name columns.
df['brandId'] = df['brand'].map(breakId);
df['brandName'] = df['brand'].map(breakName);
def breakCanC(x,y=""):
    """Return the first canonical color name of the first color, else y.

    Bare "except:" narrowed: missing keys, empty lists and non-indexable
    values all fall back to the default.
    """
    try:
        y = x[0]["canonicalColors"][0]["name"]
    except (TypeError, KeyError, IndexError):
        pass
    return y
def breakColorName(x, y=""):
    """Return the name of the first color entry, otherwise the default y.

    Bare "except:" narrowed to expected lookup failures only.
    """
    try:
        y = x[0]["name"]
    except (TypeError, KeyError, IndexError):
        pass
    return y
def breakColorId(x, y=""):
    """Return the first canonical color id of the first color, else y.

    Bare "except:" narrowed to expected lookup failures only.
    """
    try:
        y = x[0]["canonicalColors"][0]["id"]
    except (TypeError, KeyError, IndexError):
        pass
    return y
# Flatten the colors structure into id / family / name columns.
df['colorId'] = df['colors'].map(breakColorId);
df['colorFamily'] = df['colors'].map(breakCanC);
df['colorNamed'] = df['colors'].map(breakColorName);

# export to data.csv (tab-separated, UTF-8, selected columns only)
df.to_csv("data.csv", sep='\t', encoding='utf-8',
          columns=['id', 'priceLabel', 'name','brandId', 'brandName', 'colorId', 'colorFamily', 'colorNamed'])
| katychuang/python-data-sci-basics | teachers_notes/api_example.py | Python | mit | 2,467 |
# SPDX-License-Identifier: GPL-2.0-or-later
from .gi_composites import GtkTemplate
import gi
gi.require_version("Gtk", "3.0")
from gi.repository import GObject, Gtk # noqa
@GtkTemplate(ui="/org/freedesktop/Piper/ui/ProfileRow.ui")
class ProfileRow(Gtk.ListBoxRow):
    """A Gtk.ListBoxRow subclass containing the widgets to display a profile in
    the profile popover."""

    __gtype_name__ = "ProfileRow"

    # Label widget bound from the UI template file.
    title = GtkTemplate.Child()

    def __init__(self, profile, *args, **kwargs):
        Gtk.ListBoxRow.__init__(self, *args, **kwargs)
        self.init_template()
        self._profile = profile
        # Hide/show this row automatically when the profile's enabled flag
        # changes.
        self._profile.connect("notify::enabled", self._on_profile_notify_enabled)

        name = profile.name
        if not name:
            # Fall back to a generated label based on the profile's index.
            name = 'Profile {}'.format(profile.index)
        self.title.set_text(name)
        self.show_all()
        self.set_visible(profile.enabled)

    def _on_profile_notify_enabled(self, profile, pspec):
        self.set_visible(profile.enabled)

    @GtkTemplate.Callback
    def _on_delete_button_clicked(self, button):
        # "Deleting" a profile only disables it; the notify handler above
        # then hides the row.
        self._profile.enabled = False

    def set_active(self):
        """Activates the profile paired with this row."""
        self._profile.set_active()

    @GObject.Property
    def name(self):
        # Current display name shown in the row's label.
        return self.title.get_text()
| libratbag/piper | piper/profilerow.py | Python | gpl-2.0 | 1,313 |
from django.utils import simplejson as json
import httplib2
BASE_SERVER = 'http://djangopackages.com'
API_SERVER = '%s/api/v1/' % BASE_SERVER
def import_project(project):
    """Fetch Django Packages metadata for *project* and store its URL.

    Returns True when the package was found and the project saved,
    False on lookup failure or a non-200 response.
    """
    URL = API_SERVER + "package/%s/" % project.slug
    h = httplib2.Http(timeout=5)
    try:
        resp, content = h.request(URL, "GET")
    except AttributeError:
        # NOTE(review): the message says "socket error" but only
        # AttributeError is caught here -- confirm whether socket.error /
        # httplib2 exceptions should be handled instead or as well.
        print "Socket error trying to pull from Django Packages"
        return False
    if resp['status'] == '200':
        content_dict = json.loads(content)
        project.django_packages_url = BASE_SERVER + content_dict['absolute_url']
        project.save()
        return True
    return False
| alex/readthedocs.org | readthedocs/tastyapi/client.py | Python | mit | 642 |
# -*- coding: utf8 -*-
#
# Copyright (C) 2019 NDP Systèmes (<http://www.ndp-systemes.fr>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
#
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Odoo/OpenERP module manifest.
{
    'name': "XlsxWriter Utility",
    'version': '0.1',
    'author': 'NDP Systèmes',
    'maintainer': 'NDP Systèmes',
    'category': 'Technical',
    'description': """
XlsxWriter Utility
==================
Add some utility function to create report with xlsx writer lib
""",
    # Pure technical helper module: depends only on base and ships no
    # views, QWeb templates or data files.
    'depends': ['base'],
    'qweb': [],
    'data': [],
}
| ndp-systemes/odoo-addons | xlsxwriter_utility/__openerp__.py | Python | agpl-3.0 | 1,142 |
import logging
from angr.procedures.stubs.format_parser import FormatParser
from cle.backends.externs.simdata.io_file import io_file_data_for_arch
l = logging.getLogger(name=__name__)
######################################
# fprintf
######################################
class fprintf(FormatParser):
    """SimProcedure modelling libc fprintf(stream, format, ...)."""

    def run(self, file_ptr):
        # Resolve the FILE* to its underlying file descriptor number using
        # the arch-specific offset of the fd field inside the FILE struct.
        fd_offset = io_file_data_for_arch(self.state.arch)['fd']
        fileno = self.state.mem[file_ptr + fd_offset:].int.resolved
        simfd = self.state.posix.get_fd(fileno)
        if simfd is None:
            # Unknown/closed descriptor: mirror fprintf's error return.
            return -1

        # The format str is at index 1
        fmt_str = self._parse(1)
        out_str = fmt_str.replace(2, self.arg)
        # Number of bytes written = bitvector width / 8; also the return value.
        simfd.write_data(out_str, out_str.size() // 8)

        return out_str.size() // 8
| iamahuman/angr | angr/procedures/libc/fprintf.py | Python | bsd-2-clause | 777 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os_resource_classes as orc
import os_traits
import six
from nova import context as nova_context
from nova import exception
from nova import objects
from nova.tests.functional.api import client as api_client
from nova.tests.functional import integrated_helpers
from nova import utils
class TestServicesAPI(integrated_helpers.ProviderUsageBaseTestCase):
    """Functional tests for the os-services API, exercising compute service
    deletion and its interplay with aggregates, host mappings, Placement
    resource providers and in-progress migrations."""
    # Use the fake virt driver so no real hypervisor is required.
    compute_driver = 'fake.SmallFakeDriver'
    def test_compute_service_delete_ensure_related_cleanup(self):
        """Tests deleting a compute service and the related cleanup associated
        with that like the compute_nodes table entry, removing the host
        from any aggregates, the host mapping in the API DB and the associated
        resource provider in Placement.
        """
        compute = self._start_compute('host1')
        # Make sure our compute host is represented as expected.
        services = self.admin_api.get_services(binary='nova-compute')
        self.assertEqual(1, len(services))
        service = services[0]
        # Now create a host aggregate and add our host to it.
        aggregate = self.admin_api.post_aggregate(
            {'aggregate': {'name': 'agg1'}})
        self.admin_api.add_host_to_aggregate(aggregate['id'], service['host'])
        # Make sure the host is in the aggregate.
        aggregate = self.admin_api.api_get(
            '/os-aggregates/%s' % aggregate['id']).body['aggregate']
        self.assertEqual([service['host']], aggregate['hosts'])
        rp_uuid = self._get_provider_uuid_by_host(service['host'])
        # We'll know there is a host mapping implicitly if os-hypervisors
        # returned something in _get_provider_uuid_by_host, but let's also
        # make sure the host mapping is there like we expect.
        ctxt = nova_context.get_admin_context()
        objects.HostMapping.get_by_host(ctxt, service['host'])
        # Make sure there is a resource provider for that compute node based
        # on the uuid.
        resp = self.placement_api.get('/resource_providers/%s' % rp_uuid)
        self.assertEqual(200, resp.status)
        # Make sure the resource provider has inventory.
        inventories = self._get_provider_inventory(rp_uuid)
        # Expect a minimal set of inventory for the fake virt driver.
        for resource_class in [orc.VCPU, orc.MEMORY_MB, orc.DISK_GB]:
            self.assertIn(resource_class, inventories)
        # Now create a server so that the resource provider has some allocation
        # records.
        flavor = self.api.get_flavors()[0]
        server = self._boot_and_check_allocations(flavor, service['host'])
        # Now the fun part, delete the compute service and make sure related
        # resources are cleaned up, like the compute node, host mapping, and
        # resource provider. We have to first stop the compute service so
        # it doesn't recreate the compute node during the
        # update_available_resource periodic task.
        self.admin_api.put_service(service['id'], {'forced_down': True})
        compute.stop()
        # The first attempt should fail since there is an instance on the
        # compute host.
        ex = self.assertRaises(api_client.OpenStackApiException,
                               self.admin_api.api_delete,
                               '/os-services/%s' % service['id'])
        self.assertIn('Unable to delete compute service that is hosting '
                      'instances.', six.text_type(ex))
        self.assertEqual(409, ex.response.status_code)
        # Now delete the instance and wait for it to be gone.
        self._delete_and_check_allocations(server)
        # Now we can delete the service.
        self.admin_api.api_delete('/os-services/%s' % service['id'])
        # Make sure the service is deleted.
        services = self.admin_api.get_services(binary='nova-compute')
        self.assertEqual(0, len(services))
        # Make sure the host was removed from the aggregate.
        aggregate = self.admin_api.api_get(
            '/os-aggregates/%s' % aggregate['id']).body['aggregate']
        self.assertEqual([], aggregate['hosts'])
        # Trying to get the hypervisor should result in a 404.
        self.admin_api.api_get(
            'os-hypervisors?hypervisor_hostname_pattern=%s' % service['host'],
            check_response_status=[404])
        # The host mapping should also be gone.
        self.assertRaises(exception.HostMappingNotFound,
                          objects.HostMapping.get_by_host,
                          ctxt, service['host'])
        # And finally, the resource provider should also be gone. The API
        # will perform a cascading delete of the resource provider inventory
        # and allocation information.
        resp = self.placement_api.get('/resource_providers/%s' % rp_uuid)
        self.assertEqual(404, resp.status)
    def test_evacuate_then_delete_compute_service(self):
        """Tests a scenario where a server is created on a host, the host
        goes down, the server is evacuated to another host, and then the
        source host compute service is deleted. After that the deleted
        compute service is restarted. Related placement resources are checked
        throughout.
        """
        # Create our source host that we will evacuate *from* later.
        host1 = self._start_compute('host1')
        # Create a server which will go on host1 since it is the only host.
        flavor = self.api.get_flavors()[0]
        server = self._boot_and_check_allocations(flavor, 'host1')
        # Get the compute service record for host1 so we can manage it.
        service = self.admin_api.get_services(
            binary='nova-compute', host='host1')[0]
        # Get the corresponding resource provider uuid for host1.
        rp_uuid = self._get_provider_uuid_by_host(service['host'])
        # Make sure there is a resource provider for that compute node based
        # on the uuid.
        resp = self.placement_api.get('/resource_providers/%s' % rp_uuid)
        self.assertEqual(200, resp.status)
        # Down the compute service for host1 so we can evacuate from it.
        self.admin_api.put_service(service['id'], {'forced_down': True})
        host1.stop()
        # Start another host and trigger the server evacuate to that host.
        self._start_compute('host2')
        self.admin_api.post_server_action(server['id'], {'evacuate': {}})
        # The host does not change until after the status is changed to ACTIVE
        # so wait for both parameters.
        self._wait_for_server_parameter(server, {
            'status': 'ACTIVE',
            'OS-EXT-SRV-ATTR:host': 'host2'})
        # Delete the compute service for host1 and check the related
        # placement resources for that host.
        self.admin_api.api_delete('/os-services/%s' % service['id'])
        # Make sure the service is gone.
        services = self.admin_api.get_services(
            binary='nova-compute', host='host1')
        self.assertEqual(0, len(services), services)
        # FIXME(mriedem): This is bug 1829479 where the compute service is
        # deleted but the resource provider is not because there are still
        # allocations against the provider from the evacuated server.
        resp = self.placement_api.get('/resource_providers/%s' % rp_uuid)
        self.assertEqual(200, resp.status)
        self.assertFlavorMatchesUsage(rp_uuid, flavor)
        # Try to restart the host1 compute service to create a new resource
        # provider.
        self.restart_compute_service(host1)
        # FIXME(mriedem): This is bug 1817833 where restarting the now-deleted
        # compute service attempts to create a new resource provider with a
        # new uuid but the same name which results in a conflict. The service
        # does not die, however, because _update_available_resource_for_node
        # catches and logs but does not re-raise the error.
        log_output = self.stdlog.logger.output
        self.assertIn('Error updating resources for node host1.', log_output)
        self.assertIn('Failed to create resource provider host1', log_output)
    def test_migrate_confirm_after_deleted_source_compute(self):
        """Tests a scenario where a server is cold migrated and while in
        VERIFY_RESIZE status the admin attempts to delete the source compute
        and then the user tries to confirm the resize.
        """
        # Start a compute service and create a server there.
        self._start_compute('host1')
        host1_rp_uuid = self._get_provider_uuid_by_host('host1')
        flavor = self.api.get_flavors()[0]
        server = self._boot_and_check_allocations(flavor, 'host1')
        # Start a second compute service so we can cold migrate there.
        self._start_compute('host2')
        host2_rp_uuid = self._get_provider_uuid_by_host('host2')
        # Cold migrate the server to host2.
        self._migrate_and_check_allocations(
            server, flavor, host1_rp_uuid, host2_rp_uuid)
        # Delete the source compute service.
        service = self.admin_api.get_services(
            binary='nova-compute', host='host1')[0]
        # We expect the delete request to fail with a 409 error because of the
        # instance in VERIFY_RESIZE status even though that instance is marked
        # as being on host2 now.
        ex = self.assertRaises(api_client.OpenStackApiException,
                               self.admin_api.api_delete,
                               '/os-services/%s' % service['id'])
        self.assertEqual(409, ex.response.status_code)
        self.assertIn('Unable to delete compute service that has in-progress '
                      'migrations', six.text_type(ex))
        self.assertIn('There are 1 in-progress migrations involving the host',
                      self.stdlog.logger.output)
        # The provider is still around because we did not delete the service.
        resp = self.placement_api.get('/resource_providers/%s' % host1_rp_uuid)
        self.assertEqual(200, resp.status)
        self.assertFlavorMatchesUsage(host1_rp_uuid, flavor)
        # Now try to confirm the migration.
        self._confirm_resize(server)
        # Delete the host1 service since the migration is confirmed and the
        # server is on host2.
        self.admin_api.api_delete('/os-services/%s' % service['id'])
        # The host1 resource provider should be gone.
        resp = self.placement_api.get('/resource_providers/%s' % host1_rp_uuid)
        self.assertEqual(404, resp.status)
    def test_resize_revert_after_deleted_source_compute(self):
        """Tests a scenario where a server is resized and while in
        VERIFY_RESIZE status the admin attempts to delete the source compute
        and then the user tries to revert the resize.
        """
        # Start a compute service and create a server there.
        self._start_compute('host1')
        host1_rp_uuid = self._get_provider_uuid_by_host('host1')
        flavors = self.api.get_flavors()
        flavor1 = flavors[0]
        flavor2 = flavors[1]
        server = self._boot_and_check_allocations(flavor1, 'host1')
        # Start a second compute service so we can resize there.
        self._start_compute('host2')
        host2_rp_uuid = self._get_provider_uuid_by_host('host2')
        # Resize the server to host2.
        self._resize_and_check_allocations(
            server, flavor1, flavor2, host1_rp_uuid, host2_rp_uuid)
        # Delete the source compute service.
        service = self.admin_api.get_services(
            binary='nova-compute', host='host1')[0]
        # We expect the delete request to fail with a 409 error because of the
        # instance in VERIFY_RESIZE status even though that instance is marked
        # as being on host2 now.
        ex = self.assertRaises(api_client.OpenStackApiException,
                               self.admin_api.api_delete,
                               '/os-services/%s' % service['id'])
        self.assertEqual(409, ex.response.status_code)
        self.assertIn('Unable to delete compute service that has in-progress '
                      'migrations', six.text_type(ex))
        self.assertIn('There are 1 in-progress migrations involving the host',
                      self.stdlog.logger.output)
        # The provider is still around because we did not delete the service.
        resp = self.placement_api.get('/resource_providers/%s' % host1_rp_uuid)
        self.assertEqual(200, resp.status)
        self.assertFlavorMatchesUsage(host1_rp_uuid, flavor1)
        # Now revert the resize.
        self._revert_resize(server)
        self.assertFlavorMatchesUsage(host1_rp_uuid, flavor1)
        zero_flavor = {'vcpus': 0, 'ram': 0, 'disk': 0, 'extra_specs': {}}
        self.assertFlavorMatchesUsage(host2_rp_uuid, zero_flavor)
        # Delete the host2 service since the migration is reverted and the
        # server is on host1 again.
        service2 = self.admin_api.get_services(
            binary='nova-compute', host='host2')[0]
        self.admin_api.api_delete('/os-services/%s' % service2['id'])
        # The host2 resource provider should be gone.
        resp = self.placement_api.get('/resource_providers/%s' % host2_rp_uuid)
        self.assertEqual(404, resp.status)
class ComputeStatusFilterTest(integrated_helpers.ProviderUsageBaseTestCase):
    """Tests the API, compute service and Placement interaction with the
    COMPUTE_STATUS_DISABLED trait when a compute service is enable/disabled.
    This version of the test uses the 2.latest microversion for testing the
    2.53+ behavior of the PUT /os-services/{service_id} API.
    """
    # Use the fake virt driver so no real hypervisor is required.
    compute_driver = 'fake.SmallFakeDriver'
    def _update_service(self, service, disabled, forced_down=None):
        """Update the service using the 2.53 request schema.
        :param service: dict representing the service resource in the API
        :param disabled: True if the service should be disabled, False if the
            service should be enabled
        :param forced_down: Optionally change the forced_down value.
        """
        status = 'disabled' if disabled else 'enabled'
        req = {'status': status}
        if forced_down is not None:
            req['forced_down'] = forced_down
        self.admin_api.put_service(service['id'], req)
    def test_compute_status_filter(self):
        """Tests the compute_status_filter placement request filter"""
        # Start a compute service so a compute node and resource provider is
        # created.
        compute = self._start_compute('host1')
        # Get the UUID of the resource provider that was created.
        rp_uuid = self._get_provider_uuid_by_host('host1')
        # Get the service from the compute API.
        services = self.admin_api.get_services(binary='nova-compute',
                                               host='host1')
        self.assertEqual(1, len(services))
        service = services[0]
        # At this point, the service should be enabled and the
        # COMPUTE_STATUS_DISABLED trait should not be set on the
        # resource provider in placement.
        self.assertEqual('enabled', service['status'])
        rp_traits = self._get_provider_traits(rp_uuid)
        trait = os_traits.COMPUTE_STATUS_DISABLED
        self.assertNotIn(trait, rp_traits)
        # Now disable the compute service via the API.
        self._update_service(service, disabled=True)
        # The update to placement should be synchronous so check the provider
        # traits and COMPUTE_STATUS_DISABLED should be set.
        rp_traits = self._get_provider_traits(rp_uuid)
        self.assertIn(trait, rp_traits)
        # Try creating a server which should fail because nothing is available.
        # Use the pre-defined neutron port fixture for networking.
        networks = [{'port': self.neutron.port_1['id']}]
        server_req = self._build_server(networks=networks)
        server = self.api.post_server({'server': server_req})
        server = self._wait_for_state_change(server, 'ERROR')
        # There should be a NoValidHost fault recorded.
        self.assertIn('fault', server)
        self.assertIn('No valid host', server['fault']['message'])
        # Now enable the service and the trait should be gone.
        self._update_service(service, disabled=False)
        rp_traits = self._get_provider_traits(rp_uuid)
        self.assertNotIn(trait, rp_traits)
        # Try creating another server and it should be OK.
        server = self.api.post_server({'server': server_req})
        self._wait_for_state_change(server, 'ACTIVE')
        # Stop, force-down and disable the service so the API cannot call
        # the compute service to sync the trait.
        compute.stop()
        self._update_service(service, disabled=True, forced_down=True)
        # The API should have logged a message about the service being down.
        self.assertIn('Compute service on host host1 is down. The '
                      'COMPUTE_STATUS_DISABLED trait will be synchronized '
                      'when the service is restarted.',
                      self.stdlog.logger.output)
        # The trait should not be on the provider even though the node is
        # disabled.
        rp_traits = self._get_provider_traits(rp_uuid)
        self.assertNotIn(trait, rp_traits)
        # Restart the compute service which should sync and set the trait on
        # the provider in placement.
        self.restart_compute_service(compute)
        rp_traits = self._get_provider_traits(rp_uuid)
        self.assertIn(trait, rp_traits)
class ComputeStatusFilterTest211(ComputeStatusFilterTest):
    """Extends ComputeStatusFilterTest and uses the 2.11 API for the
    legacy os-services disable/enable/force-down API behavior
    """
    microversion = '2.11'
    def _update_service(self, service, disabled, forced_down=None):
        """Update the service using the 2.11 request schema.
        :param service: dict representing the service resource in the API
        :param disabled: True if the service should be disabled, False if the
            service should be enabled
        :param forced_down: Optionally change the forced_down value.
        """
        # Before 2.53 the service is uniquely identified by host and binary.
        body = {
            'host': service['host'],
            'binary': service['binary']
        }
        # Handle forced_down first if provided since the enable/disable
        # behavior in the API depends on it.
        if forced_down is not None:
            body['forced_down'] = forced_down
            self.admin_api.api_put('/os-services/force-down', body)
        if disabled:
            self.admin_api.api_put('/os-services/disable', body)
        else:
            self.admin_api.api_put('/os-services/enable', body)
    def _get_provider_uuid_by_host(self, host):
        """Override: os-hypervisors only exposes the UUID at >= 2.53."""
        # We have to temporarily mutate to 2.53 to get the hypervisor UUID.
        with utils.temporary_mutation(self.admin_api, microversion='2.53'):
            return super(ComputeStatusFilterTest211,
                         self)._get_provider_uuid_by_host(host)
| rahulunair/nova | nova/tests/functional/wsgi/test_services.py | Python | apache-2.0 | 19,683 |
# coding: utf-8
"""
This module contains event manager for the current platform.
"""
from __future__ import absolute_import
# Local imports
from .compat import IS_CYGWIN
from .compat import IS_LINUX
from .compat import IS_MACOS
from .compat import IS_WINOS
from .compat import UNSUPPORTED_PLATFORM_ERROR
# Select the platform-specific EventManager implementation at import time.
# Exactly one of the backends below is imported under the common name
# ``EventManager``; callers import it from this module without caring
# which platform they run on.
if IS_LINUX:
    # Linux uses the X11-based backend
    from .event_manager_x11 import EventManager
# If the platform is MacOS
elif IS_MACOS:
    # Use event manager for MACOS
    from .event_manager_macos import EventManager
# If the platform is Windows
elif IS_WINOS:
    # Use event manager for Windows
    from .event_manager_windows import EventManager
# If the platform is Cygwin
elif IS_CYGWIN:
    # Use event manager for Cygwin
    from .event_manager_cygwin import EventManager
# If the platform is none of above
else:
    # Fail fast: no backend exists for this platform
    raise UNSUPPORTED_PLATFORM_ERROR
# Re-bind under the same name so linters do not flag the conditional
# import as unused.
EventManager = EventManager
| AoiKuiyuyou/AoikHotkey | src/aoikhotkey/event_manager.py | Python | mit | 971 |
# -*- coding: utf-8 -*-
#
#
# (DC)² - DataCenter Deployment Control
# Copyright (C) 2010, 2011, 2012, 2013, 2014 Stephan Adig <sh@sourcecode.de>
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Global event registry, empty by default.
# NOTE(review): appears intended to be populated elsewhere at runtime;
# confirm against the modules that import this symbol.
EVENTS = {}
| sadig/DC2 | components/dc2-web-client/dc2/web/client/lib/events.py | Python | gpl-2.0 | 866 |
#########
# Copyright (c) 2015 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
import uuid
import json
import copy
import tempfile
import os
import getpass
import pkg_resources
from jinja2 import Template
from cloudify.utils import setup_logger
import cloudify_agent
from cloudify_agent import VIRTUALENV
from cloudify_agent.api import defaults
logger = setup_logger('cloudify_agent.api.utils')
class _Internal(object):
"""
Contains various internal utility methods. Import this at your own
peril, as backwards compatibility is not guaranteed.
"""
CLOUDIFY_DAEMON_NAME_KEY = 'CLOUDIFY_DAEMON_NAME'
CLOUDIFY_DAEMON_STORAGE_DIRECTORY_KEY = 'CLOUDIFY_DAEMON_STORAGE_DIRECTORY'
CLOUDIFY_DAEMON_USER_KEY = 'CLOUDIFY_DAEMON_USER'
@classmethod
def get_daemon_name(cls):
"""
Returns the name of the currently running daemon.
"""
return os.environ[cls.CLOUDIFY_DAEMON_NAME_KEY]
@classmethod
def get_daemon_storage_dir(cls):
"""
Returns the storage directory the current daemon is stored under.
"""
return os.environ[cls.CLOUDIFY_DAEMON_STORAGE_DIRECTORY_KEY]
@classmethod
def get_daemon_user(cls):
"""
Return the user the current daemon is running under
"""
return os.environ[cls.CLOUDIFY_DAEMON_USER_KEY]
@staticmethod
def get_storage_directory(username=None):
"""
Retrieve path to the directory where all daemon
registered under a specific username will be stored.
:param username: the user
"""
return os.path.join(get_home_dir(username), '.cfy-agent')
@staticmethod
def generate_agent_name():
"""
Generates a unique name with a pre-defined prefix
"""
return '{0}-{1}'.format(
defaults.CLOUDIFY_AGENT_PREFIX,
uuid.uuid4())
@staticmethod
def daemon_to_dict(daemon):
"""
Return a json representation of the daemon by copying the __dict__
attribute value. Also notice that this implementation removes any
attributes starting with the underscore ('_') character.
:param daemon: the daemon.
:type daemon: cloudify_agent.api.pm.base.Daemon
"""
try:
getattr(daemon, '__dict__')
except AttributeError:
raise ValueError('Cannot save a daemon with '
'no __dict__ attribute.')
# don't use deepcopy here because we this will try to copy
# the internal non primitive attributes
original = daemon.__dict__
result = copy.copy(original)
for attr in original:
if attr.startswith('_'):
result.pop(attr)
return result
internal = _Internal()
def get_agent_stats(name, celery):
    """
    Query for agent stats based on agent name.

    :param name: the agent name
    :param celery: the celery client to use
    :return: agents stats
    :rtype: dict
    """
    destination = 'celery@{0}'.format(name)
    inspector = celery.control.inspect(destination=[destination])
    all_stats = inspector.stats() or {}
    return all_stats.get(destination)
def get_home_dir(username=None):
"""
Retrieve the home directory of the given user. If no user was specified,
the currently logged user will be used.
:param username: the user.
"""
if os.name == 'nt':
if username is None:
return os.path.expanduser('~')
else:
return os.path.expanduser('~{0}'.format(username))
else:
import pwd
if username is None:
if 'SUDO_USER' in os.environ:
# command was executed via sudo
# get the original user
username = os.environ['SUDO_USER']
else:
username = getpass.getuser()
return pwd.getpwnam(username).pw_dir
def render_template_to_file(template_path, file_path=None, **values):
    """
    Render a packaged 'jinja' template resource into a file.

    :param template_path: relative path to the template.
    :param file_path: absolute path to the desired output file; a temporary
                      file is used when omitted.
    :param values: keyword arguments passed to jinja.
    :return: path of the rendered file.
    """
    raw_template = get_resource(template_path)
    return content_to_file(Template(raw_template).render(**values), file_path)
def resource_to_tempfile(resource_path):
    """
    Copy a packaged resource into a fresh temporary file.

    :param resource_path: relative path to the resource.
    :return: path to the temporary file.
    """
    return content_to_file(get_resource(resource_path))
def get_resource(resource_path):
    """
    Load a packaged resource into a string.

    :param resource_path: relative path to the resource (under 'resources').
    """
    relative = os.path.join('resources', resource_path)
    return pkg_resources.resource_string(cloudify_agent.__name__, relative)
def get_absolute_resource_path(resource_path):
    """
    Retrieve the absolute filesystem path of a packaged resource.

    :param resource_path: the relative path to the resource
    """
    relative = os.path.join('resources', resource_path)
    return pkg_resources.resource_filename(cloudify_agent.__name__, relative)
def content_to_file(content, file_path=None):
    """
    Write a string to a file, followed by a trailing os.linesep.

    :param content: the text to write.
    :param file_path: absolute path to the desired output file. When not
                      given, a fresh temporary file is created.
    :return: the path of the written file.
    """
    if not file_path:
        # The previous implementation used NamedTemporaryFile(...).name,
        # which leaked the open file handle until garbage collection.
        # mkstemp + close releases the descriptor immediately.
        fd, file_path = tempfile.mkstemp()
        os.close(fd)
    with open(file_path, 'w') as f:
        f.write(content)
        f.write(os.linesep)
    return file_path
def get_executable_path(executable):
    """
    Build the path of an executable inside the agent virtualenv, os agnostic.

    :param executable: the name of the executable
    """
    if os.name == 'posix':
        return '{0}/bin/{1}'.format(VIRTUALENV, executable)
    return '{0}\\Scripts\\{1}'.format(VIRTUALENV, executable)
def get_cfy_agent_path():
    """
    Lookup the path to the cfy-agent executable, os agnostic

    :return: path to the cfy-agent executable
    :rtype: str
    """
    return get_executable_path('cfy-agent')
def get_pip_path():
    """
    Lookup the path to the pip executable, os agnostic

    :return: path to the pip executable
    :rtype: str
    """
    return get_executable_path('pip')
def get_celery_path():
    """
    Lookup the path to the celery executable, os agnostic

    :return: path to the celery executable
    :rtype: str
    """
    return get_executable_path('celery')
def get_python_path():
    """
    Lookup the path to the python executable, os agnostic

    :return: path to the python executable
    :rtype: str
    """
    return get_executable_path('python')
def env_to_file(env_variables, destination_path=None, posix=True):
"""
Write environment variables to a file.
:param env_variables: environment variables
:param destination_path: destination path of a file where the
environment variables will be stored. the
stored variables will be a bash script you can
then source.
:param posix: false if the target of the generated file will be a
windows machine
"""
if not env_variables:
return None
if not destination_path:
destination_path = tempfile.mkstemp(suffix='env')[1]
if posix:
linesep = '\n'
else:
linesep = '\r\n'
with open(destination_path, 'w') as f:
if posix:
f.write('#!/bin/bash')
f.write(linesep)
f.write('# Environmnet file generated by Cloudify. Do not delete '
'unless you know exactly what you are doing.')
f.write(linesep)
f.write(linesep)
else:
f.write('rem Environmnet file generated by Cloudify. Do not '
'delete unless you know exactly what you are doing.')
f.write(linesep)
for key, value in env_variables.iteritems():
if posix:
f.write('export {0}={1}'.format(key, value))
f.write(linesep)
else:
f.write('set {0}={1}'.format(key, value))
f.write(linesep)
f.write(linesep)
return destination_path
def stringify_values(dictionary):
    """
    Given a dictionary convert all values into the string representation of
    the value. useful for dicts that only allow string values (like os.environ)

    :param dictionary: the dictionary to convert
    :return: a copy of the dictionary where all values are now string.
    :rtype: dict
    """
    dict_copy = copy.deepcopy(dictionary)
    # .items() instead of the Python-2-only .iteritems(); replacing values
    # of existing keys while iterating is safe (the key set is unchanged).
    for key, value in dict_copy.items():
        if isinstance(value, dict):
            dict_copy[key] = stringify_values(value)
        else:
            dict_copy[key] = str(value)
    return dict_copy
def purge_none_values(dictionary):
    """
    Given a dictionary remove all keys whose value is None. Does not purge
    nested values.

    :param dictionary: the dictionary to convert
    :return: a (deep) copy of the dictionary where no key has a None value
    :rtype: dict
    """
    dict_copy = copy.deepcopy(dictionary)
    # Plain key iteration instead of the Python-2-only .iteritems()
    # (the old loop also bound a value variable it never used).
    for key in dictionary:
        if dictionary[key] is None:
            del dict_copy[key]
    return dict_copy
def json_load(file_path):
    """
    Load a JSON file into a dictionary.

    :param file_path: path to the json file
    """
    with open(file_path) as json_file:
        raw = json_file.read()
    return json_loads(raw)
def json_loads(content):
    """
    Load a JSON string into a dictionary.

    If the string is not valid json, the offending content is included in
    the raised exception's message.

    :param content: the string to load
    """
    try:
        return json.loads(content)
    except ValueError as error:
        message = '{0}:{1}{2}'.format(str(error), os.linesep, content)
        raise ValueError(message)
| geokala/cloudify-agent | cloudify_agent/api/utils.py | Python | apache-2.0 | 10,561 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from . import botsglobal
from . import models
# Module-level cache: filled once on the first request, reused afterwards.
my_context = {} # save vars initialised at startup
def set_context(request):
    ''' set variables in the context of templates.

        Acts as a Django context processor: returns a dict whose keys are
        available in every template. Most values come from bots.ini or the
        database and are cached in ``my_context`` on first use; only the
        request-dependent ``bots_http_path`` is recomputed per request.
    '''
    global my_context
    if not my_context:
        #most context vars are from bots.ini or database. initialise these at startup
        my_context['bots_environment_text'] = botsglobal.ini.get('webserver', 'environment_text', ' ')
        my_context['bots_environment_text_color'] = botsglobal.ini.get('webserver', 'environment_text_color', '#000000')
        my_context['botslogo'] = botsglobal.ini.get('webserver', 'botslogo', 'bots/botslogo.html')
        my_context['bots_touchscreen'] = botsglobal.ini.getboolean('webserver', 'bots_touchscreen', False)
        # NOTE(review): 'bots_mindate' here and 'bots_minDate' below hold the
        # same value under two spellings -- confirm which one templates use.
        my_context['bots_mindate'] = 0 - botsglobal.ini.getint('settings', 'maxdays', 30)
        my_context['menu_automaticretrycommunication'] = botsglobal.ini.getboolean(
            'webserver', 'menu_automaticretrycommunication', False)
        my_context['menu_cleanup'] = botsglobal.ini.getboolean('webserver', 'menu_cleanup', False)
        #in bots.ini it is possible to add custom menus
        if botsglobal.ini.has_section('custommenus'):
            my_context['custom_menuname'] = botsglobal.ini.get('custommenus', 'menuname', 'Custom')
            my_context['custom_menus'] = [(key.title(), value)
                                          for key, value in botsglobal.ini.items('custommenus') if key != 'menuname']
        # in bots.ini can be indicated that all routes (in config->routes, if
        # route is activated) can be run individually via menu
        if botsglobal.ini.get('webserver', 'menu_all_routes', '') == 'notindefaultrun':
            my_context['menu_all_routes'] = list(models.routes.objects.values_list('idroute', flat=True).filter(
                active=True).filter(notindefaultrun=True).order_by('idroute').distinct())
        elif botsglobal.ini.getboolean('webserver', 'menu_all_routes', False):
            my_context['menu_all_routes'] = list(models.routes.objects.values_list(
                'idroute', flat=True).filter(active=True).order_by('idroute').distinct())
    #bots_http_path is used in name of browser-window; this is derived from url/path
    bots_http_path = request.get_full_path()
    if bots_http_path.startswith('/admin/bots/'):
        bots_http_path = bots_http_path[12:]
    else:
        bots_http_path = bots_http_path[1:]
    if bots_http_path:
        if bots_http_path[-1] == '/':
            bots_http_path = bots_http_path[:-1]
    else:
        bots_http_path = 'home'
    my_context['bots_http_path'] = bots_http_path
    my_context['bots_minDate'] = 0 - botsglobal.ini.getint('settings', 'maxdays', 30)
    #***variables are set now for template use, eg {{ bots_environment_text }}
    return my_context
| WouterVH/bots | src/bots/bots_context.py | Python | gpl-3.0 | 2,922 |
from math import sqrt
def update(index, value, blocks, input_arr):
    """Point update: set input_arr[index] = value and fix its block sum.

    :param index: 0-based position to update
    :param value: new value for that position
    :param blocks: per-block sums as built by formBlocks
    :param input_arr: the underlying array (mutated in place)
    """
    # The block an element belongs to must be computed with the same
    # *integer* block size used by formBlocks/query, i.e. int(sqrt(n)).
    # The old code divided by the float sqrt(n), which places trailing
    # elements in the wrong block (e.g. n=10: index 9 -> int(9/3.162...) = 2,
    # but formBlocks puts it in block 9 // 3 = 3).
    block_size = int(sqrt(len(input_arr)))
    blocks[index // block_size] += value - input_arr[index]
    input_arr[index] = value
#assuming 0 based indexing
def query(low, high, blocks, input_arr):
    """Return the sum of input_arr[low..high], both ends inclusive (0-based).

    Uses the precomputed per-block sums for complete blocks and falls back
    to element-by-element addition for the partial blocks at either end,
    giving O(sqrt(n)) per query.
    """
    block_size = int(sqrt(len(input_arr)))
    acc = 0
    i = low
    # Leading partial block: walk elements until a block boundary.
    while i % block_size and i < high:
        acc += input_arr[i]
        i += 1
    # Whole blocks: add the precomputed sums.
    while i + block_size <= high:
        acc += blocks[i // block_size]
        i += block_size
    # Trailing partial block: walk the remaining elements.
    while i <= high:
        acc += input_arr[i]
        i += 1
    return acc
#divide the list in blocks of size sqrt(n), sum of elements in each block is stored in blocks
def formBlocks(input_arr):
    """Build the per-block sums for sqrt decomposition.

    The array is divided into blocks of size int(sqrt(n)); blocks[b] holds
    the sum of the elements in block b.
    """
    block_size = int(sqrt(len(input_arr)))
    blocks = [0] * (len(input_arr) // block_size + 1)
    for position, element in enumerate(input_arr):
        blocks[position // block_size] += element
    return blocks
'''
Applying square root decomposition to perform range queries
for finding the sum of numbers in a given range
Indexing : 0 based in list
'''
def main():
    """Read space-separated integers from stdin, answer a few fixed
    range-sum queries (inclusive 0-based indices), then perform a point
    update and re-query to show the structure stays consistent."""
    values = list(map(int, input().split()))
    block_sums = formBlocks(values)
    # Range-sum queries over [0,2], [3,7] and [1,4].
    print(query(0, 2, block_sums, values))
    print(query(3, 7, block_sums, values))
    print(query(1, 4, block_sums, values))
    # Point update: set values[3] = 1, then verify via the same query.
    update(3, 1, block_sums, values)
    print(query(1, 4, block_sums, values))
if __name__ == '__main__':
    main()
Input:
2 3 4 6 8 9 1 -1 0 3
Expected output:
9
23
21
18
'''
| jainaman224/Algo_Ds_Notes | Square_Root_Decomposition/Square_Root_Decomposition.py | Python | gpl-3.0 | 1,791 |
from unittest import TestCase
from whales.viscous_drag import ViscousDragModel
import numpy as np
import numpy.testing
from numpy.testing import assert_array_almost_equal_nulp
class MyTestCase(TestCase):
    # Thin TestCase subclass adding an exact (element-wise) array-equality
    # assertion used by the concrete test cases below.
    def assertArraysEqual(self, a, b):
        """Assert that arrays ``a`` and ``b`` are exactly equal."""
        numpy.testing.assert_array_equal(a, b)
class TaperedMemberTestCase(MyTestCase):
    """Test simple model with one member"""
    def setUp(self):
        # A single vertical member from z=0 to z=-10, constant 2.2 m diameter
        # down to 3 m, tapering linearly to 5 m diameter between 3 m and 5 m,
        # then constant; discretised into 0.5 m strips.
        config = {
            'drag coefficient': 0.7,
            'members': [{
                'end1': [0, 0, 0],
                'end2': [0, 0, -10],
                'diameter': [[0, 3, 5, 10], [2.2, 2.2, 5, 5]],
                'strip width': 0.5,
            }],
        }
        self.model = ViscousDragModel(config)
    def test_model_elements(self):
        """Check element strips setup correctly"""
        m = self.model
        # 10 m member / 0.5 m strips -> 20 elements of equal length.
        self.assertArraysEqual(m.element_lengths, 0.5 * np.ones(20))
        # Centres of strips
        centres = np.zeros((20, 3))
        centres[:,2] = -np.arange(0.25, 10, 0.5)
        self.assertArraysEqual(m.element_centres, centres)
        # Element diameters
        self.assertEqual(m.element_diameters[ 0], 2.2)
        self.assertEqual(m.element_diameters[ 5], 2.2)
        self.assertEqual(m.element_diameters[10], 5.0)
        self.assertEqual(m.element_diameters[-1], 5.0)
        # Element 7's centre sits at 3.75 m, inside the 3-5 m taper, so the
        # diameter is linearly interpolated 0.75 m into the 2 m taper span.
        self.assertEqual(m.element_diameters[7], 2.2 + (5.0 - 2.2)*0.75/(5-3))
        # Element axes
        self.assertArraysEqual(m.element_axes, np.array([np.eye(3)] * 20))
class CylinderTestCase(MyTestCase):
    """Test simple model with vertical cylinder"""
    def setUp(self):
        # Uniform vertical cylinder, diameter 2.3, from z=0 down to z=-10,
        # discretised into 10 unit-width strips.
        config = {
            'drag coefficient': 0.7,
            'inertia coefficient': 2.0,
            'members': [{
                'end1': [0, 0, 0],
                'end2': [0, 0, -10],
                'diameter': 2.3,
                'strip width': 1,
            }],
        }
        self.model = ViscousDragModel(config)
    def test_model_elements(self):
        """Check element strips setup correctly"""
        m = self.model
        self.assertArraysEqual(m.element_lengths, np.ones(10))
        # Centres of strips
        centres = np.zeros((10, 3))
        centres[:,2] = -np.arange(0.5, 10, 1)
        self.assertArraysEqual(m.element_centres, centres)
        # Element diameters
        self.assertArraysEqual(m.element_diameters, 2.3)
        # Element axes
        self.assertArraysEqual(m.element_axes, np.array([np.eye(3)] * 10))
    def test_wave_velocity_transfer_func(self):
        """Test wave velocity transfer function"""
        w = np.array([1,2]) # frequencies to test
        H_uf = self.model.wave_velocity_transfer_function(w)
        # With waves in x-direction, sideways velocity should be zero
        self.assertArraysEqual(H_uf[:,:,1], 0)
        # Check variation in depth: exp(kz), using the deep-water
        # dispersion relation k = w**2 / g (g = 9.81).
        iz1 = 3
        iz2 = 8
        z = self.model.element_centres[:,2]
        for i in range(2):
            assert_array_almost_equal_nulp(H_uf[i,iz1,:] / np.exp(w[i]**2/9.81*z[iz1]),
                                           H_uf[i,iz2,:] / np.exp(w[i]**2/9.81*z[iz2]))
        # Check all x velocities are in-phase and real, all z are imaginary
        self.assertTrue(np.isreal(     H_uf[:,:,0]).all())
        self.assertTrue(np.isreal(1j * H_uf[:,:,2]).all())
    def test_structural_velocity_transfer_func(self):
        """Test structural velocity with special cases"""
        w = np.array([1,2]) # frequencies to test
        # Case 1: pure surge motion
        H1 = np.zeros((2, 6)) # shape (freq, xyzXYZ)
        H1[:,0] = 1 # maximum surge at maximum datum wave height
        H_us = self.model.structural_velocity_transfer_function(w, H1)
        # all elements should have same surge velocity; all other velocities zero
        # at t=0, velocity is zero and becoming negative 90 deg later
        self.assertArraysEqual(H_us[1,:,0], 2j)
        self.assertArraysEqual(H_us[0,:,0], 1j)
        self.assertArraysEqual(H_us[:,:,1:], 0)
        # Case 2: pure roll motion
        H2 = np.zeros((2, 6)) # shape (freq, xyzXYZ)
        H2[:,3] = 1 # maximum roll at maximum datum wave height
        H_us = self.model.structural_velocity_transfer_function(w, H2)
        # x & z velocity should be zero
        self.assertArraysEqual(H_us[:,:,[0,2]], 0)
        # y velocity corresponding to rotation about origin (check bottom)
        # at t=0, ang. velocity is zero and becoming negative 90 deg later
        # Velocity of bottom element = 9.5 * ang vel
        # (bottom element centre is at z = -9.5)
        self.assertArraysEqual(H_us[0,-1,1], 9.5 * 1j)
        self.assertArraysEqual(H_us[1,-1,1], 9.5 * 2j)
    def test_added_mass(self):
        """Test added mass calculation from Morison elements"""
        A = self.model.Morison_added_mass()
        # Expected surge added mass: (Cm-1) * rho * V
        # with Cm = 2.0 from setUp, rho = 1025, V = length * pi d^2 / 4.
        self.assertEqual(A[0,0], 1 * 1025 * 10 * np.pi * 2.3**2 / 4)
class ResolvingTestCase(MyTestCase):
    """Test differently-oriented members"""
    def setUp(self):
        # Only the z-direction member is currently active; the x-direction
        # and 30-degree members are commented out (expected values for them
        # remain, commented out, in the tests below).
        config = {
            'drag coefficient': 0.7,
            'members': [{
                # # Member in x-direction
                # 'end1': [0, 0, 0],
                # 'end2': [1, 0, 0],
                # 'diameter': 1,
                # 'strip width': 1,
                # }, {
                # Member in z-direction
                'end1': [0, 0, 0],
                'end2': [0, 0, 1],
                'diameter': 1,
                'strip width': 1,
                # }, {
                # # Member in xy plane at 30deg from x axis
                # 'end1': [0, 0, 0],
                # 'end2': [np.cos(30*np.pi/180), np.sin(30*np.pi/180), 0],
                # 'diameter': 1,
                # 'strip width': 1,
            }],
        }
        self.model = ViscousDragModel(config)
    def test_resolve_to_local_coords(self):
        """Calculate locally-normal vector components"""
        resolve = self.model.resolve_perpendicular_to_elements
        # Test 1: velocity in x-direction
        v1 = [1, 0, 0]
        self.assertArraysEqual(resolve(v1), [
            # [0, 0], # member in x-direction, no normal velocity
            [1, 0], # member in z-direction
            # [np.nan, np.nan], # member 30deg from x-axis
        ])
        # Test 1: velocity in 3d
        v2 = [1, 1, 2]
        self.assertArraysEqual(resolve(v2), [
            # [0, 0], # member in x-direction, no normal velocity
            [1, 1], # member in z-direction
            # [np.nan, np.nan], # member 30deg from x-axis
        ])
| ricklupton/whales | tests/test_viscous_drag.py | Python | mit | 6,533 |
# Tai Sakuma <tai.sakuma@gmail.com>
import logging
from .parse_indices_config import parse_indices_config
from .BackrefMultipleArrayReader import BackrefMultipleArrayReader
##__________________________________________________________________||
class KeyValueComposer:
    """Compose (key, value) tuples from attributes of an event.

    This class can be used with BEvents.  It supports inclusive indices
    (``'*'``) and back references in the index configuration.

    Parameters
    ----------
    keyAttrNames : sequence of str, optional
        Event attribute names used to build keys.
    binnings : sequence of callables, optional
        One binning per key attribute, applied to each key element.
        A binning may return None for out-of-range input; entries
        containing None are dropped.
    keyIndices : sequence, optional
        Index configuration for the key attributes (defaults to None
        per attribute).
    valAttrNames : sequence of str, optional
        Event attribute names used to build values.
    valIndices : sequence, optional
        Index configuration for the value attributes.
    """
    def __init__(self, keyAttrNames=None, binnings=None, keyIndices=None,
                 valAttrNames=None, valIndices=None):

        # Preserve the constructor arguments verbatim for __repr__().
        name_value_pairs = (
            ('keyAttrNames', keyAttrNames),
            ('binnings', binnings),
            ('keyIndices', keyIndices),
            ('valAttrNames', valAttrNames),
            ('valIndices', valIndices),
        )
        self._repr = '{}({})'.format(
            self.__class__.__name__,
            ', '.join('{}={!r}'.format(n, v) for n, v in name_value_pairs),
        )

        # Normalize to tuples; a missing index configuration defaults to
        # one None per attribute name.
        key_attr_names = tuple(keyAttrNames) if keyAttrNames is not None else ()
        key_idxs = tuple(keyIndices) if keyIndices is not None else (None, )*len(key_attr_names)
        val_attr_names = tuple(valAttrNames) if valAttrNames is not None else ()
        val_idxs = tuple(valIndices) if valIndices is not None else (None, )*len(val_attr_names)

        if len(key_attr_names) != len(key_idxs):
            raise ValueError(
                "the two tuples must have the same length: key_attr_names={}, key_idxs={}".format(
                    key_attr_names, key_idxs
                )
            )

        if len(val_attr_names) != len(val_idxs):
            raise ValueError(
                "the two tuples must have the same length: val_attr_names={}, val_idxs={}".format(
                    val_attr_names, val_idxs
                )
            )

        self.binnings = tuple(binnings) if binnings is not None else None

        if self.binnings is not None and len(key_attr_names) != len(self.binnings):
            raise ValueError(
                "the two tuples must have the same length: key_attr_names={}, self.binnings={}".format(
                    key_attr_names, self.binnings
                )
            )

        # Keys and values are read together: the first _lenkey elements of
        # each row returned by the array reader are key fields, the rest
        # are value fields.
        self._lenkey = len(key_attr_names)
        self.attr_names = key_attr_names + val_attr_names
        self.idxs_conf = key_idxs + val_idxs
        self.backref_idxs, self.idxs_conf = parse_indices_config(self.idxs_conf)
        self.ArrayReader = BackrefMultipleArrayReader

    def __repr__(self):
        return self._repr

    def begin(self, event):
        """Prepare for *event*; deactivate if any attribute is missing."""
        arrays = self._collect_arrays(event, self.attr_names)
        self.active = arrays is not None
        if not self.active:
            return
        self._array_reader = self.ArrayReader(arrays, self.idxs_conf, self.backref_idxs)

    def _collect_arrays(self, event, attr_names):
        """Return the listed event attributes, or None if any is absent."""
        ret = []
        for varname in attr_names:
            try:
                attr = getattr(event, varname)
            except AttributeError as e:
                logger = logging.getLogger(__name__)
                logger.warning('{!r}: {!s}'.format(self, e))
                return None
            ret.append(attr)
        return ret

    def __call__(self, event):
        """Return a tuple of (key, value) pairs for *event*.

        Returns an empty tuple when inactive (see begin()).
        """
        if not self.active:
            return ()
        try:
            arrays = self._array_reader.read()
        except Exception as e:
            logger = logging.getLogger(__name__)
            logger.error(e)
            logger.error(self)
            raise
        # Each row of ``arrays`` holds the key fields followed by the value
        # fields; split each row at _lenkey, e.g.
        #   (1001, 15.3, -1.2, 0.1, 16.2) -> ((1001, 15.3, -1.2), (0.1, 16.2))
        keyvals = tuple((e[:self._lenkey], e[self._lenkey:]) for e in arrays)

        # Apply the binnings to the key elements, e.g. 15.3 -> 15.
        if self.binnings:
            keyvals = tuple(
                (tuple(b(k) for b, k in zip(self.binnings, kk)), vv)
                for kk, vv in keyvals
            )

        # Drop entries in which any key or value element is None
        # (e.g. an out-of-range result from a binning).
        keyvals = tuple(e for e in keyvals if None not in e[0] and None not in e[1])
        return keyvals
##__________________________________________________________________||
| alphatwirl/alphatwirl | alphatwirl/summary/KeyValueComposer.py | Python | bsd-3-clause | 6,270 |
# Copyright 2012 VMware, Inc.
#
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import eventlet
import httplib
import json
import urllib
from neutron.openstack.common import log as logging
from neutron.plugins.vmware.api_client import request
LOG = logging.getLogger(__name__)
USER_AGENT = "Neutron eventlet client/2.0"
class EventletApiRequest(request.ApiRequest):
    '''Eventlet-based ApiRequest class.

    This class will form the basis for eventlet-based ApiRequest classes
    '''

    # Maximum number of green threads present in the system at one time.
    API_REQUEST_POOL_SIZE = request.DEFAULT_API_REQUEST_POOL_SIZE

    # Pool of green threads. One green thread is allocated per incoming
    # request. Incoming requests will block when the pool is empty.
    API_REQUEST_POOL = eventlet.GreenPool(API_REQUEST_POOL_SIZE)

    # A unique id is assigned to each incoming request. When the current
    # request id reaches MAXIMUM_REQUEST_ID it wraps around back to 0.
    MAXIMUM_REQUEST_ID = request.DEFAULT_MAXIMUM_REQUEST_ID

    # The request id for the next incoming request.
    CURRENT_REQUEST_ID = 0

    def __init__(self, client_obj, url, method="GET", body=None,
                 headers=None,
                 request_timeout=request.DEFAULT_REQUEST_TIMEOUT,
                 retries=request.DEFAULT_RETRIES,
                 auto_login=True,
                 redirects=request.DEFAULT_REDIRECTS,
                 http_timeout=request.DEFAULT_HTTP_TIMEOUT, client_conn=None):
        '''Constructor.'''
        self._api_client = client_obj
        self._url = url
        self._method = method
        self._body = body
        self._headers = headers or {}
        self._request_timeout = request_timeout
        self._retries = retries
        self._auto_login = auto_login
        self._redirects = redirects
        self._http_timeout = http_timeout
        self._client_conn = client_conn
        self._abort = False

        self._request_error = None

        if "User-Agent" not in self._headers:
            self._headers["User-Agent"] = USER_AGENT

        self._green_thread = None
        # Retrieve and store this instance's unique request id.
        self._request_id = self.CURRENT_REQUEST_ID
        # Update the class variable that tracks request id.
        # Request IDs wrap around at MAXIMUM_REQUEST_ID.
        # NOTE: assign on the class, not on ``self`` -- assigning to
        # ``self.CURRENT_REQUEST_ID`` would only create an instance
        # attribute shadowing the class counter, leaving the class value
        # at 0 so that every request would be issued with id 0.
        next_request_id = self._request_id + 1
        next_request_id %= self.MAXIMUM_REQUEST_ID
        EventletApiRequest.CURRENT_REQUEST_ID = next_request_id

    @classmethod
    def _spawn(cls, func, *args, **kwargs):
        '''Allocate a green thread from the class pool.'''
        return cls.API_REQUEST_POOL.spawn(func, *args, **kwargs)

    def spawn(self, func, *args, **kwargs):
        '''Spawn a new green thread with the supplied function and args.'''
        return self.__class__._spawn(func, *args, **kwargs)

    @classmethod
    def joinall(cls):
        '''Wait for all outstanding requests to complete.'''
        return cls.API_REQUEST_POOL.waitall()

    def join(self):
        '''Wait for instance green thread to complete.'''
        if self._green_thread is not None:
            return self._green_thread.wait()
        # NOTE(review): an Exception instance is *returned*, not raised,
        # presumably so that callers treat it like any other failed
        # request result -- confirm before changing.
        return Exception(_('Joining an invalid green thread'))

    def start(self):
        '''Start request processing.'''
        self._green_thread = self.spawn(self._run)

    def copy(self):
        '''Return a copy of this request instance.'''
        return EventletApiRequest(
            self._api_client, self._url, self._method, self._body,
            self._headers, self._request_timeout, self._retries,
            self._auto_login, self._redirects, self._http_timeout)

    def _run(self):
        '''Method executed within green thread.'''
        if self._request_timeout:
            # No timeout exception escapes the with block.
            with eventlet.timeout.Timeout(self._request_timeout, False):
                return self._handle_request()

            LOG.info(_('[%d] Request timeout.'), self._rid())
            self._request_error = Exception(_('Request timeout'))
            return None
        else:
            return self._handle_request()

    def _handle_request(self):
        '''First level request handling.

        Retries the request up to self._retries times; authentication
        failures retry immediately, service-unavailable responses retry
        after a short back-off sleep.
        '''
        attempt = 0
        timeout = 0
        response = None
        while response is None and attempt <= self._retries:
            eventlet.greenthread.sleep(timeout)
            attempt += 1

            req = self._issue_request()
            # automatically raises any exceptions returned.
            if isinstance(req, httplib.HTTPResponse):
                timeout = 0
                if attempt <= self._retries and not self._abort:
                    if req.status in (httplib.UNAUTHORIZED, httplib.FORBIDDEN):
                        continue
                    elif req.status == httplib.SERVICE_UNAVAILABLE:
                        timeout = 0.5
                        continue
                    # else fall through to return the error code

                LOG.debug(_("[%(rid)d] Completed request '%(method)s %(url)s'"
                            ": %(status)s"),
                          {'rid': self._rid(), 'method': self._method,
                           'url': self._url, 'status': req.status})
                self._request_error = None
                response = req
            else:
                LOG.info(_('[%(rid)d] Error while handling request: %(req)s'),
                         {'rid': self._rid(), 'req': req})
                self._request_error = req
                response = None
        return response
class LoginRequestEventlet(EventletApiRequest):
    '''Process a login request.'''

    def __init__(self, client_obj, user, password, client_conn=None,
                 headers=None):
        headers = {} if headers is None else headers
        # A caller-supplied headers dict is updated in place.
        headers["Content-Type"] = "application/x-www-form-urlencoded"
        credentials = urllib.urlencode({"username": user,
                                        "password": password})
        super(LoginRequestEventlet, self).__init__(
            client_obj, "/ws.v1/login", "POST", credentials, headers,
            auto_login=False, client_conn=client_conn)

    def session_cookie(self):
        '''Return the session cookie of a successful login, else None.'''
        if not self.successful():
            return None
        return self.value.getheader("Set-Cookie")
class GetApiProvidersRequestEventlet(EventletApiRequest):
    '''Get a list of API providers.'''

    def __init__(self, client_obj):
        super(GetApiProvidersRequestEventlet, self).__init__(
            client_obj, "/ws.v1/control-cluster/node?fields=roles",
            "GET", auto_login=True)

    def api_providers(self):
        """Parse api_providers from response.

        Returns: api_providers in [(host, port, is_ssl), ...] format
        """
        def _parse_listen_addr(addr):
            # (pssl|ptcp):<ip>:<port> => (host, port, is_ssl)
            fields = addr.split(':')
            return (fields[1], int(fields[2]), fields[0] == 'pssl')

        try:
            if self.successful():
                providers = []
                for node in json.loads(self.value.body).get('results', []):
                    for role in node.get('roles', []):
                        if role.get('role') != 'api_provider':
                            continue
                        addr = role.get('listen_addr')
                        if addr:
                            providers.append(_parse_listen_addr(addr))
                return providers
        except Exception as e:
            LOG.warn(_("[%(rid)d] Failed to parse API provider: %(e)s"),
                     {'rid': self._rid(), 'e': e})
            # intentionally fall through
        return None
class GenericRequestEventlet(EventletApiRequest):
    '''Handle a generic request.'''

    def __init__(self, client_obj, method, url, body, content_type,
                 auto_login=False,
                 request_timeout=request.DEFAULT_REQUEST_TIMEOUT,
                 http_timeout=request.DEFAULT_HTTP_TIMEOUT,
                 retries=request.DEFAULT_RETRIES,
                 redirects=request.DEFAULT_REDIRECTS):
        super(GenericRequestEventlet, self).__init__(
            client_obj, url, method, body,
            {"Content-Type": content_type},
            request_timeout=request_timeout, retries=retries,
            auto_login=auto_login, redirects=redirects,
            http_timeout=http_timeout)

    def session_cookie(self):
        '''Return the session cookie of a successful request, else None.'''
        return (self.value.getheader("Set-Cookie")
                if self.successful() else None)


request.ApiRequest.register(EventletApiRequest)
| vijayendrabvs/hap | neutron/plugins/vmware/api_client/eventlet_request.py | Python | apache-2.0 | 9,110 |
from django.contrib.auth.decorators import login_required
from django.views.generic import TemplateView
from flop.cooking.forms import MealForm, MealContributionFormSet
from flop.decorators import view_decorator
@view_decorator(login_required)
class IndexView(TemplateView):
    """Dashboard index page.

    NOTE(review): ``view_decorator`` presumably applies ``login_required``
    to this class-based view's dispatch -- confirm in ``flop.decorators``.
    """
    template_name = 'dashboard/index.html'
| sbrandtb/flop | flop/dashboard/views.py | Python | mit | 320 |
# coding=utf-8
# Copyright (c) 2015 EMC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import unicode_literals
import json
import os
from unittest import TestCase
from hamcrest import assert_that, equal_to, only_contains, raises, none
from storops.exception import UnityNasServerNameUsedError, UnityException
from storops.unity.resp import RestResponse
from storops_test.utils import read_test_file
__author__ = 'Cedric Zhuang'
def read_json(folder, filename):
    """Load and parse a JSON fixture from unity/rest_data/<folder>/."""
    raw = read_test_file(os.path.join('unity', 'rest_data', folder), filename)
    # json.loads decodes str input directly; the legacy ``encoding``
    # keyword was ignored on Python 3 and removed in Python 3.9, where
    # passing it raises TypeError.
    return json.loads(raw)


def read_error_json(filename):
    """Load a JSON fixture from the ``error`` sub-folder."""
    return read_json('error', filename)
class RestResponseTest(TestCase):
    # Pagination behaviour of RestResponse, driven by the
    # metric/metrics_page_1.json fixture (page 1 of a multi-page reply).
    def test_has_next_page(self):
        resp = RestResponse(read_json('metric', 'metrics_page_1.json'))
        assert_that(resp.next_page, equal_to(2))
        assert_that(resp.has_next_page, equal_to(True))
    def test_has_current_page(self):
        resp = RestResponse(read_json('metric', 'metrics_page_1.json'))
        assert_that(resp.current_page, equal_to(1))
class UnityErrorTest(TestCase):
    # Error parsing and raising behaviour of RestResponse, driven by the
    # error/409.json (name-in-use conflict) and error/200.json fixtures.
    def test_get_properties(self):
        body = read_error_json('409.json')
        resp = RestResponse(body)
        assert_that(resp.has_error(), equal_to(True))
        err = resp.error
        assert_that(err.existed, equal_to(True))
        assert_that(str(err.created),
                    equal_to('2016-03-16 15:13:45.977000+00:00'))
        assert_that(err.get_messages(), only_contains(
            'The name of NAS server is already in use. '
            '(Error Code:0x6702024)'))
        assert_that(err.http_status_code, equal_to(409))
        assert_that(err.error_code, equal_to(108011556))
    def test_raise_if_err_409(self):
        # A 409 body maps to the specific UnityNasServerNameUsedError.
        def f():
            body = read_error_json('409.json')
            resp = RestResponse(body)
            resp.raise_if_err()
        assert_that(f, raises(UnityNasServerNameUsedError, 'in use'))
    def test_raise_if_err_nothing(self):
        # A 200 body carries no error; raise_if_err() must be a no-op.
        body = read_error_json('200.json')
        resp = RestResponse(body)
        resp.raise_if_err()
class UnityExceptionTest(TestCase):
    """Error-code propagation on UnityException."""

    def test_unity_exception_error_code(self):
        error = RestResponse(read_error_json('409.json')).error
        assert_that(UnityException(error).error_code, equal_to(108011556))

    def test_unity_exception_default_error_code(self):
        ex = UnityException()
        assert_that(ex.error_code, none())
| emc-openstack/storops | storops_test/unity/test_resp.py | Python | apache-2.0 | 3,036 |
import seaborn as sns
from pudzu.charts import *
from pudzu.sandbox.bamboo import *
# Country metadata: continent and flag URL per country name; '|'-separated
# aliases in the CSV are exploded into one row per name.
countries = pd.read_csv("datasets/countries.csv")[["country", "continent", "flag"]].split_columns('country', "|").explode('country').set_index('country')
df = pd.read_csv("datasets/nobels.csv")
# Keep only Literature laureates; 'countries' is a '|'-separated multi-value field.
df = df[df['category'] == "Literature"][["name", "countries"]].split_columns("countries", "|")
# Continent set per laureate, as a sorted tuple so it can be grouped on.
df = df.assign_rows(continents=lambda r: tuple(sorted(set(countries.continent[c] for c in r.countries))))
NORDIC = ["Iceland", "Finland", "Sweden", "Norway", "Denmark"]
# Laureate counts keyed by Nordic-country tuple and by continent tuple.
nordic_counts = df.update_columns(countries=lambda cs: tuple(sorted(set(c for c in cs if c in NORDIC)))).filter_rows(lambda r: any(r.countries)).groupby("countries").count().name
continent_counts = df.groupby("continents").count().name
counts = pd.concat((nordic_counts, continent_counts))
# chart
# Chart row -> region/country names; a counts column belongs to a row if any
# of these names appears in its tuple key.
categories = { "Old World": ["Asia", "Africa"], "New World": ["South America", "North America", "Oceania"], "Europe": ["Europe"], "Nordic": NORDIC }
# Row label text, with (approximate) populations baked into the string.
catlabels = { "Europe": "Europe (740m)", "New World": "Americas & Oceania (1,040m)", "Old World": "Asia & Africa (5,650m)", "Nordic": "Nordic countries (27m)" }
table = pd.DataFrame([{subcat: counts[subcat] if any(c in subcat for c in categories[cat]) else 0 for subcat in counts.index } for cat in categories], index=categories)
# Order rows by total count (descending) via a temporary helper column.
table = table.assign_rows(sum=lambda r: r.sum()).sort_values("sum", ascending=False).drop("sum", axis=1)
table = table[[ # hack #1 to get nice ordering
    ("Sweden",), ("Denmark",), ("Norway",), ("Finland",), ("Iceland",),
    ("Europe",),
    ("North America",), ("Europe", "North America"),
    ("Asia",), ("Asia", "Europe"),
    ("South America",), ("Europe", "South America"),
    ("Europe", "Oceania"),
    ("Africa",), ("Africa", "Europe"), ("Africa", "Oceania")
    ]]
# Bar geometry and colour scheme (one palette entry per CONTINENTS entry).
WIDTH = 80
BAR = 3
PALETTE = tmap(RGBA, sns.xkcd_palette(["windows blue", "faded green", "amber", "dusty purple", "red", "brown"]))
CONTINENTS = [ "Europe", "North America", "South America", "Oceania", "Asia","Africa" ]
def continent_colour(c):
    # Palette index matches the continent's position in CONTINENTS.
    return PALETTE[CONTINENTS.index(c)]
def stripe(c1, c2=None):
    # Two-colour horizontal stripe tile, BAR pixels per band
    # (solid colour when c2 is omitted).
    if c2 is None: c2 = c1
    return Image.from_column([Image.new("RGBA", (100,BAR), c1), Image.new("RGBA", (100,BAR), c2)])
def stripe_pattern(height, c1, c2=None):
    # Tile the stripe into a WIDTH x height swatch.
    return Image.from_pattern(stripe(c1, c2), (WIDTH,height))
def colorfn(c,r):
    # Bar colour for column c / row r: striped continent colours for
    # continent tuples, otherwise a strip built from the two most frequent
    # colours of the country's flag image.
    cs = table.columns[c] if isinstance(c, int) else (c,)
    # For dual-continent columns, put the row's own continent second so the
    # stripe order is consistent.
    if len(cs) == 2 and cs[0] not in categories[table.index[r]]: cs = cs[1], cs[0]
    if any(c in CONTINENTS for c in cs):
        return lambda size: stripe_pattern(size[1], *[continent_colour(c) for c in reversed(cs)])
    flagcolors = sorted(Image.from_url_with_cache(countries.flag[cs[0]]).convert("RGBA").getcolors(), reverse=True)
    return lambda size: Image.from_row([Image.new("RGBA", (16, size[1]), flagcolors[0][1]),
                                        Image.new("RGBA", (8, size[1]), flagcolors[1][1]),
                                        Image.new("RGBA", (WIDTH-16-8, size[1]), flagcolors[0][1])])
def rlabelfn(r):
    # Row label rendered from catlabels, wrapped to the bar width.
    return Image.from_text(catlabels[table.index[r]], arial(14, bold=False), "black", "white", align="center", padding=2, max_width=WIDTH)
ymax = 100
chart = bar_chart(table, WIDTH, BAR*2*ymax, type=BarChartType.STACKED, spacing=10, colors=colorfn, grid_interval=10, tick_interval=5, label_font=arial(14), rlabels=rlabelfn, bg="white", fg="black", ylabel=Image.from_text("# Nobel Literature laureates", arial(14), padding=(0,2,0,10), bg="white").transpose(Image.ROTATE_90), ymax=ymax, clabels=None)
def lbox(c):
    # Legend box: swatch for continent/country c with its laureate count
    # overlaid (black text on Finland's light flag, white elsewhere).
    # BOXSIZE is defined below this function but is bound before first call.
    count = counts.select(lambda cs: c in cs).sum()
    box = colorfn(c, 0)((WIDTH,WIDTH)).resize_fixed_aspect(width=BOXSIZE)
    return box.place(Image.from_text(str(count), arial(14), "black" if c == "Finland" else "white"))
BOXSIZE = 30
# Legend: continent swatches then Nordic country swatches, each with a label.
cboxes = [[lbox(c), Image.from_text(c, arial(14), padding=(5,0), fg="black", bg="white")] for c in CONTINENTS]
clegs = Image.from_array(cboxes, bg="white", xalign=0)
nboxes = [[lbox(c), Image.from_text(c, arial(14), padding=(5,0), fg="black", bg="white")] for c in sorted(NORDIC)]
nlegs = Image.from_array(nboxes, bg="white", xalign=0)
legend = Image.from_column([
    Image.from_text("Continents", arial(14, bold=True)),
    clegs,
    Image.from_text("(stripes indicate winners with dual nationalities)", arial(14), max_width=150, padding=(0,0,0,10)),
    Image.from_text("Countries", arial(14, bold=True)),
    nlegs
    ], bg="white", padding=(0,3), xalign=0).pad(5, "white").pad(1, "black")
# Final composition: chart + legend side by side, title on top, credit badge.
chart = Image.from_row([chart, legend], bg="white", yalign=0, padding=5)
title = Image.from_text("Geographic distribution of Literature Nobel laureates", arial(24, bold=True)).pad((0,4,0,8), "white")
img = Image.from_column([title, chart], bg="white")
img.place(Image.from_text("/u/Udzu", font("arial", 16), fg="black", bg="white", padding=5).pad((1,1,0,0), "black"), align=1, padding=10, copy=False)
img.save("output/nobels_lit.png")
| Udzu/pudzu | dataviz/nobelslit.py | Python | mit | 5,009 |
# Copyright (c) 2012 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import mock
import webob.exc as wexc
from neutron.api.v2 import base
from neutron.common import constants as n_const
from neutron import context
from neutron.extensions import portbindings
from neutron import manager
from neutron.openstack.common import log as logging
from neutron.plugins.common import constants as p_const
from neutron.plugins.ml2 import config as ml2_config
from neutron.plugins.ml2 import driver_api as api
from neutron.plugins.ml2 import driver_context
from neutron.plugins.ml2.drivers.cisco.nexus import config as cisco_config
from neutron.plugins.ml2.drivers.cisco.nexus import exceptions as c_exc
from neutron.plugins.ml2.drivers.cisco.nexus import mech_cisco_nexus
from neutron.plugins.ml2.drivers.cisco.nexus import nexus_db_v2
from neutron.plugins.ml2.drivers.cisco.nexus import nexus_network_driver
from neutron.plugins.ml2.drivers import type_vlan as vlan_config
from neutron.tests.unit import test_db_plugin
LOG = logging.getLogger(__name__)

# Plugin under test and shared fixtures used throughout the test cases.
ML2_PLUGIN = 'neutron.plugins.ml2.plugin.Ml2Plugin'
PHYS_NET = 'physnet1'
COMP_HOST_NAME = 'testhost'
COMP_HOST_NAME_2 = 'testhost_2'
VLAN_START = 1000
VLAN_END = 1100
# Imaginary Nexus switch and the interfaces mapped to each compute host.
NEXUS_IP_ADDR = '1.1.1.1'
NETWORK_NAME = 'test_network'
NETWORK_NAME_2 = 'test_network_2'
NEXUS_INTERFACE = '1/1'
NEXUS_INTERFACE_2 = '1/2'
CIDR_1 = '10.0.0.0/24'
CIDR_2 = '10.0.1.0/24'
DEVICE_ID_1 = '11111111-1111-1111-1111-111111111111'
DEVICE_ID_2 = '22222222-2222-2222-2222-222222222222'
DEVICE_OWNER = 'compute:None'
# Canned bound segments returned by the mocked PortContext properties.
BOUND_SEGMENT1 = {api.NETWORK_TYPE: p_const.TYPE_VLAN,
                  api.PHYSICAL_NETWORK: PHYS_NET,
                  api.SEGMENTATION_ID: VLAN_START}
BOUND_SEGMENT2 = {api.NETWORK_TYPE: p_const.TYPE_VLAN,
                  api.PHYSICAL_NETWORK: PHYS_NET,
                  api.SEGMENTATION_ID: VLAN_START + 1}
class CiscoML2MechanismTestCase(test_db_plugin.NeutronDbPluginV2TestCase):
    def setUp(self):
        """Configure for end-to-end neutron testing using a mock ncclient.
        This setup includes:
        - Configure the ML2 plugin to use VLANs in the range of 1000-1100.
        - Configure the Cisco mechanism driver to use an imaginary switch
          at NEXUS_IP_ADDR.
        - Create a mock NETCONF client (ncclient) for the Cisco mechanism
          driver
        """
        # Configure the ML2 mechanism drivers and network types
        ml2_opts = {
            'mechanism_drivers': ['cisco_nexus'],
            'tenant_network_types': ['vlan'],
        }
        for opt, val in ml2_opts.items():
            ml2_config.cfg.CONF.set_override(opt, val, 'ml2')
        # Configure the ML2 VLAN parameters
        phys_vrange = ':'.join([PHYS_NET, str(VLAN_START), str(VLAN_END)])
        vlan_config.cfg.CONF.set_override('network_vlan_ranges',
                                          [phys_vrange],
                                          'ml2_type_vlan')
        # Configure the Cisco Nexus mechanism driver: credentials plus the
        # host -> switch-interface mapping for both compute hosts.
        nexus_config = {
            (NEXUS_IP_ADDR, 'username'): 'admin',
            (NEXUS_IP_ADDR, 'password'): 'mySecretPassword',
            (NEXUS_IP_ADDR, 'ssh_port'): 22,
            (NEXUS_IP_ADDR, COMP_HOST_NAME): NEXUS_INTERFACE,
            (NEXUS_IP_ADDR, COMP_HOST_NAME_2): NEXUS_INTERFACE_2}
        nexus_patch = mock.patch.dict(
            cisco_config.ML2MechCiscoConfig.nexus_dict,
            nexus_config)
        nexus_patch.start()
        self.addCleanup(nexus_patch.stop)
        # The NETCONF client module is not included in the DevStack
        # distribution, so mock this module for unit testing.
        self.mock_ncclient = mock.Mock()
        mock.patch.object(nexus_network_driver.CiscoNexusDriver,
                          '_import_ncclient',
                          return_value=self.mock_ncclient).start()
        # Mock port context values for bound_segments and 'status'.
        self.mock_bound_segment = mock.patch.object(
            driver_context.PortContext,
            'bound_segment',
            new_callable=mock.PropertyMock).start()
        self.mock_bound_segment.return_value = BOUND_SEGMENT1
        self.mock_original_bound_segment = mock.patch.object(
            driver_context.PortContext,
            'original_bound_segment',
            new_callable=mock.PropertyMock).start()
        self.mock_original_bound_segment.return_value = None
        # Force the driver to treat every port as ACTIVE so the Nexus
        # config path is always exercised.
        mock_status = mock.patch.object(
            mech_cisco_nexus.CiscoNexusMechanismDriver,
            '_is_status_active').start()
        mock_status.return_value = n_const.PORT_STATUS_ACTIVE
        super(CiscoML2MechanismTestCase, self).setUp(ML2_PLUGIN)
        self.port_create_status = 'DOWN'
    @contextlib.contextmanager
    def _patch_ncclient(self, attr, value):
        """Configure an attribute on the mock ncclient module.
        This method can be used to inject errors by setting a side effect
        or a return value for an ncclient method.
        :param attr: ncclient attribute (typically method) to be configured.
        :param value: Value to be configured on the attribute.
        """
        # Configure attribute.
        config = {attr: value}
        self.mock_ncclient.configure_mock(**config)
        # Continue testing
        yield
        # Unconfigure attribute
        config = {attr: None}
        self.mock_ncclient.configure_mock(**config)
    @staticmethod
    def _config_dependent_side_effect(match_config, exc):
        """Generates a config-dependent side effect for ncclient edit_config.
        This method generates a mock side-effect function which can be
        configured on the mock ncclient module for the edit_config method.
        This side effect will cause a given exception to be raised whenever
        the XML config string that is passed to edit_config contains all
        words in a given match config string.
        :param match_config: String containing keywords to be matched
        :param exc: Exception to be raised when match is found
        :return: Side effect function for the mock ncclient module's
            edit_config method.
        """
        keywords = match_config.split()
        def _side_effect_function(target, config):
            if all(word in config for word in keywords):
                raise exc
        return _side_effect_function
    def _is_in_nexus_cfg(self, words):
        """Check if any config sent to Nexus contains all words in a list."""
        for call in (self.mock_ncclient.connect.return_value.
                     edit_config.mock_calls):
            # mock_calls entries are (name, args, kwargs); the XML payload
            # is passed as the 'config' keyword argument.
            configlet = call[2]['config']
            if all(word in configlet for word in words):
                return True
        return False
    def _is_in_last_nexus_cfg(self, words):
        """Confirm last config sent to Nexus contains specified keywords."""
        last_cfg = (self.mock_ncclient.connect.return_value.
                    edit_config.mock_calls[-1][2]['config'])
        return all(word in last_cfg for word in words)
    def _is_vlan_configured(self, vlan_creation_expected=True,
                            add_keyword_expected=False):
        """Check that the last Nexus config trunked a VLAN as expected."""
        vlan_created = self._is_in_nexus_cfg(['vlan', 'vlan-name'])
        add_appears = self._is_in_last_nexus_cfg(['add'])
        return (self._is_in_last_nexus_cfg(['allowed', 'vlan']) and
                vlan_created == vlan_creation_expected and
                add_appears == add_keyword_expected)
    def _is_vlan_unconfigured(self, vlan_deletion_expected=True):
        """Check that the last Nexus config removed a VLAN as expected."""
        vlan_deleted = self._is_in_last_nexus_cfg(
            ['no', 'vlan', 'vlan-id-create-delete'])
        return (self._is_in_nexus_cfg(['allowed', 'vlan', 'remove']) and
                vlan_deleted == vlan_deletion_expected)
class TestCiscoBasicGet(CiscoML2MechanismTestCase,
                        test_db_plugin.TestBasicGet):
    """Run the generic TestBasicGet suite against the Cisco ML2 setup."""
    pass
class TestCiscoV2HTTPResponse(CiscoML2MechanismTestCase,
                              test_db_plugin.TestV2HTTPResponse):
    """Run the generic TestV2HTTPResponse suite against the Cisco ML2 setup."""
    pass
class TestCiscoPortsV2(CiscoML2MechanismTestCase,
test_db_plugin.TestPortsV2):
    @contextlib.contextmanager
    def _create_resources(self, name=NETWORK_NAME, cidr=CIDR_1,
                          device_id=DEVICE_ID_1,
                          host_id=COMP_HOST_NAME):
        """Create network, subnet, and port resources for test cases.
        Create a network, subnet, port and then update the port, yield the
        result, then delete the port, subnet and network.
        :param name: Name of network to be created.
        :param cidr: cidr address of subnetwork to be created.
        :param device_id: Device ID to use for port to be created/updated.
        :param host_id: Host ID to use for port create/update.
        """
        with self.network(name=name) as network:
            with self.subnet(network=network, cidr=cidr) as subnet:
                with self.port(subnet=subnet, cidr=cidr) as port:
                    # Binding the port to a host triggers the mechanism
                    # driver; the HTTP response of the update is yielded.
                    data = {'port': {portbindings.HOST_ID: host_id,
                                     'device_id': device_id,
                                     'device_owner': 'compute:none',
                                     'admin_state_up': True}}
                    req = self.new_update_request('ports', data,
                                                  port['port']['id'])
                    yield req.get_response(self.api)
def _assertExpectedHTTP(self, status, exc):
"""Confirm that an HTTP status corresponds to an expected exception.
Confirm that an HTTP status which has been returned for an
neutron API request matches the HTTP status corresponding
to an expected exception.
:param status: HTTP status
:param exc: Expected exception
"""
if exc in base.FAULT_MAP:
expected_http = base.FAULT_MAP[exc].code
else:
expected_http = wexc.HTTPInternalServerError.code
self.assertEqual(status, expected_http)
def test_create_ports_bulk_emulated_plugin_failure(self):
real_has_attr = hasattr
#ensures the API chooses the emulation code path
def fakehasattr(item, attr):
if attr.endswith('__native_bulk_support'):
return False
return real_has_attr(item, attr)
with mock.patch('__builtin__.hasattr',
new=fakehasattr):
plugin_obj = manager.NeutronManager.get_plugin()
orig = plugin_obj.create_port
with mock.patch.object(plugin_obj,
'create_port') as patched_plugin:
def side_effect(*args, **kwargs):
return self._do_side_effect(patched_plugin, orig,
*args, **kwargs)
patched_plugin.side_effect = side_effect
with self.network() as net:
res = self._create_port_bulk(self.fmt, 2,
net['network']['id'],
'test',
True)
# Expect an internal server error as we injected a fault
self._validate_behavior_on_bulk_failure(
res,
'ports',
wexc.HTTPInternalServerError.code)
def test_create_ports_bulk_native(self):
if self._skip_native_bulk:
self.skipTest("Plugin does not support native bulk port create")
def test_create_ports_bulk_emulated(self):
if self._skip_native_bulk:
self.skipTest("Plugin does not support native bulk port create")
def test_create_ports_bulk_native_plugin_failure(self):
if self._skip_native_bulk:
self.skipTest("Plugin does not support native bulk port create")
ctx = context.get_admin_context()
with self.network() as net:
plugin_obj = manager.NeutronManager.get_plugin()
orig = plugin_obj.create_port
with mock.patch.object(plugin_obj,
'create_port') as patched_plugin:
def side_effect(*args, **kwargs):
return self._do_side_effect(patched_plugin, orig,
*args, **kwargs)
patched_plugin.side_effect = side_effect
res = self._create_port_bulk(self.fmt, 2, net['network']['id'],
'test', True, context=ctx)
# We expect an internal server error as we injected a fault
self._validate_behavior_on_bulk_failure(
res,
'ports',
wexc.HTTPInternalServerError.code)
def test_nexus_enable_vlan_cmd(self):
"""Verify the syntax of the command to enable a vlan on an intf.
Confirm that for the first VLAN configured on a Nexus interface,
the command string sent to the switch does not contain the
keyword 'add'.
Confirm that for the second VLAN configured on a Nexus interface,
the command string sent to the switch contains the keyword 'add'.
"""
# First vlan should be configured without 'add' keyword
with self._create_resources():
self.assertTrue(self._is_vlan_configured(
vlan_creation_expected=True,
add_keyword_expected=False))
self.mock_ncclient.reset_mock()
self.mock_bound_segment.return_value = BOUND_SEGMENT2
# Second vlan should be configured with 'add' keyword
with self._create_resources(name=NETWORK_NAME_2,
device_id=DEVICE_ID_2,
cidr=CIDR_2):
self.assertTrue(self._is_vlan_configured(
vlan_creation_expected=True,
add_keyword_expected=True))
# Return to first segment for delete port calls.
self.mock_bound_segment.return_value = BOUND_SEGMENT1
def test_nexus_add_trunk(self):
"""Verify syntax to enable a vlan on an interface.
Test also verifies that the vlan interface is not created.
Test of the following ml2_conf_cisco_ini config:
[ml2_mech_cisco_nexus:1.1.1.1]
hostA=1/1
hostB=1/2
where vlan_id = 100
Confirm that for the first host configured on a Nexus interface,
the command string sent to the switch does not contain the
keyword 'add'.
Confirm that for the second host configured on a Nexus interface,
the command staring sent to the switch contains does not contain
the keyword 'name' [signifies vlan intf creation].
"""
with self._create_resources(name='net1', cidr=CIDR_1):
self.assertTrue(self._is_in_last_nexus_cfg(['allowed', 'vlan']))
self.assertFalse(self._is_in_last_nexus_cfg(['add']))
with self._create_resources(name='net2',
cidr=CIDR_2, host_id=COMP_HOST_NAME_2):
self.assertTrue(
self._is_in_last_nexus_cfg(['allowed', 'vlan']))
self.assertFalse(self._is_in_last_nexus_cfg(['name']))
def test_nexus_connect_fail(self):
"""Test failure to connect to a Nexus switch.
While creating a network, subnet, and port, simulate a connection
failure to a nexus switch. Confirm that the expected HTTP code
is returned for the create port operation.
"""
with self._patch_ncclient('connect.side_effect',
AttributeError):
with self._create_resources() as result:
self._assertExpectedHTTP(result.status_int,
c_exc.NexusConnectFailed)
def test_nexus_vlan_config_two_hosts(self):
"""Verify config/unconfig of vlan on two compute hosts."""
@contextlib.contextmanager
def _create_port_check_vlan(comp_host_name, device_id,
vlan_creation_expected=True):
with self.port(subnet=subnet, fmt=self.fmt) as port:
data = {'port': {portbindings.HOST_ID: comp_host_name,
'device_id': device_id,
'device_owner': DEVICE_OWNER,
'admin_state_up': True}}
req = self.new_update_request('ports', data,
port['port']['id'])
req.get_response(self.api)
self.assertTrue(self._is_vlan_configured(
vlan_creation_expected=vlan_creation_expected,
add_keyword_expected=False))
self.mock_ncclient.reset_mock()
yield
# Create network and subnet
with self.network(name=NETWORK_NAME) as network:
with self.subnet(network=network, cidr=CIDR_1) as subnet:
# Create an instance on first compute host
with _create_port_check_vlan(COMP_HOST_NAME, DEVICE_ID_1,
vlan_creation_expected=True):
# Create an instance on second compute host
with _create_port_check_vlan(COMP_HOST_NAME_2, DEVICE_ID_2,
vlan_creation_expected=False):
pass
# Instance on second host is now terminated.
# Vlan should be untrunked from port, but vlan should
# still exist on the switch.
self.assertTrue(self._is_vlan_unconfigured(
vlan_deletion_expected=False))
self.mock_ncclient.reset_mock()
# Instance on first host is now terminated.
# Vlan should be untrunked from port and vlan should have
# been deleted from the switch.
self.assertTrue(self._is_vlan_unconfigured(
vlan_deletion_expected=True))
def test_nexus_vm_migration(self):
"""Verify VM (live) migration.
Simulate the following:
Nova informs neutron of live-migration with port-update(new host).
This should trigger two update_port_pre/postcommit() calls.
The first one should only change the current host_id and remove the
binding resulting in the mechanism drivers receiving:
PortContext.original['binding:host_id']: previous value
PortContext.original_bound_segment: previous value
PortContext.current['binding:host_id']: current (new) value
PortContext.bound_segment: None
The second one binds the new host resulting in the mechanism
drivers receiving:
PortContext.original['binding:host_id']: previous value
PortContext.original_bound_segment: None
PortContext.current['binding:host_id']: previous value
PortContext.bound_segment: new value
"""
# Create network, subnet and port.
with self._create_resources() as result:
# Verify initial database entry.
# Use port_id to verify that 1st host name was used.
binding = nexus_db_v2.get_nexusvm_bindings(VLAN_START,
DEVICE_ID_1)[0]
intf_type, nexus_port = binding.port_id.split(':')
self.assertEqual(nexus_port, NEXUS_INTERFACE)
port = self.deserialize(self.fmt, result)
port_id = port['port']['id']
# Trigger update event to unbind segment.
# Results in port being deleted from nexus DB and switch.
data = {'port': {portbindings.HOST_ID: COMP_HOST_NAME_2}}
self.mock_bound_segment.return_value = None
self.mock_original_bound_segment.return_value = BOUND_SEGMENT1
self.new_update_request('ports', data,
port_id).get_response(self.api)
# Verify that port entry has been deleted.
self.assertRaises(c_exc.NexusPortBindingNotFound,
nexus_db_v2.get_nexusvm_bindings,
VLAN_START, DEVICE_ID_1)
# Trigger update event to bind segment with new host.
self.mock_bound_segment.return_value = BOUND_SEGMENT1
self.mock_original_bound_segment.return_value = None
self.new_update_request('ports', data,
port_id).get_response(self.api)
# Verify that port entry has been added using new host name.
# Use port_id to verify that 2nd host name was used.
binding = nexus_db_v2.get_nexusvm_bindings(VLAN_START,
DEVICE_ID_1)[0]
intf_type, nexus_port = binding.port_id.split(':')
self.assertEqual(nexus_port, NEXUS_INTERFACE_2)
def test_nexus_config_fail(self):
"""Test a Nexus switch configuration failure.
While creating a network, subnet, and port, simulate a nexus
switch configuration error. Confirm that the expected HTTP code
is returned for the create port operation.
"""
with self._patch_ncclient(
'connect.return_value.edit_config.side_effect',
AttributeError):
with self._create_resources() as result:
self._assertExpectedHTTP(result.status_int,
c_exc.NexusConfigFailed)
def test_nexus_extended_vlan_range_failure(self):
"""Test that extended VLAN range config errors are ignored.
Some versions of Nexus switch do not allow state changes for
the extended VLAN range (1006-4094), but these errors can be
ignored (default values are appropriate). Test that such errors
are ignored by the Nexus plugin.
"""
def mock_edit_config_a(target, config):
if all(word in config for word in ['state', 'active']):
raise Exception("Can't modify state for extended")
with self._patch_ncclient(
'connect.return_value.edit_config.side_effect',
mock_edit_config_a):
with self._create_resources() as result:
self.assertEqual(result.status_int, wexc.HTTPOk.code)
def mock_edit_config_b(target, config):
if all(word in config for word in ['no', 'shutdown']):
raise Exception("Command is only allowed on VLAN")
with self._patch_ncclient(
'connect.return_value.edit_config.side_effect',
mock_edit_config_b):
with self._create_resources() as result:
self.assertEqual(result.status_int, wexc.HTTPOk.code)
def test_nexus_vlan_config_rollback(self):
"""Test rollback following Nexus VLAN state config failure.
Test that the Cisco Nexus plugin correctly deletes the VLAN
on the Nexus switch when the 'state active' command fails (for
a reason other than state configuration change is rejected
for the extended VLAN range).
"""
vlan_state_configs = ['state active', 'no shutdown']
for config in vlan_state_configs:
with self._patch_ncclient(
'connect.return_value.edit_config.side_effect',
self._config_dependent_side_effect(config, ValueError)):
with self._create_resources() as result:
# Confirm that the last configuration sent to the Nexus
# switch was deletion of the VLAN.
self.assertTrue(
self._is_in_last_nexus_cfg(['<no>', '<vlan>'])
)
self._assertExpectedHTTP(result.status_int,
c_exc.NexusConfigFailed)
def test_nexus_host_not_configured(self):
"""Test handling of a NexusComputeHostNotConfigured exception.
Test the Cisco NexusComputeHostNotConfigured exception by using
a fictitious host name during port creation.
"""
with self._create_resources(host_id='fake_host') as result:
self._assertExpectedHTTP(result.status_int,
c_exc.NexusComputeHostNotConfigured)
def test_nexus_missing_fields(self):
"""Test handling of a NexusMissingRequiredFields exception.
Test the Cisco NexusMissingRequiredFields exception by using
empty host_id and device_id values during port creation.
"""
with self._create_resources(device_id='', host_id='') as result:
self._assertExpectedHTTP(result.status_int,
c_exc.NexusMissingRequiredFields)
class TestCiscoNetworksV2(CiscoML2MechanismTestCase,
test_db_plugin.TestNetworksV2):
def test_create_networks_bulk_emulated_plugin_failure(self):
real_has_attr = hasattr
def fakehasattr(item, attr):
if attr.endswith('__native_bulk_support'):
return False
return real_has_attr(item, attr)
plugin_obj = manager.NeutronManager.get_plugin()
orig = plugin_obj.create_network
#ensures the API choose the emulation code path
with mock.patch('__builtin__.hasattr',
new=fakehasattr):
with mock.patch.object(plugin_obj,
'create_network') as patched_plugin:
def side_effect(*args, **kwargs):
return self._do_side_effect(patched_plugin, orig,
*args, **kwargs)
patched_plugin.side_effect = side_effect
res = self._create_network_bulk(self.fmt, 2, 'test', True)
LOG.debug("response is %s" % res)
# We expect an internal server error as we injected a fault
self._validate_behavior_on_bulk_failure(
res,
'networks',
wexc.HTTPInternalServerError.code)
def test_create_networks_bulk_native_plugin_failure(self):
if self._skip_native_bulk:
self.skipTest("Plugin does not support native bulk network create")
plugin_obj = manager.NeutronManager.get_plugin()
orig = plugin_obj.create_network
with mock.patch.object(plugin_obj,
'create_network') as patched_plugin:
def side_effect(*args, **kwargs):
return self._do_side_effect(patched_plugin, orig,
*args, **kwargs)
patched_plugin.side_effect = side_effect
res = self._create_network_bulk(self.fmt, 2, 'test', True)
# We expect an internal server error as we injected a fault
self._validate_behavior_on_bulk_failure(
res,
'networks',
wexc.HTTPInternalServerError.code)
class TestCiscoSubnetsV2(CiscoML2MechanismTestCase,
test_db_plugin.TestSubnetsV2):
def test_create_subnets_bulk_emulated_plugin_failure(self):
real_has_attr = hasattr
#ensures the API choose the emulation code path
def fakehasattr(item, attr):
if attr.endswith('__native_bulk_support'):
return False
return real_has_attr(item, attr)
with mock.patch('__builtin__.hasattr',
new=fakehasattr):
plugin_obj = manager.NeutronManager.get_plugin()
orig = plugin_obj.create_subnet
with mock.patch.object(plugin_obj,
'create_subnet') as patched_plugin:
def side_effect(*args, **kwargs):
self._do_side_effect(patched_plugin, orig,
*args, **kwargs)
patched_plugin.side_effect = side_effect
with self.network() as net:
res = self._create_subnet_bulk(self.fmt, 2,
net['network']['id'],
'test')
# We expect an internal server error as we injected a fault
self._validate_behavior_on_bulk_failure(
res,
'subnets',
wexc.HTTPInternalServerError.code)
def test_create_subnets_bulk_native_plugin_failure(self):
if self._skip_native_bulk:
self.skipTest("Plugin does not support native bulk subnet create")
plugin_obj = manager.NeutronManager.get_plugin()
orig = plugin_obj.create_subnet
with mock.patch.object(plugin_obj,
'create_subnet') as patched_plugin:
def side_effect(*args, **kwargs):
return self._do_side_effect(patched_plugin, orig,
*args, **kwargs)
patched_plugin.side_effect = side_effect
with self.network() as net:
res = self._create_subnet_bulk(self.fmt, 2,
net['network']['id'],
'test')
# We expect an internal server error as we injected a fault
self._validate_behavior_on_bulk_failure(
res,
'subnets',
wexc.HTTPInternalServerError.code)
class TestCiscoPortsV2XML(TestCiscoPortsV2):
fmt = 'xml'
class TestCiscoNetworksV2XML(TestCiscoNetworksV2):
fmt = 'xml'
class TestCiscoSubnetsV2XML(TestCiscoSubnetsV2):
fmt = 'xml'
| subramani95/neutron | neutron/tests/unit/ml2/drivers/cisco/nexus/test_cisco_mech.py | Python | apache-2.0 | 30,627 |
'''modparser.py - a tool to modify original zend_language_parser.y'''
import sys
import re
target = sys.stdout
source = file(sys.argv[1])
re_T_FOR = re.compile('\|\s*T_FOR\n')
counter_T_FOR = -1
update_args = False
def update_yacc_arg(line):
idx = line.find('$')
last_idx = 0
new_line = ''
# Replace $1 with $2, etc.
while idx > 0:
new_line += line[last_idx:idx]
arg_num = int(line[idx + 1]) + 1
new_line += '$' + str(arg_num)
last_idx = idx + 2
idx = line.find('$', last_idx)
return new_line + line[last_idx:]
for line in source:
if re_T_FOR.search(line):
counter_T_FOR = 1
if counter_T_FOR == 0:
line = line[:-1]
line += ' { zend_do_for_begin(TSRMLS_C); } \n'
update_args = True
if update_args:
if '|' in line:
update_args = False
else:
line = update_yacc_arg(line)
counter_T_FOR -= 1
target.write(line) | myaut/salsa3 | parsers/php-parser/modparser.py | Python | gpl-2.0 | 1,016 |
from setuptools import setup
requires = [
'requests',
'keyring',
]
setup(
name="gnome-shell-search-github-repositories",
version='1.0.2',
description="A gnome shell search provider for your github repos",
url="http://github.com/ralphbean/gnome-shell-search-github-repositories",
author="Ralph Bean",
author_email="rbean@redhat.com",
license='GPLv3',
install_requires=requires,
packages=['gs_search_github_repositories'],
zip_safe=False,
entry_points={
'console_scripts': [
'gnome-shell-search-github-repositories-daemon = gs_search_github_repositories.daemon:main',
'gnome-shell-search-github-repositories-config = gs_search_github_repositories.popup:main',
],
}
)
| ralphbean/gnome-shell-search-github-repositories | setup.py | Python | gpl-3.0 | 763 |
import time
class Player(object):
FOREVER = -1
def __init__(self, blinkytape):
self._blinkytape = blinkytape
def display_pattern(self, pattern):
self._blinkytape.update(pattern.pixels)
def play_animation(self, animation, num_cycles):
while num_cycles == self.FOREVER or num_cycles > 0:
self._play_single_animation_cycle(animation)
if num_cycles != self.FOREVER:
num_cycles = num_cycles - 1
def _play_single_animation_cycle(self, animation):
animation.begin()
while not animation.finished:
pixels = animation.next_frame()
self._blinkytape.update(pixels)
time.sleep(animation.frame_period_sec)
animation.end()
| jonspeicher/blinkyfun | blinkytape/player.py | Python | mit | 757 |
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Test class for Ironic SSH power driver."""
import fixtures
import mock
from oslo_concurrency import processutils
from oslo_config import cfg
import paramiko
from ironic.common import boot_devices
from ironic.common import driver_factory
from ironic.common import exception
from ironic.common import states
from ironic.common import utils
from ironic.conductor import task_manager
from ironic.drivers.modules import ssh
from ironic.drivers import utils as driver_utils
from ironic.tests.conductor import utils as mgr_utils
from ironic.tests.db import base as db_base
from ironic.tests.db import utils as db_utils
from ironic.tests.objects import utils as obj_utils
CONF = cfg.CONF
class SSHValidateParametersTestCase(db_base.DbTestCase):
def test__parse_driver_info_good_password(self):
# make sure we get back the expected things
node = obj_utils.get_test_node(
self.context,
driver='fake_ssh',
driver_info=db_utils.get_test_ssh_info('password'))
info = ssh._parse_driver_info(node)
self.assertIsNotNone(info.get('host'))
self.assertIsNotNone(info.get('username'))
self.assertIsNotNone(info.get('password'))
self.assertIsNotNone(info.get('port'))
self.assertIsNotNone(info.get('virt_type'))
self.assertIsNotNone(info.get('cmd_set'))
self.assertIsNotNone(info.get('uuid'))
def test__parse_driver_info_good_key(self):
# make sure we get back the expected things
node = obj_utils.get_test_node(
self.context,
driver='fake_ssh',
driver_info=db_utils.get_test_ssh_info('key'))
info = ssh._parse_driver_info(node)
self.assertIsNotNone(info.get('host'))
self.assertIsNotNone(info.get('username'))
self.assertIsNotNone(info.get('key_contents'))
self.assertIsNotNone(info.get('port'))
self.assertIsNotNone(info.get('virt_type'))
self.assertIsNotNone(info.get('cmd_set'))
self.assertIsNotNone(info.get('uuid'))
def test__parse_driver_info_good_file(self):
# make sure we get back the expected things
d_info = db_utils.get_test_ssh_info('file')
tempdir = self.useFixture(fixtures.TempDir())
key_path = tempdir.path + '/foo'
open(key_path, 'wt').close()
d_info['ssh_key_filename'] = key_path
node = obj_utils.get_test_node(
self.context,
driver='fake_ssh',
driver_info=d_info)
info = ssh._parse_driver_info(node)
self.assertIsNotNone(info.get('host'))
self.assertIsNotNone(info.get('username'))
self.assertIsNotNone(info.get('key_filename'))
self.assertIsNotNone(info.get('port'))
self.assertIsNotNone(info.get('virt_type'))
self.assertIsNotNone(info.get('cmd_set'))
self.assertIsNotNone(info.get('uuid'))
def test__parse_driver_info_bad_file(self):
# A filename that doesn't exist errors.
info = db_utils.get_test_ssh_info('file')
node = obj_utils.get_test_node(
self.context,
driver='fake_ssh',
driver_info=info)
self.assertRaises(
exception.InvalidParameterValue, ssh._parse_driver_info, node)
def test__parse_driver_info_too_many(self):
info = db_utils.get_test_ssh_info('too_many')
node = obj_utils.get_test_node(
self.context,
driver='fake_ssh',
driver_info=info)
self.assertRaises(
exception.InvalidParameterValue, ssh._parse_driver_info, node)
def test__parse_driver_info_missing_host(self):
# make sure error is raised when info is missing
info = db_utils.get_test_ssh_info()
del info['ssh_address']
node = obj_utils.get_test_node(self.context, driver_info=info)
self.assertRaises(exception.MissingParameterValue,
ssh._parse_driver_info,
node)
def test__parse_driver_info_missing_user(self):
# make sure error is raised when info is missing
info = db_utils.get_test_ssh_info()
del info['ssh_username']
node = obj_utils.get_test_node(self.context, driver_info=info)
self.assertRaises(exception.MissingParameterValue,
ssh._parse_driver_info,
node)
def test__parse_driver_info_invalid_creds(self):
# make sure error is raised when info is missing
info = db_utils.get_test_ssh_info('no-creds')
node = obj_utils.get_test_node(self.context, driver_info=info)
self.assertRaises(exception.InvalidParameterValue,
ssh._parse_driver_info,
node)
def test__parse_driver_info_missing_virt_type(self):
# make sure error is raised when info is missing
info = db_utils.get_test_ssh_info()
del info['ssh_virt_type']
node = obj_utils.get_test_node(self.context, driver_info=info)
self.assertRaises(exception.MissingParameterValue,
ssh._parse_driver_info,
node)
def test__parse_driver_info_ssh_port_wrong_type(self):
# make sure error is raised when ssh_port is not integer
info = db_utils.get_test_ssh_info()
info['ssh_port'] = 'wrong_port_value'
node = obj_utils.get_test_node(self.context, driver_info=info)
self.assertRaises(exception.InvalidParameterValue,
ssh._parse_driver_info,
node)
def test__normalize_mac_string(self):
mac_raw = "0A:1B-2C-3D:4F"
mac_clean = ssh._normalize_mac(mac_raw)
self.assertEqual("0a1b2c3d4f", mac_clean)
def test__normalize_mac_unicode(self):
mac_raw = u"0A:1B-2C-3D:4F"
mac_clean = ssh._normalize_mac(mac_raw)
self.assertEqual("0a1b2c3d4f", mac_clean)
def test__parse_driver_info_with_custom_libvirt_uri(self):
CONF.set_override('libvirt_uri', 'qemu:///foo', 'ssh')
expected_base_cmd = "LC_ALL=C /usr/bin/virsh --connect qemu:///foo"
node = obj_utils.get_test_node(
self.context,
driver='fake_ssh',
driver_info=db_utils.get_test_ssh_info())
node['driver_info']['ssh_virt_type'] = 'virsh'
info = ssh._parse_driver_info(node)
self.assertEqual(expected_base_cmd, info['cmd_set']['base_cmd'])
def test__get_boot_device_map_parallels(self):
boot_map = ssh._get_boot_device_map('parallels')
self.assertEqual('net0', boot_map[boot_devices.PXE])
def test__get_boot_device_map_vbox(self):
boot_map = ssh._get_boot_device_map('vbox')
self.assertEqual('net', boot_map[boot_devices.PXE])
def test__get_boot_device_map_exception(self):
self.assertRaises(exception.InvalidParameterValue,
ssh._get_boot_device_map,
'this_doesn_t_exist')
class SSHPrivateMethodsTestCase(db_base.DbTestCase):
def setUp(self):
super(SSHPrivateMethodsTestCase, self).setUp()
self.node = obj_utils.get_test_node(
self.context,
driver='fake_ssh',
driver_info=db_utils.get_test_ssh_info())
self.sshclient = paramiko.SSHClient()
@mock.patch.object(utils, 'ssh_connect')
def test__get_connection_client(self, ssh_connect_mock):
ssh_connect_mock.return_value = self.sshclient
client = ssh._get_connection(self.node)
self.assertEqual(self.sshclient, client)
driver_info = ssh._parse_driver_info(self.node)
ssh_connect_mock.assert_called_once_with(driver_info)
@mock.patch.object(utils, 'ssh_connect')
def test__get_connection_exception(self, ssh_connect_mock):
ssh_connect_mock.side_effect = exception.SSHConnectFailed(host='fake')
self.assertRaises(exception.SSHConnectFailed,
ssh._get_connection,
self.node)
driver_info = ssh._parse_driver_info(self.node)
ssh_connect_mock.assert_called_once_with(driver_info)
@mock.patch.object(processutils, 'ssh_execute')
def test__ssh_execute(self, exec_ssh_mock):
ssh_cmd = "somecmd"
expected = ['a', 'b', 'c']
exec_ssh_mock.return_value = ('\n'.join(expected), '')
lst = ssh._ssh_execute(self.sshclient, ssh_cmd)
exec_ssh_mock.assert_called_once_with(self.sshclient, ssh_cmd)
self.assertEqual(expected, lst)
@mock.patch.object(processutils, 'ssh_execute')
def test__ssh_execute_exception(self, exec_ssh_mock):
ssh_cmd = "somecmd"
exec_ssh_mock.side_effect = processutils.ProcessExecutionError
self.assertRaises(exception.SSHCommandFailed,
ssh._ssh_execute,
self.sshclient,
ssh_cmd)
exec_ssh_mock.assert_called_once_with(self.sshclient, ssh_cmd)
@mock.patch.object(processutils, 'ssh_execute')
@mock.patch.object(ssh, '_get_hosts_name_for_node')
def test__get_power_status_on(self, get_hosts_name_mock, exec_ssh_mock):
info = ssh._parse_driver_info(self.node)
exec_ssh_mock.return_value = (
'"NodeName" {b43c4982-110c-4c29-9325-d5f41b053513}', '')
get_hosts_name_mock.return_value = "NodeName"
pstate = ssh._get_power_status(self.sshclient, info)
ssh_cmd = "%s %s" % (info['cmd_set']['base_cmd'],
info['cmd_set']['list_running'])
self.assertEqual(states.POWER_ON, pstate)
exec_ssh_mock.assert_called_once_with(self.sshclient, ssh_cmd)
get_hosts_name_mock.assert_called_once_with(self.sshclient, info)
@mock.patch.object(processutils, 'ssh_execute')
@mock.patch.object(ssh, '_get_hosts_name_for_node')
def test__get_power_status_off(self, get_hosts_name_mock, exec_ssh_mock):
info = ssh._parse_driver_info(self.node)
exec_ssh_mock.return_value = (
'"NodeName" {b43c4982-110c-4c29-9325-d5f41b053513}', '')
get_hosts_name_mock.return_value = "NotNodeName"
pstate = ssh._get_power_status(self.sshclient, info)
ssh_cmd = "%s %s" % (info['cmd_set']['base_cmd'],
info['cmd_set']['list_running'])
self.assertEqual(states.POWER_OFF, pstate)
exec_ssh_mock.assert_called_once_with(self.sshclient, ssh_cmd)
get_hosts_name_mock.assert_called_once_with(self.sshclient, info)
@mock.patch.object(processutils, 'ssh_execute')
@mock.patch.object(ssh, '_get_hosts_name_for_node')
def test__get_power_status_error(self, get_hosts_name_mock, exec_ssh_mock):
info = ssh._parse_driver_info(self.node)
info['macs'] = ["11:11:11:11:11:11", "52:54:00:cf:2d:31"]
get_hosts_name_mock.return_value = None
self.assertRaises(exception.NodeNotFound,
ssh._get_power_status,
self.sshclient,
info)
get_hosts_name_mock.assert_called_once_with(self.sshclient, info)
exec_ssh_mock.assert_not_called()
@mock.patch.object(processutils, 'ssh_execute')
def test__get_power_status_exception(self, exec_ssh_mock):
info = ssh._parse_driver_info(self.node)
exec_ssh_mock.side_effect = processutils.ProcessExecutionError
self.assertRaises(exception.SSHCommandFailed,
ssh._get_power_status,
self.sshclient,
info)
ssh_cmd = "%s %s" % (info['cmd_set']['base_cmd'],
info['cmd_set']['list_all'])
exec_ssh_mock.assert_called_once_with(
self.sshclient, ssh_cmd)
@mock.patch.object(processutils, 'ssh_execute')
@mock.patch.object(ssh, '_get_hosts_name_for_node')
def test__get_power_status_correct_node(self, get_hosts_name_mock,
exec_ssh_mock):
# Bug: #1397834 test that get_power_status return status of
# baremeta_1 (off) and not baremetal_11 (on)
info = ssh._parse_driver_info(self.node)
exec_ssh_mock.return_value = ('"baremetal_11"\n"seed"\n', '')
get_hosts_name_mock.return_value = "baremetal_1"
pstate = ssh._get_power_status(self.sshclient, info)
self.assertEqual(states.POWER_OFF, pstate)
@mock.patch.object(processutils, 'ssh_execute')
def test__get_hosts_name_for_node_match(self, exec_ssh_mock):
info = ssh._parse_driver_info(self.node)
info['macs'] = ["11:11:11:11:11:11", "52:54:00:cf:2d:31"]
ssh_cmd = "%s %s" % (info['cmd_set']['base_cmd'],
info['cmd_set']['list_all'])
cmd_to_exec = "%s %s" % (info['cmd_set']['base_cmd'],
info['cmd_set']['get_node_macs'])
cmd_to_exec = cmd_to_exec.replace('{_NodeName_}', 'NodeName')
exec_ssh_mock.side_effect = [('NodeName', ''),
('52:54:00:cf:2d:31', '')]
expected = [mock.call(self.sshclient, ssh_cmd),
mock.call(self.sshclient, cmd_to_exec)]
found_name = ssh._get_hosts_name_for_node(self.sshclient, info)
self.assertEqual('NodeName', found_name)
self.assertEqual(expected, exec_ssh_mock.call_args_list)
@mock.patch.object(processutils, 'ssh_execute')
def test__get_hosts_name_for_node_no_match(self, exec_ssh_mock):
info = ssh._parse_driver_info(self.node)
info['macs'] = ["11:11:11:11:11:11", "22:22:22:22:22:22"]
exec_ssh_mock.side_effect = [('NodeName', ''),
('52:54:00:cf:2d:31', '')]
ssh_cmd = "%s %s" % (info['cmd_set']['base_cmd'],
info['cmd_set']['list_all'])
cmd_to_exec = "%s %s" % (info['cmd_set']['base_cmd'],
info['cmd_set']['get_node_macs'])
cmd_to_exec = cmd_to_exec.replace('{_NodeName_}', 'NodeName')
expected = [mock.call(self.sshclient, ssh_cmd),
mock.call(self.sshclient, cmd_to_exec)]
found_name = ssh._get_hosts_name_for_node(self.sshclient, info)
self.assertIsNone(found_name)
self.assertEqual(expected, exec_ssh_mock.call_args_list)
@mock.patch.object(processutils, 'ssh_execute')
def test__get_hosts_name_for_node_exception(self, exec_ssh_mock):
info = ssh._parse_driver_info(self.node)
info['macs'] = ["11:11:11:11:11:11", "52:54:00:cf:2d:31"]
ssh_cmd = "%s %s" % (info['cmd_set']['base_cmd'],
info['cmd_set']['list_all'])
cmd_to_exec = "%s %s" % (info['cmd_set']['base_cmd'],
info['cmd_set']['get_node_macs'])
cmd_to_exec = cmd_to_exec.replace('{_NodeName_}', 'NodeName')
exec_ssh_mock.side_effect = [('NodeName', ''),
processutils.ProcessExecutionError]
expected = [mock.call(self.sshclient, ssh_cmd),
mock.call(self.sshclient, cmd_to_exec)]
self.assertRaises(exception.SSHCommandFailed,
ssh._get_hosts_name_for_node,
self.sshclient,
info)
self.assertEqual(expected, exec_ssh_mock.call_args_list)
@mock.patch.object(processutils, 'ssh_execute')
@mock.patch.object(ssh, '_get_power_status')
@mock.patch.object(ssh, '_get_hosts_name_for_node')
def test__power_on_good(self, get_hosts_name_mock, get_power_status_mock,
exec_ssh_mock):
info = ssh._parse_driver_info(self.node)
info['macs'] = ["11:11:11:11:11:11", "52:54:00:cf:2d:31"]
get_power_status_mock.side_effect = [states.POWER_OFF,
states.POWER_ON]
get_hosts_name_mock.return_value = "NodeName"
expected = [mock.call(self.sshclient, info),
mock.call(self.sshclient, info)]
cmd_to_exec = "%s %s" % (info['cmd_set']['base_cmd'],
info['cmd_set']['start_cmd'])
cmd_to_exec = cmd_to_exec.replace('{_NodeName_}', 'NodeName')
current_state = ssh._power_on(self.sshclient, info)
self.assertEqual(states.POWER_ON, current_state)
self.assertEqual(expected, get_power_status_mock.call_args_list)
get_hosts_name_mock.assert_called_once_with(self.sshclient, info)
exec_ssh_mock.assert_called_once_with(self.sshclient, cmd_to_exec)
    @mock.patch.object(processutils, 'ssh_execute')
    @mock.patch.object(ssh, '_get_power_status')
    @mock.patch.object(ssh, '_get_hosts_name_for_node')
    def test__power_on_fail(self, get_hosts_name_mock, get_power_status_mock,
                            exec_ssh_mock):
        """If the node stays off after start_cmd, _power_on returns ERROR."""
        info = ssh._parse_driver_info(self.node)
        info['macs'] = ["11:11:11:11:11:11", "52:54:00:cf:2d:31"]
        # Node remains POWER_OFF on the post-command status poll.
        get_power_status_mock.side_effect = [states.POWER_OFF,
                                             states.POWER_OFF]
        get_hosts_name_mock.return_value = "NodeName"
        expected = [mock.call(self.sshclient, info),
                    mock.call(self.sshclient, info)]
        cmd_to_exec = "%s %s" % (info['cmd_set']['base_cmd'],
                                 info['cmd_set']['start_cmd'])
        cmd_to_exec = cmd_to_exec.replace('{_NodeName_}', 'NodeName')
        current_state = ssh._power_on(self.sshclient, info)
        self.assertEqual(states.ERROR, current_state)
        self.assertEqual(expected, get_power_status_mock.call_args_list)
        get_hosts_name_mock.assert_called_once_with(self.sshclient, info)
        exec_ssh_mock.assert_called_once_with(self.sshclient, cmd_to_exec)
    @mock.patch.object(processutils, 'ssh_execute')
    @mock.patch.object(ssh, '_get_power_status')
    @mock.patch.object(ssh, '_get_hosts_name_for_node')
    def test__power_on_exception(self, get_hosts_name_mock,
                                 get_power_status_mock, exec_ssh_mock):
        """An SSH execution error during power-on raises SSHCommandFailed."""
        info = ssh._parse_driver_info(self.node)
        info['macs'] = ["11:11:11:11:11:11", "52:54:00:cf:2d:31"]
        exec_ssh_mock.side_effect = processutils.ProcessExecutionError
        get_power_status_mock.side_effect = [states.POWER_OFF,
                                             states.POWER_ON]
        get_hosts_name_mock.return_value = "NodeName"
        cmd_to_exec = "%s %s" % (info['cmd_set']['base_cmd'],
                                 info['cmd_set']['start_cmd'])
        cmd_to_exec = cmd_to_exec.replace('{_NodeName_}', 'NodeName')
        self.assertRaises(exception.SSHCommandFailed,
                          ssh._power_on,
                          self.sshclient,
                          info)
        # Only the first status poll happens before the command blows up.
        get_power_status_mock.assert_called_once_with(self.sshclient, info)
        get_hosts_name_mock.assert_called_once_with(self.sshclient, info)
        exec_ssh_mock.assert_called_once_with(self.sshclient, cmd_to_exec)
    @mock.patch.object(processutils, 'ssh_execute')
    @mock.patch.object(ssh, '_get_power_status')
    @mock.patch.object(ssh, '_get_hosts_name_for_node')
    def test__power_off_good(self, get_hosts_name_mock,
                             get_power_status_mock, exec_ssh_mock):
        """_power_off runs stop_cmd and returns POWER_OFF on success."""
        info = ssh._parse_driver_info(self.node)
        info['macs'] = ["11:11:11:11:11:11", "52:54:00:cf:2d:31"]
        # Power status is polled before and after issuing the stop command.
        get_power_status_mock.side_effect = [states.POWER_ON,
                                             states.POWER_OFF]
        get_hosts_name_mock.return_value = "NodeName"
        expected = [mock.call(self.sshclient, info),
                    mock.call(self.sshclient, info)]
        cmd_to_exec = "%s %s" % (info['cmd_set']['base_cmd'],
                                 info['cmd_set']['stop_cmd'])
        cmd_to_exec = cmd_to_exec.replace('{_NodeName_}', 'NodeName')
        current_state = ssh._power_off(self.sshclient, info)
        self.assertEqual(states.POWER_OFF, current_state)
        self.assertEqual(expected, get_power_status_mock.call_args_list)
        get_hosts_name_mock.assert_called_once_with(self.sshclient, info)
        exec_ssh_mock.assert_called_once_with(self.sshclient, cmd_to_exec)
    @mock.patch.object(processutils, 'ssh_execute')
    @mock.patch.object(ssh, '_get_power_status')
    @mock.patch.object(ssh, '_get_hosts_name_for_node')
    def test__power_off_fail(self, get_hosts_name_mock,
                             get_power_status_mock, exec_ssh_mock):
        """If the node stays on after stop_cmd, _power_off returns ERROR."""
        info = ssh._parse_driver_info(self.node)
        info['macs'] = ["11:11:11:11:11:11", "52:54:00:cf:2d:31"]
        # Node remains POWER_ON on the post-command status poll.
        get_power_status_mock.side_effect = [states.POWER_ON,
                                             states.POWER_ON]
        get_hosts_name_mock.return_value = "NodeName"
        expected = [mock.call(self.sshclient, info),
                    mock.call(self.sshclient, info)]
        cmd_to_exec = "%s %s" % (info['cmd_set']['base_cmd'],
                                 info['cmd_set']['stop_cmd'])
        cmd_to_exec = cmd_to_exec.replace('{_NodeName_}', 'NodeName')
        current_state = ssh._power_off(self.sshclient, info)
        self.assertEqual(states.ERROR, current_state)
        self.assertEqual(expected, get_power_status_mock.call_args_list)
        get_hosts_name_mock.assert_called_once_with(self.sshclient, info)
        exec_ssh_mock.assert_called_once_with(self.sshclient, cmd_to_exec)
    @mock.patch.object(processutils, 'ssh_execute')
    @mock.patch.object(ssh, '_get_power_status')
    @mock.patch.object(ssh, '_get_hosts_name_for_node')
    def test__power_off_exception(self, get_hosts_name_mock,
                                  get_power_status_mock, exec_ssh_mock):
        """An SSH execution error during power-off raises SSHCommandFailed."""
        info = ssh._parse_driver_info(self.node)
        info['macs'] = ["11:11:11:11:11:11", "52:54:00:cf:2d:31"]
        exec_ssh_mock.side_effect = processutils.ProcessExecutionError
        get_power_status_mock.side_effect = [states.POWER_ON,
                                             states.POWER_OFF]
        get_hosts_name_mock.return_value = "NodeName"
        cmd_to_exec = "%s %s" % (info['cmd_set']['base_cmd'],
                                 info['cmd_set']['stop_cmd'])
        cmd_to_exec = cmd_to_exec.replace('{_NodeName_}', 'NodeName')
        self.assertRaises(exception.SSHCommandFailed, ssh._power_off,
                          self.sshclient, info)
        # Only the first status poll happens before the command blows up.
        get_power_status_mock.assert_called_once_with(self.sshclient, info)
        get_hosts_name_mock.assert_called_once_with(self.sshclient, info)
        exec_ssh_mock.assert_called_once_with(self.sshclient, cmd_to_exec)
def test_exec_ssh_command_good(self):
class Channel(object):
def recv_exit_status(self):
return 0
class Stream(object):
def __init__(self, buffer=''):
self.buffer = buffer
self.channel = Channel()
def read(self):
return self.buffer
def close(self):
pass
with mock.patch.object(self.sshclient,
'exec_command') as exec_command_mock:
exec_command_mock.return_value = (Stream(),
Stream('hello'),
Stream())
stdout, stderr = processutils.ssh_execute(self.sshclient,
"command")
self.assertEqual('hello', stdout)
exec_command_mock.assert_called_once_with("command")
def test_exec_ssh_command_fail(self):
class Channel(object):
def recv_exit_status(self):
return 127
class Stream(object):
def __init__(self, buffer=''):
self.buffer = buffer
self.channel = Channel()
def read(self):
return self.buffer
def close(self):
pass
with mock.patch.object(self.sshclient,
'exec_command') as exec_command_mock:
exec_command_mock.return_value = (Stream(),
Stream('hello'),
Stream())
self.assertRaises(processutils.ProcessExecutionError,
processutils.ssh_execute,
self.sshclient,
"command")
exec_command_mock.assert_called_once_with("command")
class SSHDriverTestCase(db_base.DbTestCase):
    """Tests for the public interfaces of the 'fake_ssh' driver.

    Covers power-state changes, reboot, boot-device get/set for the
    supported virt types (vbox, parallels, virsh, vmware) and driver
    validation.
    """

    def setUp(self):
        """Create a fake_ssh node with one port and a paramiko client."""
        super(SSHDriverTestCase, self).setUp()
        mgr_utils.mock_the_extension_manager(driver="fake_ssh")
        self.driver = driver_factory.get_driver("fake_ssh")
        self.node = obj_utils.create_test_node(
            self.context, driver='fake_ssh',
            driver_info=db_utils.get_test_ssh_info())
        self.port = obj_utils.create_test_port(self.context,
                                               node_id=self.node.id)
        self.sshclient = paramiko.SSHClient()

    @mock.patch.object(utils, 'ssh_connect')
    def test__validate_info_ssh_connect_failed(self, ssh_connect_mock):
        """A failed SSH connection surfaces as InvalidParameterValue."""
        info = ssh._parse_driver_info(self.node)
        ssh_connect_mock.side_effect = exception.SSHConnectFailed(host='fake')
        with task_manager.acquire(self.context, info['uuid'],
                                  shared=False) as task:
            self.assertRaises(exception.InvalidParameterValue,
                              task.driver.power.validate, task)
            driver_info = ssh._parse_driver_info(task.node)
            ssh_connect_mock.assert_called_once_with(driver_info)

    def test_get_properties(self):
        """All driver interfaces expose the same COMMON_PROPERTIES."""
        expected = ssh.COMMON_PROPERTIES
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=True) as task:
            self.assertEqual(expected, task.driver.power.get_properties())
            self.assertEqual(expected, task.driver.get_properties())
            self.assertEqual(expected, task.driver.management.get_properties())

    def test_validate_fail_no_port(self):
        """Validation fails for a node that has no ports (no MACs)."""
        new_node = obj_utils.create_test_node(
            self.context,
            uuid='aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee',
            driver='fake_ssh',
            driver_info=db_utils.get_test_ssh_info())
        with task_manager.acquire(self.context, new_node.uuid,
                                  shared=True) as task:
            self.assertRaises(exception.MissingParameterValue,
                              task.driver.power.validate,
                              task)

    @mock.patch.object(driver_utils, 'get_node_mac_addresses')
    @mock.patch.object(ssh, '_get_connection')
    @mock.patch.object(ssh, '_power_on')
    def test_reboot_good(self, power_on_mock, get_conn_mock,
                         get_mac_addr_mock):
        """reboot() succeeds when _power_on reports POWER_ON."""
        info = ssh._parse_driver_info(self.node)
        info['macs'] = ["11:11:11:11:11:11", "52:54:00:cf:2d:31"]
        get_mac_addr_mock.return_value = info['macs']
        get_conn_mock.return_value = self.sshclient
        power_on_mock.return_value = states.POWER_ON
        with mock.patch.object(ssh,
                               '_parse_driver_info') as parse_drv_info_mock:
            parse_drv_info_mock.return_value = info
            with task_manager.acquire(self.context, info['uuid'],
                                      shared=False) as task:
                task.driver.power.reboot(task)
                parse_drv_info_mock.assert_called_once_with(task.node)
                get_mac_addr_mock.assert_called_once_with(mock.ANY)
                get_conn_mock.assert_called_once_with(task.node)
                power_on_mock.assert_called_once_with(self.sshclient, info)

    @mock.patch.object(driver_utils, 'get_node_mac_addresses')
    @mock.patch.object(ssh, '_get_connection')
    @mock.patch.object(ssh, '_power_on')
    def test_reboot_fail(self, power_on_mock, get_conn_mock,
                         get_mac_addr_mock):
        """reboot() raises PowerStateFailure when the node stays off."""
        info = ssh._parse_driver_info(self.node)
        info['macs'] = ["11:11:11:11:11:11", "52:54:00:cf:2d:31"]
        get_mac_addr_mock.return_value = info['macs']
        get_conn_mock.return_value = self.sshclient
        power_on_mock.return_value = states.POWER_OFF
        with mock.patch.object(ssh,
                               '_parse_driver_info') as parse_drv_info_mock:
            parse_drv_info_mock.return_value = info
            with task_manager.acquire(self.context, info['uuid'],
                                      shared=False) as task:
                self.assertRaises(exception.PowerStateFailure,
                                  task.driver.power.reboot, task)
                parse_drv_info_mock.assert_called_once_with(task.node)
                get_mac_addr_mock.assert_called_once_with(mock.ANY)
                get_conn_mock.assert_called_once_with(task.node)
                power_on_mock.assert_called_once_with(self.sshclient, info)

    @mock.patch.object(driver_utils, 'get_node_mac_addresses')
    @mock.patch.object(ssh, '_get_connection')
    def test_set_power_state_bad_state(self, get_conn_mock,
                                       get_mac_addr_mock):
        """An unknown power state is rejected as InvalidParameterValue."""
        info = ssh._parse_driver_info(self.node)
        info['macs'] = ["11:11:11:11:11:11", "52:54:00:cf:2d:31"]
        get_mac_addr_mock.return_value = info['macs']
        get_conn_mock.return_value = self.sshclient
        with mock.patch.object(ssh,
                               '_parse_driver_info') as parse_drv_info_mock:
            parse_drv_info_mock.return_value = info
            with task_manager.acquire(self.context, info['uuid'],
                                      shared=False) as task:
                self.assertRaises(
                    exception.InvalidParameterValue,
                    task.driver.power.set_power_state,
                    task,
                    "BAD_PSTATE")
                parse_drv_info_mock.assert_called_once_with(task.node)
                get_mac_addr_mock.assert_called_once_with(mock.ANY)
                get_conn_mock.assert_called_once_with(task.node)

    @mock.patch.object(driver_utils, 'get_node_mac_addresses')
    @mock.patch.object(ssh, '_get_connection')
    @mock.patch.object(ssh, '_power_on')
    def test_set_power_state_on_good(self, power_on_mock, get_conn_mock,
                                     get_mac_addr_mock):
        """set_power_state(POWER_ON) succeeds when _power_on reports on."""
        info = ssh._parse_driver_info(self.node)
        info['macs'] = ["11:11:11:11:11:11", "52:54:00:cf:2d:31"]
        get_mac_addr_mock.return_value = info['macs']
        get_conn_mock.return_value = self.sshclient
        power_on_mock.return_value = states.POWER_ON
        with mock.patch.object(ssh,
                               '_parse_driver_info') as parse_drv_info_mock:
            parse_drv_info_mock.return_value = info
            with task_manager.acquire(self.context, info['uuid'],
                                      shared=False) as task:
                task.driver.power.set_power_state(task, states.POWER_ON)
                parse_drv_info_mock.assert_called_once_with(task.node)
                get_mac_addr_mock.assert_called_once_with(mock.ANY)
                get_conn_mock.assert_called_once_with(task.node)
                power_on_mock.assert_called_once_with(self.sshclient, info)

    @mock.patch.object(driver_utils, 'get_node_mac_addresses')
    @mock.patch.object(ssh, '_get_connection')
    @mock.patch.object(ssh, '_power_on')
    def test_set_power_state_on_fail(self, power_on_mock, get_conn_mock,
                                     get_mac_addr_mock):
        """set_power_state(POWER_ON) raises when the node stays off."""
        info = ssh._parse_driver_info(self.node)
        info['macs'] = ["11:11:11:11:11:11", "52:54:00:cf:2d:31"]
        get_mac_addr_mock.return_value = info['macs']
        get_conn_mock.return_value = self.sshclient
        power_on_mock.return_value = states.POWER_OFF
        with mock.patch.object(ssh,
                               '_parse_driver_info') as parse_drv_info_mock:
            parse_drv_info_mock.return_value = info
            with task_manager.acquire(self.context, info['uuid'],
                                      shared=False) as task:
                self.assertRaises(
                    exception.PowerStateFailure,
                    task.driver.power.set_power_state,
                    task,
                    states.POWER_ON)
                parse_drv_info_mock.assert_called_once_with(task.node)
                get_mac_addr_mock.assert_called_once_with(mock.ANY)
                get_conn_mock.assert_called_once_with(task.node)
                power_on_mock.assert_called_once_with(self.sshclient, info)

    @mock.patch.object(driver_utils, 'get_node_mac_addresses')
    @mock.patch.object(ssh, '_get_connection')
    @mock.patch.object(ssh, '_power_off')
    def test_set_power_state_off_good(self, power_off_mock, get_conn_mock,
                                      get_mac_addr_mock):
        """set_power_state(POWER_OFF) succeeds when _power_off reports off."""
        info = ssh._parse_driver_info(self.node)
        info['macs'] = ["11:11:11:11:11:11", "52:54:00:cf:2d:31"]
        get_mac_addr_mock.return_value = info['macs']
        get_conn_mock.return_value = self.sshclient
        power_off_mock.return_value = states.POWER_OFF
        with mock.patch.object(ssh,
                               '_parse_driver_info') as parse_drv_info_mock:
            parse_drv_info_mock.return_value = info
            with task_manager.acquire(self.context, info['uuid'],
                                      shared=False) as task:
                task.driver.power.set_power_state(task, states.POWER_OFF)
                parse_drv_info_mock.assert_called_once_with(task.node)
                get_mac_addr_mock.assert_called_once_with(mock.ANY)
                get_conn_mock.assert_called_once_with(task.node)
                power_off_mock.assert_called_once_with(self.sshclient, info)

    @mock.patch.object(driver_utils, 'get_node_mac_addresses')
    @mock.patch.object(ssh, '_get_connection')
    @mock.patch.object(ssh, '_power_off')
    def test_set_power_state_off_fail(self, power_off_mock, get_conn_mock,
                                      get_mac_addr_mock):
        """set_power_state(POWER_OFF) raises when the node stays on."""
        info = ssh._parse_driver_info(self.node)
        info['macs'] = ["11:11:11:11:11:11", "52:54:00:cf:2d:31"]
        get_mac_addr_mock.return_value = info['macs']
        get_conn_mock.return_value = self.sshclient
        power_off_mock.return_value = states.POWER_ON
        with mock.patch.object(ssh,
                               '_parse_driver_info') as parse_drv_info_mock:
            parse_drv_info_mock.return_value = info
            with task_manager.acquire(self.context, info['uuid'],
                                      shared=False) as task:
                self.assertRaises(
                    exception.PowerStateFailure,
                    task.driver.power.set_power_state,
                    task,
                    states.POWER_OFF)
                parse_drv_info_mock.assert_called_once_with(task.node)
                get_mac_addr_mock.assert_called_once_with(mock.ANY)
                get_conn_mock.assert_called_once_with(task.node)
                power_off_mock.assert_called_once_with(self.sshclient, info)

    @mock.patch.object(ssh, '_get_connection')
    @mock.patch.object(ssh, '_get_hosts_name_for_node')
    @mock.patch.object(ssh, '_ssh_execute')
    def test_management_interface_set_boot_device_vbox_ok(self, mock_exc,
                                                          mock_h,
                                                          mock_get_conn):
        """set_boot_device(PXE) issues the expected VBoxManage command."""
        fake_name = 'fake-name'
        mock_h.return_value = fake_name
        mock_get_conn.return_value = self.sshclient
        with task_manager.acquire(self.context, self.node.uuid) as task:
            task.node['driver_info']['ssh_virt_type'] = 'vbox'
            self.driver.management.set_boot_device(task, boot_devices.PXE)
            expected_cmd = ('LC_ALL=C /usr/bin/VBoxManage modifyvm %s '
                            '--boot1 net') % fake_name
            mock_exc.assert_called_once_with(mock.ANY, expected_cmd)

    @mock.patch.object(ssh, '_get_connection')
    @mock.patch.object(ssh, '_get_hosts_name_for_node')
    @mock.patch.object(ssh, '_ssh_execute')
    def test_management_interface_set_boot_device_parallels_ok(self, mock_exc,
                                                               mock_h,
                                                               mock_get_conn):
        """set_boot_device(PXE) issues the expected prlctl command."""
        fake_name = 'fake-name'
        mock_h.return_value = fake_name
        mock_get_conn.return_value = self.sshclient
        with task_manager.acquire(self.context, self.node.uuid) as task:
            task.node['driver_info']['ssh_virt_type'] = 'parallels'
            self.driver.management.set_boot_device(task, boot_devices.PXE)
            expected_cmd = ('LC_ALL=C /usr/bin/prlctl set %s '
                            '--device-bootorder "net0"') % fake_name
            mock_exc.assert_called_once_with(mock.ANY, expected_cmd)

    @mock.patch.object(ssh, '_get_connection')
    @mock.patch.object(ssh, '_get_hosts_name_for_node')
    @mock.patch.object(ssh, '_ssh_execute')
    def test_management_interface_set_boot_device_virsh_ok(self, mock_exc,
                                                           mock_h,
                                                           mock_get_conn):
        """set_boot_device(PXE) issues the expected virsh edit command."""
        fake_name = 'fake-name'
        mock_h.return_value = fake_name
        mock_get_conn.return_value = self.sshclient
        with task_manager.acquire(self.context, self.node.uuid) as task:
            task.node['driver_info']['ssh_virt_type'] = 'virsh'
            self.driver.management.set_boot_device(task, boot_devices.PXE)
            # virsh rewrites the domain XML in place via a sed EDITOR.
            expected_cmd = ('EDITOR="sed -i \'/<boot \\(dev\\|order\\)=*\\>'
                            '/d;/<\\/os>/i\\<boot dev=\\"network\\"/>\'" '
                            'LC_ALL=C /usr/bin/virsh --connect qemu:///system '
                            'edit %s') % fake_name
            mock_exc.assert_called_once_with(mock.ANY, expected_cmd)

    def test_set_boot_device_bad_device(self):
        """An unknown boot device is rejected as InvalidParameterValue."""
        with task_manager.acquire(self.context, self.node.uuid) as task:
            self.assertRaises(exception.InvalidParameterValue,
                              self.driver.management.set_boot_device,
                              task, 'invalid-device')

    @mock.patch.object(ssh, '_get_connection')
    @mock.patch.object(ssh, '_get_hosts_name_for_node')
    def test_set_boot_device_not_supported(self, mock_h, mock_get_conn):
        """vmware virt type does not implement set_boot_device()."""
        mock_h.return_value = 'NodeName'
        mock_get_conn.return_value = self.sshclient
        with task_manager.acquire(self.context, self.node.uuid) as task:
            # vmware does not support set_boot_device()
            task.node['driver_info']['ssh_virt_type'] = 'vmware'
            self.assertRaises(NotImplementedError,
                              self.driver.management.set_boot_device,
                              task, boot_devices.PXE)

    def test_management_interface_get_supported_boot_devices(self):
        """Driver advertises PXE, disk and CD-ROM boot devices."""
        with task_manager.acquire(self.context, self.node.uuid) as task:
            expected = [boot_devices.PXE, boot_devices.DISK,
                        boot_devices.CDROM]
            self.assertEqual(sorted(expected), sorted(task.driver.management.
                             get_supported_boot_devices()))

    @mock.patch.object(ssh, '_get_connection')
    @mock.patch.object(ssh, '_get_hosts_name_for_node')
    @mock.patch.object(ssh, '_ssh_execute')
    def test_management_interface_get_boot_device_vbox(self, mock_exc,
                                                       mock_h,
                                                       mock_get_conn):
        """get_boot_device() maps VBox 'net' output to PXE."""
        fake_name = 'fake-name'
        mock_h.return_value = fake_name
        mock_exc.return_value = ('net', '')
        mock_get_conn.return_value = self.sshclient
        with task_manager.acquire(self.context, self.node.uuid) as task:
            task.node['driver_info']['ssh_virt_type'] = 'vbox'
            result = self.driver.management.get_boot_device(task)
            self.assertEqual(boot_devices.PXE, result['boot_device'])
            expected_cmd = ('LC_ALL=C /usr/bin/VBoxManage showvminfo '
                            '--machinereadable %s '
                            '| awk -F \'"\' \'/boot1/{print $2}\'') % fake_name
            mock_exc.assert_called_once_with(mock.ANY, expected_cmd)

    @mock.patch.object(ssh, '_get_connection')
    @mock.patch.object(ssh, '_get_hosts_name_for_node')
    @mock.patch.object(ssh, '_ssh_execute')
    def test_management_interface_get_boot_device_parallels(self, mock_exc,
                                                            mock_h,
                                                            mock_get_conn):
        """get_boot_device() maps Parallels 'net0' output to PXE."""
        fake_name = 'fake-name'
        mock_h.return_value = fake_name
        mock_exc.return_value = ('net0', '')
        mock_get_conn.return_value = self.sshclient
        with task_manager.acquire(self.context, self.node.uuid) as task:
            task.node['driver_info']['ssh_virt_type'] = 'parallels'
            result = self.driver.management.get_boot_device(task)
            self.assertEqual(boot_devices.PXE, result['boot_device'])
            expected_cmd = ('LC_ALL=C /usr/bin/prlctl list -i %s '
                            '| awk \'/^Boot order:/ {print $3}\'') % fake_name
            mock_exc.assert_called_once_with(mock.ANY, expected_cmd)

    @mock.patch.object(ssh, '_get_connection')
    @mock.patch.object(ssh, '_get_hosts_name_for_node')
    @mock.patch.object(ssh, '_ssh_execute')
    def test_management_interface_get_boot_device_virsh(self, mock_exc,
                                                        mock_h,
                                                        mock_get_conn):
        """get_boot_device() maps virsh 'network' output to PXE."""
        fake_name = 'fake-name'
        mock_h.return_value = fake_name
        mock_exc.return_value = ('network', '')
        mock_get_conn.return_value = self.sshclient
        with task_manager.acquire(self.context, self.node.uuid) as task:
            task.node['driver_info']['ssh_virt_type'] = 'virsh'
            result = self.driver.management.get_boot_device(task)
            self.assertEqual(boot_devices.PXE, result['boot_device'])
            expected_cmd = ('LC_ALL=C /usr/bin/virsh --connect '
                            'qemu:///system dumpxml %s | awk \'/boot dev=/ '
                            '{ gsub( ".*dev=" Q, "" ); gsub( Q ".*", "" ); '
                            'print; }\' Q="\'" RS="[<>]" | head -1') % fake_name
            mock_exc.assert_called_once_with(mock.ANY, expected_cmd)

    @mock.patch.object(ssh, '_get_connection')
    @mock.patch.object(ssh, '_get_hosts_name_for_node')
    def test_get_boot_device_not_supported(self, mock_h, mock_get_conn):
        """vmware virt type reports no boot device information."""
        mock_h.return_value = 'NodeName'
        mock_get_conn.return_value = self.sshclient
        with task_manager.acquire(self.context, self.node.uuid) as task:
            # vmware does not support get_boot_device()
            task.node['driver_info']['ssh_virt_type'] = 'vmware'
            expected = {'boot_device': None, 'persistent': None}
            self.assertEqual(expected,
                             self.driver.management.get_boot_device(task))

    @mock.patch.object(ssh, '_get_connection')
    @mock.patch.object(ssh, '_get_hosts_name_for_node')
    @mock.patch.object(ssh, '_ssh_execute')
    def test_get_power_state_vmware(self, mock_exc, mock_h, mock_get_conn):
        # To see replacing {_NodeName_} in vmware's list_running
        nodename = 'fakevm'
        mock_h.return_value = nodename
        mock_get_conn.return_value = self.sshclient
        # list_running quotes names
        mock_exc.return_value = ('"%s"' % nodename, '')
        with task_manager.acquire(self.context, self.node.uuid) as task:
            task.node['driver_info']['ssh_virt_type'] = 'vmware'
            power_state = self.driver.power.get_power_state(task)
            self.assertEqual(states.POWER_ON, power_state)
            expected_cmd = ("LC_ALL=C /bin/vim-cmd vmsvc/power.getstate "
                            "%(node)s | grep 'Powered on' >/dev/null && "
                            "echo '\"%(node)s\"' || true") % {'node': nodename}
            mock_exc.assert_called_once_with(mock.ANY, expected_cmd)

    def test_management_interface_validate_good(self):
        """Validation passes with complete SSH driver_info."""
        with task_manager.acquire(self.context, self.node.uuid) as task:
            task.driver.management.validate(task)

    def test_management_interface_validate_fail(self):
        """Validation fails when SSH driver_info is missing."""
        # Missing SSH driver_info information
        node = obj_utils.create_test_node(self.context,
                                          uuid=utils.generate_uuid(),
                                          driver='fake_ssh')
        with task_manager.acquire(self.context, node.uuid) as task:
            self.assertRaises(exception.MissingParameterValue,
                              task.driver.management.validate, task)
| ramineni/myironic | ironic/tests/drivers/test_ssh.py | Python | apache-2.0 | 45,959 |
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Parallel test runner for GoogleTest programs.
This script allows one to execute GoogleTest tests in parallel.
GoogleTest programs come with built-in support for running in parallel.
Here tests can automatically be partitioned across a number of test
program invocations ("shards"). This script provides a convenient
wrapper around that functionality and streamlined output.
"""
from __future__ import print_function
import multiprocessing
import optparse
import os
import signal
import subprocess
import sys
# Heuristic default: modestly oversubscribe the CPUs (1.5x) so shards that
# block on I/O don't leave cores idle.
DEFAULT_NUM_JOBS = int(multiprocessing.cpu_count() * 1.5)
class Bcolors(object):
    """
    A collection of tty output modifiers.

    To switch the output of a string, prefix it with the desired
    modifier, and terminate it with 'ENDC'.

    When stdout is not a terminal every modifier is the empty string,
    so colorized output degrades to plain text.
    """
    HEADER = '\033[95m' if sys.stdout.isatty() else ''
    OKBLUE = '\033[94m' if sys.stdout.isatty() else ''
    OKGREEN = '\033[92m' if sys.stdout.isatty() else ''
    WARNING = '\033[93m' if sys.stdout.isatty() else ''
    FAIL = '\033[91m' if sys.stdout.isatty() else ''
    ENDC = '\033[0m' if sys.stdout.isatty() else ''
    BOLD = '\033[1m' if sys.stdout.isatty() else ''
    UNDERLINE = '\033[4m' if sys.stdout.isatty() else ''

    @staticmethod
    def colorize(string, *color_codes):
        """Decorate a string with a number of color codes."""
        # Outside a terminal neither the prefix nor the terminator is
        # emitted, so the input is returned unchanged.
        prefix = ''.join(color_codes) if sys.stdout.isatty() else ''
        suffix = Bcolors.ENDC if sys.stdout.isatty() else ''
        return prefix + string + suffix
def run_test(opts):
    """
    Perform an actual run of the test executable.

    Expects a list of parameters giving the number of the current
    shard, the total number of shards, and the executable to run.
    Returns a `(success, output)` tuple and prints a green/red progress
    dot as a side effect.
    """
    shard_index, total_shards, command = opts

    # Let the parent process handle Ctrl-C for the whole pool.
    signal.signal(signal.SIGINT, signal.SIG_IGN)

    # Propagate the shard assignment to gtest via its environment knobs.
    env = os.environ.copy()
    env['GTEST_TOTAL_SHARDS'] = str(total_shards)
    env['GTEST_SHARD_INDEX'] = str(shard_index)

    try:
        output = subprocess.check_output(
            command.split(),
            stderr=subprocess.STDOUT,
            env=env,
            universal_newlines=True)
    except subprocess.CalledProcessError as error:
        print(Bcolors.colorize('.', Bcolors.FAIL), end='')
        sys.stdout.flush()
        return False, error.output

    print(Bcolors.colorize('.', Bcolors.OKGREEN), end='')
    sys.stdout.flush()
    return True, output
def parse_arguments():
"""Return the executable to work on, and a list of options."""
parser = optparse.OptionParser(
usage='Usage: %prog [options] <test> [-- <test_options>]')
parser.add_option(
'-j', '--jobs', type='int',
default=DEFAULT_NUM_JOBS,
help='number of parallel jobs to spawn. DEFAULT: {default_}'
.format(default_=DEFAULT_NUM_JOBS))
parser.add_option(
'-s', '--sequential', type='string',
default='',
help='gtest filter for tests to run sequentially')
parser.add_option(
'-v', '--verbosity', type='int',
default=1,
help='output verbosity:'
' 0 only shows summarized information,'
' 1 also shows full logs of failed shards, and anything'
' >1 shows all output. DEFAULT: 1')
(options, executable) = parser.parse_args()
if not executable:
parser.print_usage()
sys.exit(1)
if not os.path.isfile(executable[0]):
print(
Bcolors.colorize(
"ERROR: File '{file}' does not exists"
.format(file=executable[0]), Bcolors.FAIL),
file=sys.stderr)
sys.exit(1)
if not os.access(executable[0], os.X_OK):
print(
Bcolors.colorize(
"ERROR: File '{file}' is not executable"
.format(file=executable[0]), Bcolors.FAIL),
file=sys.stderr)
sys.exit(1)
if options.sequential and options.sequential.count(':-'):
print(
Bcolors.colorize(
"ERROR: Cannot use negative filters in "
"'sequential' parameter: '{filter}'"
.format(filter=options.sequential), Bcolors.FAIL),
file=sys.stderr)
sys.exit(1)
if options.sequential and os.environ.get('GTEST_FILTER') and \
os.environ['GTEST_FILTER'].count(':-'):
print(
Bcolors.colorize(
"ERROR: Cannot specify both 'sequential' ""option "
"and environment variable 'GTEST_FILTER' "
"containing negative filters",
Bcolors.FAIL),
file=sys.stderr)
sys.exit(1)
# Since empty strings are falsy, directly compare against `None`
# to preserve an empty string passed via `GTEST_FILTER`.
if os.environ.get('GTEST_FILTER') != None:
options.parallel = '{env_filter}:-{sequential_filter}'\
.format(env_filter=os.environ['GTEST_FILTER'],
sequential_filter=options.sequential)
else:
options.parallel = '*:-{sequential_filter}'\
.format(sequential_filter=options.sequential)
return executable, options
if __name__ == '__main__':
    EXECUTABLE, OPTIONS = parse_arguments()

    def options_gen(executable, filter_, jobs):
        """Generator for options for a certain shard.

        Here we set up GoogleTest specific flags, and generate
        distinct shard indices. Yields `(shard, jobs, command)` tuples
        for `run_test`.
        """
        # `executable` is a list: the test binary followed by its own
        # arguments. Build a single command string, since `run_test`
        # splits on whitespace. (Formatting the list directly would
        # interpolate its repr, e.g. "['--flag']", into the command.)
        command = executable[0]

        # If we run in a terminal, enable colored test output. We
        # still allow users to disable this themselves via extra args.
        if sys.stdout.isatty():
            command += ' --gtest_color=yes'

        if len(executable) > 1:
            command += ' ' + ' '.join(executable[1:])

        if filter_:
            command += ' --gtest_filter=' + filter_

        for shard in range(jobs):
            yield shard, jobs, command

    try:
        RESULTS = []

        POOL = multiprocessing.Pool(processes=OPTIONS.jobs)

        # Run parallel tests.
        #
        # Multiprocessing's `map` cannot properly handle `KeyboardInterrupt` in
        # some python versions. Use `map_async` with an explicit timeout
        # instead. See http://stackoverflow.com/a/1408476.
        #
        # NOTE: `sys.maxsize` (unlike the former `sys.maxint`) exists on
        # both Python 2 and Python 3.
        RESULTS.extend(
            POOL.map_async(
                run_test,
                options_gen(
                    EXECUTABLE, OPTIONS.parallel, OPTIONS.jobs)).get(
                        timeout=sys.maxsize))

        # Now run sequential tests.
        if OPTIONS.sequential:
            RESULTS.extend(
                POOL.map_async(
                    run_test,
                    options_gen(
                        EXECUTABLE, OPTIONS.sequential, 1)).get(
                            timeout=sys.maxsize))

        # Count the number of failed shards and print results from
        # failed shards.
        #
        # NOTE: The `RESULTS` array stores the result for each
        # `run_test` invocation returning a tuple (success, output).
        NFAILED = len([success for success, __ in RESULTS if not success])

        # TODO(bbannier): Introduce a verbosity which prints results
        # as they arrive; this likely requires some output parsing to
        # ensure results from different tests do not interleave.
        for success, output in RESULTS:
            if not success:
                if OPTIONS.verbosity > 0:
                    print(output, file=sys.stderr)
            elif OPTIONS.verbosity > 1:
                print(output, file=sys.stdout)

        if NFAILED > 0:
            print(Bcolors.colorize(
                '\n[FAIL]: {nfailed} shard(s) have failed tests'.format(
                    nfailed=NFAILED),
                Bcolors.FAIL, Bcolors.BOLD),
                file=sys.stderr)
        else:
            print(Bcolors.colorize('\n[PASS]', Bcolors.OKGREEN, Bcolors.BOLD))

        sys.exit(NFAILED)

    except KeyboardInterrupt:
        # Force a newline after intermediate test reports.
        print()

        print('Caught KeyboardInterrupt, terminating workers')
        POOL.terminate()
        POOL.join()
        sys.exit(1)

    except OSError as error:
        print(Bcolors.colorize(
            '\nERROR: {err}'.format(err=error),
            Bcolors.FAIL, Bcolors.BOLD))
        POOL.terminate()
        POOL.join()
        sys.exit(1)
| zmalik/mesos | support/mesos-gtest-runner.py | Python | apache-2.0 | 9,368 |
# Copyright 2015, Avi Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ungettext_lazy
from horizon import exceptions
from horizon import messages
from horizon import tables
from openstack_dashboard import api
from openstack_dashboard import policy
from avidashboard.api import avi
import logging
LOG = logging.getLogger(__name__)
class AddCertificateLink(tables.LinkAction):
    """Table action that opens the "Add Certificate" modal form."""
    name = "addcertificate"
    verbose_name = _("Add Certificate")
    url = "horizon:project:loadbalancers:addcertificate"
    classes = ("ajax-modal",)
    icon = "plus"
    # NOTE(review): reuses the health-monitor creation policy rule —
    # presumably because no certificate-specific rule exists; confirm.
    policy_rules = (("network", "create_health_monitor"),)
class DeleteCertificateLink(policy.PolicyTargetMixin,
                            tables.DeleteAction):
    """Batch delete action for certificate rows."""
    name = "deletecertificate"
    # NOTE(review): reuses the health-monitor deletion policy rule —
    # presumably because no certificate-specific rule exists; confirm.
    policy_rules = (("network", "delete_health_monitor"),)

    @staticmethod
    def action_present(count):
        """Button label, pluralized by selection count."""
        return ungettext_lazy(
            u"Delete Certificate",
            u"Delete Certificates",
            count
        )

    @staticmethod
    def action_past(count):
        """Past-tense confirmation message, pluralized by count."""
        return ungettext_lazy(
            u"Scheduled deletion of Certificate",
            u"Scheduled deletion of Certificates",
            count
        )

    def delete(self, request, obj_id):
        """Delete one certificate via the avi API, reporting the outcome."""
        try:
            avi.delete_cert(request, obj_id)
            messages.success(request, _('Deleted certificate %s') % obj_id)
        except Exception as e:
            exceptions.handle(request,
                              _('Unable to delete certificate. %s') % e)
def _filter_allowed(request, datum):
    """Return True when the pool `datum` and its VIP both speak HTTP(S).

    Certificate actions only make sense for web-protocol pools that
    actually have a VIP attached.
    """
    web_protocols = ("HTTP", "HTTPS")
    if not datum.vip_id:
        return False
    vip = api.lbaas.vip_get(request, datum.vip_id)
    if not vip:
        return False
    return datum.protocol in web_protocols and vip.protocol in web_protocols
class AssociateCertificateLink(tables.LinkAction):
    """Row action to attach a certificate to a pool's VIP.

    Only offered for avi-backed HTTP/HTTPS pools where the VIP or the
    pool does not yet have a certificate associated.
    """
    name = "associatecertificate"
    verbose_name = _("Associate Certificates")
    classes = ("ajax-modal", "btn-update")
    policy_rules = (("network", "update_vip"),)

    def get_link_url(self, pool):
        """Return the modal URL for associating a certificate with `pool`."""
        base_url = reverse("horizon:project:loadbalancers:associatecertificate",
                           kwargs={'pool_id': pool.id})
        return base_url

    def allowed(self, request, datum=None):
        """Return True if this action should be shown for the pool row."""
        if not datum:
            return False
        if not _filter_allowed(request, datum):
            return False
        # pool+vip proto is HTTP or HTTPS
        try:
            vip = avi.get_vip(request, datum.vip_id)
            cert = avi.get_vip_cert(vip)
        except Exception:
            # this prevents non-avi providers
            # NOTE: a bare `except:` would also swallow SystemExit and
            # KeyboardInterrupt; Exception suffices for the avi check.
            return False
        if not cert:
            return True
        if datum.protocol == 'HTTP':
            return False
        p = avi.get_pool_cert(request, datum.id)
        if p:
            return False
        # at least one of them doesn't have a certificate
        return True
class DisassociateCertificateLink(tables.LinkAction):
    """Row action opening the "disassociate certificate from pool" form."""
    name = "disassociatecertificate"
    verbose_name = _("Disassociate Certificates")
    classes = ("ajax-modal", "btn-danger")
    policy_rules = (("network", "update_vip"),)
    def get_link_url(self, pool):
        """Build the per-pool URL of the disassociation form."""
        base_url = reverse("horizon:project:loadbalancers:disassociatecertificate",
                           kwargs={'pool_id': pool.id})
        return base_url
    def allowed(self, request, datum=None):
        """Show the action only when the VIP or the pool actually has a
        certificate that can be removed."""
        if not datum:
            return False
        if not _filter_allowed(request, datum):
            return False
        # pool+vip proto is HTTP or HTTPS
        try:
            vip = avi.get_vip(request, datum.vip_id)
            cert = avi.get_vip_cert(vip)
        except Exception:
            # Narrowed from a bare "except:" so SystemExit/KeyboardInterrupt
            # are not swallowed; any Avi failure means a non-Avi provider,
            # so hide the action.
            return False
        if cert:
            return True
        if datum.protocol == 'HTTP':
            return False
        p = avi.get_pool_cert(request, datum.id)
        if p:
            return True
        return False
class CertificatesTable(tables.DataTable):
    """Lists the SSL certificates known to the load balancer service."""
    name = tables.Column("name",
                         verbose_name=_("Name"),
                         #link="horizon:project:loadbalancers:certdetails"
                         )
    cname = tables.Column("cname", verbose_name=_("Common Name"))
    iname = tables.Column("iname", verbose_name=_("Issuer Name"))
    algo = tables.Column("algo", verbose_name=_("Algorithm"))
    self_signed = tables.Column("self_signed", verbose_name=_("Self Signed"))
    expires = tables.Column("expires", verbose_name=_("Valid Until"))
    class Meta(object):
        name = "certificatestable"
        verbose_name = _("Certificates")
        table_actions = (AddCertificateLink, DeleteCertificateLink)
        # NOTE(review): Associate/DisassociateCertificateLink are defined
        # above but not wired into row_actions -- confirm this is intended.
        row_actions = ()
| avinetworks/avi-horizon-dashboard | avidashboard/dashboards/project/loadbalancers/tables.py | Python | apache-2.0 | 5,409 |
import cookielib
import mechanize
#import weakref
import random
import time
import uuid
import sys
from loremipsum import get_sentence
from birdie_settings import *
from initialize_db import (
User,
DBSession,
)
class FakeUser(object):
    """A randomly chosen existing user driving one mechanize browser.

    Each public method performs one site action and returns a
    (label, latency_in_seconds) tuple used by the load-test reporter.
    """
    def __init__(self, browser):
        # Adopt the credentials of a random row from the local users table.
        rand = random.randrange(0, MAX_USERS)
        row = DBSession.query(User.id, User.username, User.password)[rand]
        self.id = row.id
        self.username = row.username
        self.password = row.password
        self.br = browser
        self.logged_in = False
    def log_in(self):
        # Log in at most once; returns () when already logged in.
        timer = ()
        if not self.logged_in:
            timer = _login(self.br, self.username, self.password)
            if timer[0] == 'Login':
                self.logged_in = True
        return timer
    def post_chirp(self):
        # Post one random sentence through the first form on the user page.
        latency=0
        timer = ()
        br = self.br
        _ = br.open(BASE_URL+'/'+self.username)
        # chirps_count=random.randrange(1, MAX_CHIRPS)
        # for i in range(chirps_count):
        br.select_form(nr=0)
        br.form[ 'chirp' ] = get_sentence()
        start_timer = time.time()
        resp = br.submit()
        resp.read()
        latency += time.time() - start_timer
        # verify responses are valid
        assert (resp.code == 200), 'Bad Response: HTTP %s' % resp.code
        # assert ('my birdie' in resp.get_data()), 'Text Assertion Failed'
        timer = 'Chirp', latency
        return timer
    def follow(self):
        br=self.br
        # randomly pick a friend - may be myself or a friend of mine, don't care
        friend = FakeUser(self.br)
        start_timer = time.time()
        resp = br.open(BASE_URL+'/'+friend.username+'/follow')
        resp.read()
        latency = time.time() - start_timer
        # verify responses are valid
        assert (resp.code == 200), 'Bad Response: HTTP %s' % resp.code
        # assert ('my birdie' in resp.get_data()), 'Text Assertion Failed'
        timer = 'Follow', latency
        return timer
    def unfollow(self):
        br=self.br
        # randomly pick a friend - may be myself or not an actual friend of mine, don't care
        old_friend = FakeUser(self.br)
        start_timer = time.time()
        resp = br.open(BASE_URL+'/'+old_friend.username+'/unfollow')
        resp.read()
        latency = time.time() - start_timer
        # verify responses are valid
        assert (resp.code == 200), 'Bad Response: HTTP %s' % resp.code
        # assert ('my birdie' in resp.get_data()), 'Text Assertion Failed'
        timer = 'Unfollow', latency
        return timer
    def view(self):
        br=self.br
        # randomly pick a user - may be myself or a friend of mine, don't care
        buddy = FakeUser(self.br)
        start_timer = time.time()
        resp = br.open(BASE_URL+'/'+buddy.username+'/view')
        resp.read()
        latency = time.time() - start_timer
        # verify responses are valid
        assert (resp.code == 200), 'Bad Response: HTTP %s' % resp.code
        # assert ('my birdie' in resp.get_data()), 'Text Assertion Failed'
        timer = 'View_profile', latency
        return timer
    def __str__(self):
        return "FakeUser<user=%s,logged_in=%s>" % (self.username, self.logged_in)
# utility functions
def init_browser():
    """Create a mechanize browser configured to look like a normal client.

    Robots.txt handling is disabled and a browser-like User-Agent header
    is set so the target site does not reject the load-test traffic.
    """
    browser = mechanize.Browser()
    browser.set_handle_equiv(True)
    # br.set_handle_gzip(True)
    browser.set_handle_redirect(True)
    browser.set_handle_referer(True)
    browser.set_handle_robots(False)
    # add a custom header to declare "Believe me, I am not a robot"
    browser.addheaders = [('User-agent', 'Mozilla/5.0')]
    browser.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time=1)
    return browser
def _login(br, u, p):
    """Submit the login form with credentials *u*/*p* through browser *br*.

    Returns ('Login' | 'Login_failed', latency_in_seconds).
    """
    br.open(BASE_URL + '/login')
    br.select_form(nr=0)
    br.form['login'] = u
    br.form['password'] = p
    started = time.time()
    resp = br.submit()
    resp.read()
    latency = time.time() - started
    assert (resp.code == 200), 'Bad Response: HTTP %s' % resp.code
    if 'Failed login' in resp.get_data():
        return 'Login_failed', latency
    return 'Login', latency
def add_user(br):
    """Register a brand-new fake user through the /join form.

    Returns a (label, latency) timing tuple and records the credentials
    in the local DB session for later reuse (committing is the caller's
    responsibility).
    """
    timer=()
    # build a brand new fake user
    random_uid = str(uuid.uuid4())
    fullname = random_uid
    username = BASE_USERNAME+random_uid[:8]
    password = random_uid[:8]
    _ = br.open(BASE_URL+'/join')
    br.select_form(nr=0)
    br.form[ 'fullname' ] = fullname
    br.form[ 'username' ] = username
    br.form[ 'password' ] = password
    br.form[ 'confirm' ] = password
    br.form[ 'about' ] = ABOUT
    start_time = time.time()
    resp = br.submit()
    resp.read()
    latency = time.time() - start_time
    # verify responses are valid
    assert (resp.code == 200), 'Bad Response: HTTP %s' % resp.code
    # Staying on /join after submit means the registration was rejected.
    if resp.geturl() == BASE_URL+'/join':
        timer = 'Failed_registration', latency
    else:
        timer = 'Register_new_user', latency
    # add user in the local db (for future retrieval)
    DBSession.add( User (username=username, password=password) )
    # logout and reset cookie
    resp = br.open(BASE_URL+'/logout')
    # resp.read()
    # verify responses are valid
    assert (resp.code == 200), 'Bad Response: HTTP %s' % resp.code
    assert ('Public Timeline' in resp.get_data()), 'Text Assertion Failed'
    return timer
def populate_db(br=None, size=MAX_USERS):
    """Register *size* brand-new fake users through the web UI (Python 2)."""
    if not br:
        br = init_browser()
    print ''
    for index in range(size):
        sys.stdout.write('\rPopulating the database with {} new users over {}'.format(index+1, MAX_USERS))
        sys.stdout.flush()
        add_user(br)
    # Persist all credentials recorded by add_user() in one transaction.
    DBSession.commit()
    print ''
| simonwoo/Birdie_Redis | birdie-stress/test_scripts/utils.py | Python | mit | 6,074 |
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.test import Client, RequestFactory, TestCase
from louderdev.articles.models import Article
class ArticleTest(TestCase):
    """Exercises the edit_article view's owner-only access control."""
    def setUp(self):
        self.client = Client()
        self.factory = RequestFactory()
    def test_validate_article_edition(self):
        """Only the article's author may GET/POST the edit view."""
        user2 = User.objects.create_user(username="teste12345",
                                         email="reallynice2@gmail.com",
                                         password="supersecret123")
        article = Article()
        article.title = "nicetitle"
        article.content = "nicecontent"
        article.create_user = user2
        article.create_user.id = user2.id
        article.save()
        # "teste1234" does not exist, so this login fails and the
        # anonymous request is redirected (302) away from the edit view.
        self.client.login(username="teste1234", password="supersecret123")
        response = self.client.get(reverse('edit_article', kwargs={'id': '1'}))
        self.assertEqual(response.status_code, 302)
        response = self.client.post(reverse('edit_article',
                                            kwargs={'id': '1'}))
        self.assertEqual(response.status_code, 302)
        # The real author can both view and submit the edit form.
        self.client.login(username="teste12345", password="supersecret123")
        response = self.client.get(reverse('edit_article', kwargs={'id': '1'}),
                                   user=user2)
        self.assertEqual(response.status_code, 200)
        response = self.client.post(reverse('edit_article',
                                            kwargs={'id': '1'}))
        self.assertEqual(response.status_code, 200)
        # NOTE(review): the article id is hard-coded to '1'; using
        # article.id would decouple the test from PK sequencing.
| drakeloud/louderdev | louderdev/home/tests/test_articles.py | Python | mit | 1,588 |
from __future__ import unicode_literals
from django.apps import apps
from django.conf import settings
from django.db import connection
from django.test import TestCase, skipIfDBFeature, skipUnlessDBFeature
from .models.tablespaces import (
Article, ArticleRef, Authors, Reviewers, Scientist, ScientistRef,
)
def sql_for_table(model):
    """Return the CREATE TABLE statement the schema editor emits for *model*."""
    with connection.schema_editor(collect_sql=True) as editor:
        editor.create_model(model)
    return editor.collected_sql[0]
def sql_for_index(model):
    """Return all index-creation SQL for *model* joined into one string."""
    return '\n'.join(connection.schema_editor()._model_indexes_sql(model))
# We can't test the DEFAULT_TABLESPACE and DEFAULT_INDEX_TABLESPACE settings
# because they're evaluated when the model class is defined. As a consequence,
# @override_settings doesn't work, and the tests depend on the settings in use.
class TablespacesTests(TestCase):
    """Counts how often tablespace clauses appear in generated DDL."""
    def setUp(self):
        # The unmanaged models need to be removed after the test in order to
        # prevent bad interactions with the flush operation in other tests.
        self._old_models = apps.app_configs['model_options'].models.copy()
        for model in Article, Authors, Reviewers, Scientist:
            model._meta.managed = True
    def tearDown(self):
        # Restore the models to unmanaged and reset the app registry caches.
        for model in Article, Authors, Reviewers, Scientist:
            model._meta.managed = False
        apps.app_configs['model_options'].models = self._old_models
        apps.all_models['model_options'] = self._old_models
        apps.clear_cache()
    def assertNumContains(self, haystack, needle, count):
        """Assert that *needle* occurs exactly *count* times in *haystack*."""
        real_count = haystack.count(needle)
        self.assertEqual(real_count, count, "Found %d instances of '%s', expected %d" % (real_count, needle, count))
    @skipUnlessDBFeature('supports_tablespaces')
    def test_tablespace_for_model(self):
        sql = sql_for_table(Scientist).lower()
        if settings.DEFAULT_INDEX_TABLESPACE:
            # 1 for the table
            self.assertNumContains(sql, 'tbl_tbsp', 1)
            # 1 for the index on the primary key
            self.assertNumContains(sql, settings.DEFAULT_INDEX_TABLESPACE, 1)
        else:
            # 1 for the table + 1 for the index on the primary key
            self.assertNumContains(sql, 'tbl_tbsp', 2)
    @skipIfDBFeature('supports_tablespaces')
    def test_tablespace_ignored_for_model(self):
        # No tablespace-related SQL
        self.assertEqual(sql_for_table(Scientist),
                         sql_for_table(ScientistRef))
    @skipUnlessDBFeature('supports_tablespaces')
    def test_tablespace_for_indexed_field(self):
        sql = sql_for_table(Article).lower()
        if settings.DEFAULT_INDEX_TABLESPACE:
            # 1 for the table
            self.assertNumContains(sql, 'tbl_tbsp', 1)
            # 1 for the primary key + 1 for the index on code
            self.assertNumContains(sql, settings.DEFAULT_INDEX_TABLESPACE, 2)
        else:
            # 1 for the table + 1 for the primary key + 1 for the index on code
            self.assertNumContains(sql, 'tbl_tbsp', 3)
        # 1 for the index on reference
        self.assertNumContains(sql, 'idx_tbsp', 1)
    @skipIfDBFeature('supports_tablespaces')
    def test_tablespace_ignored_for_indexed_field(self):
        # No tablespace-related SQL
        self.assertEqual(sql_for_table(Article),
                         sql_for_table(ArticleRef))
    @skipUnlessDBFeature('supports_tablespaces')
    def test_tablespace_for_many_to_many_field(self):
        sql = sql_for_table(Authors).lower()
        # The join table of the ManyToManyField goes to the model's tablespace,
        # and its indexes too, unless DEFAULT_INDEX_TABLESPACE is set.
        if settings.DEFAULT_INDEX_TABLESPACE:
            # 1 for the table
            self.assertNumContains(sql, 'tbl_tbsp', 1)
            # 1 for the primary key
            self.assertNumContains(sql, settings.DEFAULT_INDEX_TABLESPACE, 1)
        else:
            # 1 for the table + 1 for the index on the primary key
            self.assertNumContains(sql, 'tbl_tbsp', 2)
        self.assertNumContains(sql, 'idx_tbsp', 0)
        sql = sql_for_index(Authors).lower()
        # The ManyToManyField declares no db_tablespace, its indexes go to
        # the model's tablespace, unless DEFAULT_INDEX_TABLESPACE is set.
        if settings.DEFAULT_INDEX_TABLESPACE:
            self.assertNumContains(sql, settings.DEFAULT_INDEX_TABLESPACE, 2)
        else:
            self.assertNumContains(sql, 'tbl_tbsp', 2)
        self.assertNumContains(sql, 'idx_tbsp', 0)
        sql = sql_for_table(Reviewers).lower()
        # The join table of the ManyToManyField goes to the model's tablespace,
        # and its indexes too, unless DEFAULT_INDEX_TABLESPACE is set.
        if settings.DEFAULT_INDEX_TABLESPACE:
            # 1 for the table
            self.assertNumContains(sql, 'tbl_tbsp', 1)
            # 1 for the primary key
            self.assertNumContains(sql, settings.DEFAULT_INDEX_TABLESPACE, 1)
        else:
            # 1 for the table + 1 for the index on the primary key
            self.assertNumContains(sql, 'tbl_tbsp', 2)
        self.assertNumContains(sql, 'idx_tbsp', 0)
        sql = sql_for_index(Reviewers).lower()
        # The ManyToManyField declares db_tablespace, its indexes go there.
        self.assertNumContains(sql, 'tbl_tbsp', 0)
        self.assertNumContains(sql, 'idx_tbsp', 2)
| filias/django | tests/model_options/test_tablespaces.py | Python | bsd-3-clause | 5,370 |
"""Utility functions used by projects.
"""
import fnmatch
import os
import re
import subprocess
import traceback
from distutils2.version import NormalizedVersion, suggest_normalized_version
from django.conf import settings
from httplib2 import Http
import redis
from projects.libs.diff_match_patch import diff_match_patch
def find_file(file):
    """Recursively search the working directory for files matching *file*.

    ``file`` may be a glob pattern (e.g. ``conf.py`` or ``*.rst``); the
    result lists the relative path of every match found under ``.``.
    """
    found = []
    for root, dirnames, filenames in os.walk('.'):
        found.extend(os.path.join(root, name)
                     for name in fnmatch.filter(filenames, file))
    return found
def run(*commands):
    """
    Run one or more commands, and return ``(status, out, err)``.
    If more than one command is given, then this is equivalent to
    chaining them together with ``&&``; if all commands succeed, then
    ``(status, out, err)`` will represent the last successful command.
    If one command failed, then ``(status, out, err)`` will represent
    the failed command.
    """
    environment = os.environ.copy()
    # Let build hooks detect they are being executed by Read the Docs.
    environment['READTHEDOCS'] = 'True'
    cwd = os.getcwd()
    if not commands:
        raise ValueError("run() requires one or more command-line strings")
    for command in commands:
        print("Running: '%s'" % command)
        try:
            p = subprocess.Popen(command.split(), shell=False, cwd=cwd,
                                 stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                                 env=environment)
            out, err = p.communicate()
            ret = p.returncode
        except:
            # NOTE(review): bare except also catches KeyboardInterrupt/
            # SystemExit; narrowing to Exception would be safer -- confirm.
            out = ''
            err = traceback.format_exc()
            ret = -1
            print "Command failed: %s" % err
    # NOTE(review): the loop never breaks on failure, so the returned tuple
    # always reflects the *last* command -- this disagrees with the
    # docstring's "failed command" claim; confirm intended behavior.
    return (ret, out, err)
# Shared diff_match_patch engine used by diff().
dmp = diff_match_patch()
def diff(txt1, txt2):
    """Create a 'diff' from txt1 to txt2."""
    # Returns the patch in diff_match_patch's serialized text format.
    patch = dmp.patch_make(txt1, txt2)
    return dmp.patch_toText(patch)
def safe_write(filename, contents):
    """Write ``contents`` to the given ``filename``.

    If the filename's directory does not exist, it is created. Contents
    are written as UTF-8, ignoring any characters that cannot be encoded
    as UTF-8.
    """
    dirname = os.path.dirname(filename)
    # Guard against dirname == '' (plain filename): os.makedirs('')
    # raises OSError in the original code.
    if dirname and not os.path.exists(dirname):
        os.makedirs(dirname)
    # Encode explicitly and write in binary mode so the exact same bytes
    # land on disk on Python 2 and 3; the context manager closes the
    # handle (the original also called fh.close() redundantly).
    with open(filename, 'wb') as fh:
        fh.write(contents.encode('utf-8', 'ignore'))
CUSTOM_SLUG_RE = re.compile(r'[^-._\w]+$')
def _custom_slugify(data):
return CUSTOM_SLUG_RE.sub('', data)
def slugify_uniquely(model, initial, field, max_length, **filters):
    """Build a slug from *initial* that is unique among *model* rows.

    Numeric suffixes (-0, -1, ...) replace the tail of the slug until no
    row matching **filters** already uses the candidate value, so the
    result never exceeds *max_length*.
    """
    slug = _custom_slugify(initial)[:max_length]
    current = slug
    index = 0
    base_qs = model.objects.filter(**filters)
    while base_qs.filter(**{field: current}).exists():
        suffix = '-%s' % index
        # Overwrite the end of the slug instead of appending, to respect
        # max_length.
        current = '%s%s' % (slug[:-len(suffix)], suffix)
        index += 1
    return current
def mkversion(version_obj):
    """Turn a version object's slug into a comparable NormalizedVersion.

    Returns None when the slug cannot be normalized into a valid version.
    """
    try:
        ver = NormalizedVersion(suggest_normalized_version(version_obj.slug))
        return ver
    except TypeError:
        return None
def highest_version(version_list):
    """Return ``[version_obj, NormalizedVersion]`` for the highest version.

    Entries whose slug cannot be normalized are skipped; ``[None, None]``
    is returned when nothing in *version_list* parses.
    """
    best = [None, None]
    for candidate in version_list:
        normalized = mkversion(candidate)
        if not normalized:
            continue
        # Adopt the candidate when it is the first parsable version seen
        # or when it outranks the current best.
        if not best[1] or normalized > best[1]:
            best = [candidate, normalized]
    return best
def purge_version(version, mainsite=False, subdomain=False, cname=False):
    """Ask every configured Varnish server to purge cached doc pages.

    Depending on the flags, purges the project-subdomain URLs, the
    main-site /docs/ URLs and/or every CNAME registered in redis for the
    project. No-op when settings.VARNISH_SERVERS is unset or empty.
    """
    varnish_servers = getattr(settings, 'VARNISH_SERVERS', None)
    h = Http()
    if varnish_servers:
        for server in varnish_servers:
            if subdomain:
                #Send a request to the Server, to purge the URL of the Host.
                host = "%s.readthedocs.org" % version.project.slug
                headers = {'Host': host}
                url = "/en/%s/*" % version.slug
                to_purge = "http://%s%s" % (server, url)
                print "Purging %s on %s" % (url, host)
                ret = h.request(to_purge, method="PURGE", headers=headers)
            if mainsite:
                headers = {'Host': "readthedocs.org"}
                url = "/docs/%s/en/%s/*" % (version.project.slug, version.slug)
                to_purge = "http://%s%s" % (server, url)
                print "Purging %s on readthedocs.org" % url
                ret = h.request(to_purge, method="PURGE", headers=headers)
                root_url = "/docs/%s/" % version.project.slug
                to_purge = "http://%s%s" % (server, root_url)
                print "Purging %s on readthedocs.org" % root_url
                ret2 = h.request(to_purge, method="PURGE", headers=headers)
            if cname:
                redis_conn = redis.Redis(**settings.REDIS)
                # Purge every custom domain pointing at this project.
                for cnamed in redis_conn.smembers('rtd_slug:v1:%s' % version.project.slug):
                    headers = {'Host': cnamed}
                    url = "/en/%s/*" % version.slug
                    to_purge = "http://%s%s" % (server, url)
                    print "Purging %s on %s" % (url, cnamed)
                    ret = h.request(to_purge, method="PURGE", headers=headers)
                    root_url = "/"
                    to_purge = "http://%s%s" % (server, root_url)
                    print "Purging %s on %s" % (root_url, cnamed)
                    ret2 = h.request(to_purge, method="PURGE", headers=headers)
class DictObj(object):
    """Object with attribute-style access; unknown attributes yield None."""
    def __getattr__(self, attr):
        # __getattr__ only fires for names not found through the normal
        # lookup, so attributes that were set behave as usual.
        try:
            return self.__dict__[attr]
        except KeyError:
            return None
| alex/readthedocs.org | readthedocs/projects/utils.py | Python | mit | 5,640 |
from django.core.management.base import BaseCommand, CommandError
from engine import query
class Command(BaseCommand):
help = 'Type query to be searched'
args = "[create_index, clear_index, index_pages]"
def add_arguments(self, parser):
parser.add_argument('--options', type=str)
#parser.add_argument('-clear',default=False)
def handle(self, *args, **options):
print("Handling...")
query.search() | tanguy-s/ucl-search-engine | engine/management/commands/search.py | Python | mit | 452 |
# Copyright (C) 2011 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for reading OAuth 2.0 client secret files.
A client_secrets.json file contains all the information needed to interact with
an OAuth 2.0 protected service.
"""
__author__ = 'jcgregorio@google.com (Joe Gregorio)'
from anyjson import simplejson
# Properties that make a client_secrets.json file valid.
# The two client types a client_secrets.json file may declare.
TYPE_WEB = 'web'
TYPE_INSTALLED = 'installed'
# Per client type: 'required' properties that must exist, and 'string'
# properties that must be filled in (not left as '[[...]]' placeholders).
VALID_CLIENT = {
    TYPE_WEB: {
        'required': [
            'client_id',
            'client_secret',
            'redirect_uris',
            'auth_uri',
            'token_uri',
        ],
        'string': [
            'client_id',
            'client_secret',
        ],
    },
    TYPE_INSTALLED: {
        'required': [
            'client_id',
            'client_secret',
            'redirect_uris',
            'auth_uri',
            'token_uri',
        ],
        'string': [
            'client_id',
            'client_secret',
        ],
    },
}
class Error(Exception):
    """Base error for this module; all clientsecrets errors derive from it."""
    pass
class InvalidClientSecretsError(Error):
    """Format of ClientSecrets file is invalid (wrong shape, missing or
    placeholder properties)."""
    pass
def _validate_clientsecrets(obj):
    """Validate a deserialized client_secrets structure.

    Args:
        obj: dict, parsed JSON contents of a client_secrets file.

    Returns:
        (client_type, client_info) tuple.

    Raises:
        InvalidClientSecretsError: if the structure is malformed.
    """
    if obj is None or len(obj) != 1:
        raise InvalidClientSecretsError('Invalid file format.')
    # next(iter(obj)) works on both Python 2 and 3; obj.keys()[0] fails on
    # Python 3 where dict views are not indexable.
    client_type = next(iter(obj))
    if client_type not in VALID_CLIENT:
        raise InvalidClientSecretsError('Unknown client type: %s.' % client_type)
    client_info = obj[client_type]
    for prop_name in VALID_CLIENT[client_type]['required']:
        if prop_name not in client_info:
            raise InvalidClientSecretsError(
                'Missing property "%s" in a client type of "%s".' % (prop_name,
                                                                     client_type))
    for prop_name in VALID_CLIENT[client_type]['string']:
        # '[[' marks an unfilled template placeholder.
        if client_info[prop_name].startswith('[['):
            raise InvalidClientSecretsError(
                'Property "%s" is not configured.' % prop_name)
    return client_type, client_info
def load(fp):
    """Read client secrets JSON from file object *fp* and validate it."""
    obj = simplejson.load(fp)
    return _validate_clientsecrets(obj)
def loads(s):
    """Parse client secrets JSON from string *s* and validate it."""
    obj = simplejson.loads(s)
    return _validate_clientsecrets(obj)
def _loadfile(filename):
    """Load and validate a client_secrets file from *filename*.

    Raises:
        InvalidClientSecretsError: if the file cannot be read.
    """
    try:
        # open() instead of the Python-2-only file() builtin; the context
        # manager guarantees the handle is closed even if parsing fails.
        with open(filename, 'r') as fp:
            obj = simplejson.load(fp)
    except IOError:
        raise InvalidClientSecretsError('File not found: "%s"' % filename)
    return _validate_clientsecrets(obj)
def loadfile(filename, cache=None):
    """Loading of client_secrets JSON file, optionally backed by a cache.

    Typical cache storage would be App Engine memcache service,
    but you can pass in any other cache client that implements
    these methods:
    - get(key, namespace=ns)
    - set(key, value, namespace=ns)

    Usage:
    # without caching
    client_type, client_info = loadfile('secrets.json')
    # using App Engine memcache service
    from google.appengine.api import memcache
    client_type, client_info = loadfile('secrets.json', cache=memcache)

    Args:
    filename: string, Path to a client_secrets.json file on a filesystem.
    cache: An optional cache service client that implements get() and set()
    methods. If not specified, the file is always being loaded from
    a filesystem.

    Raises:
    InvalidClientSecretsError: In case of a validation error or some
    I/O failure. Can happen only on cache miss.

    Returns:
    (client_type, client_info) tuple, as _loadfile() normally would.
    JSON contents is validated only during first load. Cache hits are not
    validated.
    """
    _SECRET_NAMESPACE = 'oauth2client:secrets#ns'
    if not cache:
        return _loadfile(filename)
    obj = cache.get(filename, namespace=_SECRET_NAMESPACE)
    if obj is None:
        # Cache miss: load, validate and store for the next caller.
        client_type, client_info = _loadfile(filename)
        obj = {client_type: client_info}
        cache.set(filename, obj, namespace=_SECRET_NAMESPACE)
    # next(iter(obj.items())) works on Python 2 and 3, unlike the original
    # Python-2-only obj.iteritems().next().
    return next(iter(obj.items()))
| RKD314/yumstat | yumstat/oauth2client/clientsecrets.py | Python | mit | 4,405 |
#!/usr/bin/env python
#-------------------------------------------------------------------------
# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#--------------------------------------------------------------------------
from setuptools import setup
# Package metadata for the azure-mgmt-common distribution (setuptools).
setup(
    name='azure-mgmt-common',
    version='0.20.0rc1',
    description='Microsoft Azure Resource Management Client Library for Python (Common)',
    long_description=open('README.rst', 'r').read(),
    license='Apache License 2.0',
    author='Microsoft Corporation',
    author_email='ptvshelp@microsoft.com',
    url='https://github.com/Azure/azure-sdk-for-python',
    classifiers=[
        'Development Status :: 4 - Beta',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'License :: OSI Approved :: Apache Software License',
    ],
    zip_safe=False,
    packages=[
        'azure',
        'azure.mgmt',
        'azure.mgmt.common',
    ],
    install_requires=[
        'azure-common',
        'azure-mgmt-nspkg',
    ],
)
| ParallaxIT/azure-sdk-for-python | azure-mgmt-common/setup.py | Python | apache-2.0 | 1,777 |
# -*- coding: utf-8 -*-
from module.plugins.internal.DeadCrypter import DeadCrypter, create_getInfo
class CryptItCom(DeadCrypter):
    # Stub for the defunct crypt-it.com hoster; inheriting from DeadCrypter
    # marks every matching link as permanently offline.
    __name__ = "CryptItCom"
    __type__ = "crypter"
    __version__ = "0.12"
    __status__ = "testing"
    __pattern__ = r'http://(?:www\.)?crypt-it\.com/(s|e|d|c)/\w+'
    __config__ = [] #@TODO: Remove in 0.4.10
    __description__ = """Crypt-it.com decrypter plugin"""
    __license__ = "GPLv3"
    __authors__ = [("jeix", "jeix@hasnomail.de")]
# Module-level info-fetching hook expected by the plugin loader.
getInfo = create_getInfo(CryptItCom)
| fayf/pyload | module/plugins/crypter/CryptItCom.py | Python | gpl-3.0 | 542 |
#!/usr/bin/env python
# Python 2 CGI script: emit the HTTP header, then a minimal HTML form
# that POSTs name/gender to action.py.
print "Content-type: text/html"
print
print "<html>"
print "<body>"
print "<form action='action.py' method='post'>"
print "Name: <input type='text' name='name' /><br>"
print "Gender:<input type='text' name='gender'/><br>"
print "<input type='submit' />"
print "</form>"
print "</body>"
print "</html>"
| tuxfux-hlp-notes/python-batches | archieves/batch-57/cgi/dbform.py | Python | gpl-3.0 | 327 |
# Module-level registry of destination cities shared by the helpers below.
destination_cities = []
def add_city(city):
    """Append *city* to the shared destination list."""
    # No ``global`` needed: the list is mutated in place, never rebound
    # (the original declared useless ``global`` in all three functions).
    destination_cities.append(city)
def get_city(index):
    """Return the city stored at position *index*."""
    return destination_cities[index]
def cities_number():
    """Return how many cities have been registered."""
    return len(destination_cities)
| FelipeLimaM/sa-traveling-salesman | rodolpho-python/TourManager.py | Python | apache-2.0 | 287 |
#
# Copyright 2011 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
# The presence of this file turns this directory into a Python package
from qtgui_swig import *
import qtgui_swig as qtgui # to preserve the old interface
| tta/gnuradio-tta | gr-qtgui/python/__init__.py | Python | gpl-3.0 | 944 |
# coding: utf-8
"""
This module contains extra functions/shortcuts used to render HTML.
"""
import json
import re
import sys
from django import template
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import reverse
from django.http import HttpResponse, JsonResponse, HttpResponseRedirect
from django.shortcuts import render
from django.template.loader import render_to_string
def _render_to_string(request, tpl, user_context):
    """Custom rendering function.

    Just a wrapper which automatically adds a RequestContext instance
    (useful to use settings variables like STATIC_URL inside templates).

    :param request: the current Request object
    :param tpl: template path
    :param user_context: dict of template variables
    """
    return render_to_string(tpl, user_context,
                            context_instance=template.RequestContext(request))
def _render_error(request, errortpl="error", user_context=None):
    """Render one of the shared error templates (common/<errortpl>.html)."""
    context = {} if user_context is None else user_context
    return render(
        request, "common/%s.html" % errortpl, context
    )
def render_actions(actions):
    """Render *actions* to HTML links through the render_link template tag."""
    t = template.Template("""{% load lib_tags %}
    {% for a in actions %}{% render_link a %}{% endfor %}
    """)
    return t.render(template.Context(dict(actions=actions)))
def getctx(status, level=1, callback=None, **kwargs):
    """Build a standard JSON-serializable context dictionary.

    :param status: operation status ('ok' or 'ko')
    :param level: stack depth used to guess the caller's name when no
                  explicit *callback* is given
    :param callback: name sent back to the browser-side callback
    :param kwargs: extra entries copied into the context
    :return: dict with at least 'status' and 'callback' keys
    """
    if not callback:
        # Default the callback name to the calling function's name.
        callername = sys._getframe(level).f_code.co_name
    else:
        callername = callback
    ctx = {"status": status, "callback": callername}
    # .items() works on both Python 2 and 3 (iteritems() is Python-2-only).
    for kw, v in kwargs.items():
        ctx[kw] = v
    return ctx
def ajax_response(request, status="ok", respmsg=None,
                  url=None, ajaxnav=False, norefresh=False,
                  template=None, **kwargs):
    """Ajax response shortcut

    Simple shortcut that sends an JSON response. If a template is
    provided, a 'content' field will be added to the response,
    containing the result of this template rendering.

    :param request: a Request object
    :param status: the response status ('ok' or 'ko')
    :param respmsg: the message that will displayed in the interface
    :param url: url to display after receiving this response
    :param ajaxnav: flag forwarded to the browser-side handler
    :param norefresh: do not refresh the page after receiving this response
    :param template: eventual template's path
    :param kwargs: dict used for template rendering
    """
    ctx = {}
    # .items() works on both Python 2 and 3 (iteritems() is Python-2-only).
    for k, v in kwargs.items():
        ctx[k] = v
    if template is not None:
        content = _render_to_string(request, template, ctx)
    elif "content" in kwargs:
        content = kwargs["content"]
    else:
        content = ""
    jsonctx = {"status": status, "content": content}
    if respmsg is not None:
        jsonctx["respmsg"] = respmsg
    if ajaxnav:
        jsonctx["ajaxnav"] = True
    if url is not None:
        jsonctx["url"] = url
    jsonctx["norefresh"] = norefresh
    return JsonResponse(jsonctx)
def render_to_json_response(context, **response_kwargs):
    """Simple shortcut to render a JSON response.

    :param dict context: response content
    :return: ``HttpResponse`` object
    """
    response_kwargs["content_type"] = "application/json"
    return HttpResponse(json.dumps(context), **response_kwargs)
def static_url(path):
    """Return the full static URL for *path* (relative to STATIC_URL).

    :param path: the targeted static media
    """
    relative = path[1:] if path.startswith("/") else path
    return "%s%s" % (settings.STATIC_URL, relative)
def size2integer(value):
"""Try to convert a string representing a size to an integer value
in bytes.
Supported formats:
* K|k for KB
* M|m for MB
* G|g for GB
:param value: the string to convert
:return: the corresponding integer value
"""
m = re.match("(\d+)\s*(\w+)", value)
if m is None:
if re.match("\d+", value):
return int(value)
return 0
if m.group(2)[0] in ["K", "k"]:
return int(m.group(1)) * 2 ** 10
if m.group(2)[0] in ["M", "m"]:
return int(m.group(1)) * 2 ** 20
if m.group(2)[0] in ["G", "g"]:
return int(m.group(1)) * 2 ** 30
return 0
@login_required
def topredirection(request):
    """Simple view to redirect the request when no application is specified.

    The default "top redirection" can be specified in the *Admin >
    Settings* panel. It is the application that will be
    launched. Those not allowed to access the application will be
    redirected to their preferences page.

    This feature only applies to simple users.

    :param request: a Request object
    """
    # Imported lazily to avoid circular imports at module load time.
    from modoboa.lib import parameters
    from modoboa.core.extensions import exts_pool
    if request.user.group == 'SimpleUsers':
        topredir = parameters.get_admin("DEFAULT_TOP_REDIRECTION", app="core")
        if topredir != "user":
            infos = exts_pool.get_extension_infos(topredir)
            path = infos["url"] if infos["url"] else infos["name"]
        else:
            path = reverse("core:user_index")
    else:
        # FIXME
        path = reverse("modoboa_admin:domain_list")
    return HttpResponseRedirect(path)
class NavigationParameters(object):
    """
    Just a simple object to manipulate navigation parameters, stored as a
    dict under ``sessionkey`` in the request's session.
    """
    def __init__(self, request, sessionkey):
        self.request = request
        self.sessionkey = sessionkey
        # (name, default value, escape-for-regex?) triples read by store().
        self.parameters = [('pattern', '', True),
                           ('criteria', 'from_addr', False)]
    def __getitem__(self, key):
        """Retrieve an item; KeyError when the session dict is absent."""
        if self.sessionkey not in self.request.session:
            raise KeyError
        return self.request.session[self.sessionkey][key]
    def __contains__(self, key):
        """Check if key is present."""
        if self.sessionkey not in self.request.session:
            return False
        return key in self.request.session[self.sessionkey]
    def __setitem__(self, key, value):
        """Set a new item."""
        self.request.session[self.sessionkey][key] = value
    def _store_page(self):
        """Specific method to store the current page."""
        self["page"] = int(self.request.GET.get("page", 1))
    def store(self):
        """Store navigation parameters into session.

        Reads page, sort order and each declared parameter from the GET
        query string, then marks the session as modified.
        """
        if self.sessionkey not in self.request.session:
            self.request.session[self.sessionkey] = {}
        self._store_page()
        navparams = self.request.session[self.sessionkey]
        navparams["order"] = self.request.GET.get("sort_order", "-date")
        for param, defvalue, escape in self.parameters:
            value = self.request.GET.get(param, defvalue)
            if value is None:
                if param in navparams:
                    del navparams[param]
                continue
            # Escape user input so it can be safely used inside regexes.
            navparams[param] = re.escape(value) if escape else value
        self.request.session.modified = True
    def get(self, param, default_value=None):
        """Retrieve a navigation parameter.

        Just a simple getter to avoid using the full key name to
        access a parameter.

        :param str param: parameter name
        :param default_value: default value if none is found
        :return: parameter's value
        """
        if self.sessionkey not in self.request.session:
            return default_value
        return self.request.session[self.sessionkey].get(param, default_value)
    def remove(self, param):
        """Remove a navigation parameter from session.

        :param str param: parameter name
        """
        navparams = self.request.session[self.sessionkey]
        if param in navparams:
            del navparams[param]
| mehulsbhatt/modoboa | modoboa/lib/web_utils.py | Python | isc | 7,610 |
from django.conf.urls import url
from . import views
# URL routes for the system_settings app: a single endpoint serving the
# global settings page.
urlpatterns = [
    url(r'^system_settings$', views.system_settings, name='system_settings')
]
| rackerlabs/django-DefectDojo | dojo/system_settings/urls.py | Python | bsd-3-clause | 149 |
from __future__ import absolute_import
from sentry.testutils.cases import RuleTestCase
from sentry.rules.conditions.tagged_event import TaggedEventCondition, MatchType
class TaggedEventConditionTest(RuleTestCase):
    """Exercises TaggedEventCondition against a fixed set of event tags."""

    rule_cls = TaggedEventCondition

    def get_event(self):
        """Return ``self.event`` with a deterministic tag set attached."""
        event = self.event
        event.data['tags'] = (
            ('logger', 'sentry.example'),
            ('logger', 'foo.bar'),
            ('notlogger', 'sentry.other.example'),
            ('notlogger', 'bar.foo.baz'),
        )
        return event

    def _assert_tag_match(self, match, key, value, should_pass):
        """Build a rule for (match, key, value) and assert its outcome."""
        rule = self.get_rule({
            'match': match,
            'key': key,
            'value': value,
        })
        if should_pass:
            self.assertPasses(rule, self.get_event())
        else:
            self.assertDoesNotPass(rule, self.get_event())

    def test_render_label(self):
        rule = self.get_rule({
            'match': MatchType.EQUAL,
            'key': u'\xc3',
            'value': u'\xc4',
        })
        assert rule.render_label() == u'An event\'s tags match \xc3 equals \xc4'

    def test_equals(self):
        # key given in upper case, mirroring the original test
        self._assert_tag_match(MatchType.EQUAL, 'LOGGER', 'sentry.example', True)
        self._assert_tag_match(MatchType.EQUAL, 'logger', 'sentry.other.example', False)

    def test_does_not_equal(self):
        self._assert_tag_match(MatchType.NOT_EQUAL, 'logger', 'sentry.example', False)
        self._assert_tag_match(MatchType.NOT_EQUAL, 'logger', 'sentry.other.example', True)

    def test_starts_with(self):
        self._assert_tag_match(MatchType.STARTS_WITH, 'logger', 'sentry.', True)
        self._assert_tag_match(MatchType.STARTS_WITH, 'logger', 'bar.', False)

    def test_ends_with(self):
        self._assert_tag_match(MatchType.ENDS_WITH, 'logger', '.example', True)
        self._assert_tag_match(MatchType.ENDS_WITH, 'logger', '.foo', False)

    def test_contains(self):
        self._assert_tag_match(MatchType.CONTAINS, 'logger', 'sentry', True)
        self._assert_tag_match(MatchType.CONTAINS, 'logger', 'bar.foo', False)

    def test_does_not_contain(self):
        self._assert_tag_match(MatchType.NOT_CONTAINS, 'logger', 'sentry', False)
        self._assert_tag_match(MatchType.NOT_CONTAINS, 'logger', 'bar.foo', True)
| alexm92/sentry | tests/sentry/rules/conditions/test_tagged_event.py | Python | bsd-3-clause | 3,462 |
# -*- coding: UTF-8 -*-
from insights.client.constants import InsightsConstants
from insights.client.phase.v1 import pre_update
from mock.mock import patch
from pytest import raises
def patch_insights_config(old_function):
    """Decorate *old_function* so InsightsConfig is replaced by a mock.

    The mock's ``load_all()`` result answers False to every feature flag
    except ``checkin``, which is True — steering ``pre_update`` into the
    check-in branch.
    """
    flag_values = {
        "auto_config": False,
        "version": False,
        "validate": False,
        "enable_schedule": False,
        "disable_schedule": False,
        "analyze_container": False,
        "test_connection": False,
        "support": False,
        "diagnosis": False,
        "checkin": True,
    }
    mock_kwargs = {
        "return_value.load_all.return_value." + flag: value
        for flag, value in flag_values.items()
    }
    return patch("insights.client.phase.v1.InsightsConfig", **mock_kwargs)(old_function)
@patch("insights.client.phase.v1.InsightsClient", **{"return_value.checkin.return_value": True})
@patch_insights_config
def test_checkin_success(insights_config, insights_client):
    """
    When check-in succeeds, pre_update performs exactly one check-in call
    and exits with the "OK" kill signal.
    """
    with raises(SystemExit) as exc_info:
        pre_update()
    insights_client.return_value.checkin.assert_called_once_with()
    assert exc_info.value.code == InsightsConstants.sig_kill_ok
@patch("insights.client.phase.v1.InsightsClient", **{"return_value.checkin.return_value": False})
@patch_insights_config
def test_checkin_failure(insights_config, insights_client):
    """
    When check-in fails, pre_update still makes exactly one check-in call
    but exits with the "bad" kill signal.
    """
    with raises(SystemExit) as exc_info:
        pre_update()
    insights_client.return_value.checkin.assert_called_once_with()
    assert exc_info.value.code == InsightsConstants.sig_kill_bad
| RedHatInsights/insights-core | insights/tests/client/phase/test_pre_update_checkin.py | Python | apache-2.0 | 2,056 |
import os
import json
import time
from xudd.actor import Actor
class Controller(Actor):
    """Actor coordinating config file I/O and GUI lifecycle messages.

    Routes the "setup", "read_config", "write_config", "start_gui" and
    "stop_gui" directives. ``self.gui`` is not set here — presumably it
    arrives through ``unpack()`` during setup; TODO confirm with callers.
    """

    config = None

    def __init__(self, *args, **kwargs):
        super(Controller, self).__init__(*args, **kwargs)
        self.message_routing.update({
            "setup": self.setup,
            "stop_gui": self.stop_gui,
            "start_gui": self.start_gui,
            "read_config": self.read_config,
            "write_config": self.write_config,
        })
        # Per-user configuration file location.
        self.config = os.path.expanduser("~/.muon")

    def unpack(self, packet):
        """Copy every key/value of *packet* onto this actor as attributes."""
        for name, var in packet.items():
            setattr(self, name, var)

    def setup(self, message):
        """Absorb the setup payload, then trigger a config read."""
        self.unpack(message.body)
        self.hive.send_message(
            to="muon",
            directive="read_config"
        )

    def read_config(self, message):
        """Read the JSON config file and forward it to the pump actor."""
        try:
            # Context manager closes the handle; the previous
            # open(self.config).read() leaked the file descriptor.
            with open(self.config) as config_file:
                config = json.loads(config_file.read())
        except (ValueError, IOError):
            # Missing file or bad JSON: fall back to an empty config.
            config = dict()
        self.hive.send_message(
            to="pump",
            directive="setup",
            body=config
        )

    def write_config(self, message):
        """Write the message body back to the config file as JSON."""
        with open(self.config, "w") as config_file:
            config_file.write(json.dumps(message.body))

    def start_gui(self, message):
        """Start the GUI and draw the inbox once it has had time to appear."""
        self.gui.start_gui()
        time.sleep(1.25)  # time it may take to get the GUI up
        self.gui.draw_inbox()

    def stop_gui(self, message):
        """Shut the GUI down."""
        self.gui.quit()
| xray7224/Muon | src/controller.py | Python | gpl-3.0 | 1,584 |
import json
import logging
import time
import requests
import demistomock as demisto
import resilient
from CommonServerPython import *
''' IMPORTS '''
logging.basicConfig()
# disable insecure warnings
requests.packages.urllib3.disable_warnings()
try:
    # disable 'warning' logs from 'resilient.co3'
    logging.getLogger('resilient.co3').setLevel(logging.ERROR)
except Exception:
    # client with no co3 instance should pass this exception
    pass
if not demisto.params()['proxy']:
    # pop(..., None) tolerates unset variables; the previous bare
    # `del os.environ[...]` raised KeyError when a variable was absent,
    # crashing the integration at import time.
    for proxy_var in ('HTTP_PROXY', 'HTTPS_PROXY', 'http_proxy', 'https_proxy'):
        os.environ.pop(proxy_var, None)
''' GLOBAL VARS '''
# Server URL, trailing slash stripped; scheme removed because the client adds it.
URL = demisto.params()['server'][:-1] if demisto.params()['server'].endswith('/') else demisto.params()['server']
# Remove the http/s from the url (It's added automatically later)
URL = URL.replace('http://', '').replace('https://', '')
# Split the URL into two parts hostname & port
# NOTE(review): rsplit(":", 1) assumes a port is always present; a URL
# without ':' raises ValueError here — confirm instance configuration.
SERVER, PORT = URL.rsplit(":", 1)
ORG_NAME = demisto.params()['org']
# Either username/password credentials or an API key pair may be configured.
USERNAME = demisto.params().get('credentials', {}).get('identifier')
PASSWORD = demisto.params().get('credentials', {}).get('password')
API_KEY_ID = demisto.params().get('api_key_id')
API_KEY_SECRET = demisto.params().get('api_key_secret')
USE_SSL = not demisto.params().get('insecure', False)
FETCH_TIME = demisto.params().get('fetch_time', '')
TIME_FORMAT = '%Y-%m-%dT%H:%M:%SZ'
# Normalize the first-fetch time to a 'Z'-suffixed ISO-8601 timestamp.
if FETCH_TIME:
    if FETCH_TIME[-1] != 'Z':
        FETCH_TIME = FETCH_TIME + 'Z'
# Command-facing incident type name -> Resilient incident type id.
INCIDENT_TYPE_DICT = {
    'CommunicationError': 17,
    'DenialOfService': 21,
    'ImproperDisposal:DigitalAsset': 6,
    'ImproperDisposal:documents/files': 7,
    'LostDocuments/files/records': 4,
    'LostPC/laptop/tablet': 3,
    'LostPDA/smartphone': 1,
    'LostStorageDevice/media': 8,
    'Malware': 19,
    'NotAnIssue': 23,
    'Other': 18,
    'Phishing': 22,
    'StolenDocuments/files/records': 11,
    'StolenPC/laptop/tablet': 12,
    'StolenPDA/Smartphone': 13,
    'StolenStorageDevice/media': 14,
    'SystemIntrusion': 20,
    'TBD/Unknown': 16,
    'Vendor/3rdPartyError': 15
}
# NIST attack vector name -> Resilient id (NIST_ID_DICT is the inverse).
NIST_DICT = {
    'Attrition': 2,
    'E-mail': 4,
    'External/RemovableMedia': 1,
    'Impersonation': 5,
    'ImproperUsage': 6,
    'Loss/TheftOfEquipment': 7,
    'Other': 8,
    'Web': 3
}
NIST_ID_DICT = {
    2: 'Attrition',
    4: 'E-mail',
    1: 'External/RemovableMedia',
    5: 'Impersonation',
    6: 'ImproperUsage',
    7: 'Loss/TheftOfEquipment',
    8: 'Other',
    3: 'Web'
}
# Resilient severity code -> human readable label.
SEVERITY_CODE_DICT = {
    4: 'Low',
    5: 'Medium',
    6: 'High'
}
# Resolution id <-> label lookup tables.
RESOLUTION_DICT = {
    7: 'Unresolved',
    8: 'Duplicate',
    9: 'Not an Issue',
    10: 'Resolved'
}
RESOLUTION_TO_ID_DICT = {
    'Unresolved': 7,
    'Duplicate': 8,
    'Not an Issue': 9,
    'Resolved': 10
}
# Exposure type id -> label.
EXP_TYPE_ID_DICT = {
    1: 'Unknown',
    2: 'ExternalParty',
    3: 'Individual'
}
''' HELPER FUNCTIONS '''
def normalize_timestamp(timestamp):
''' Converts epoch timestamp to human readable timestamp '''
return datetime.fromtimestamp(timestamp / 1000.0).strftime('%Y-%m-%dT%H:%M:%SZ')
def prettify_incidents(client, incidents):
    """Rewrite raw incident dicts in place into a human-friendly shape.

    Stringifies ids, converts epoch-millisecond fields to ISO timestamps,
    resolves owner/phase/severity/exposure/NIST ids to names via extra API
    lookups, and drops internal fields. Mutates and returns *incidents*.
    """
    users = get_users(client)
    phases = get_phases(client)['entities']
    for incident in incidents:
        incident['id'] = str(incident['id'])
        # Python 2 `unicode` check: strip the <div> wrapper from HTML descriptions.
        if isinstance(incident['description'], unicode):
            incident['description'] = incident['description'].replace('<div>', '').replace('</div>', '')
        incident['discovered_date'] = normalize_timestamp(incident['discovered_date'])
        incident['created_date'] = normalize_timestamp(incident['create_date'])
        incident.pop('create_date', None)
        incident.pop('inc_training', None)
        incident.pop('plan_status', None)
        # Replace the numeric owner id with "First Last".
        for user in users:
            if incident['owner_id'] == user['id']:
                incident['owner'] = user['fname'] + ' ' + user['lname']
                incident.pop('owner_id', None)
                break
        for phase in phases:
            if incident['phase_id'] == phase['id']:
                incident['phase'] = phase['name']
                incident.pop('phase_id', None)
                break
        if incident['severity_code']:
            incident['severity'] = SEVERITY_CODE_DICT[incident['severity_code']]
            incident.pop('severity_code', None)
        start_date = incident.get('start_date')
        if start_date:
            incident['date_occurred'] = normalize_timestamp(start_date)
            incident.pop('start_date', None)
        due_date = incident.get('due_date')
        if due_date:
            incident['due_date'] = normalize_timestamp(due_date)
        negative_pr = incident.get('negative_pr_likely')
        if negative_pr:
            incident['negative_pr'] = negative_pr
            incident.pop('negative_pr_likely', None)
        exposure_type_id = incident.get('exposure_type_id')
        if exposure_type_id:
            incident['exposure_type'] = EXP_TYPE_ID_DICT[exposure_type_id]
            incident.pop('exposure_type_id', None)
        nist_attack_vectors = incident.get('nist_attack_vectors')
        if nist_attack_vectors:
            # Translate NIST vector ids to their names.
            translated_nist = []
            for vector in nist_attack_vectors:
                translated_nist.append(NIST_ID_DICT[vector])
            incident['nist_attack_vectors'] = translated_nist
    return incidents
''' FUNCTIONS '''
def search_incidents_command(client, args):
    """Run a filtered incident search and render it as a war-room entry.

    Returns a Demisto entry dict on hits, or the plain string
    'No results found.' when the query matches nothing.
    """
    incidents = search_incidents(client, args)
    entry = None
    if incidents:
        pretty_incidents = prettify_incidents(client, incidents)
        # createContext / underscoreToCamelCase come from CommonServerPython.
        result_incidents = createContext(pretty_incidents, id=None, keyTransform=underscoreToCamelCase, removeNull=True)
        ec = {
            'Resilient.Incidents(val.Id && val.Id === obj.Id)': result_incidents
        }
        title = 'Resilient Systems Incidents'
        entry = {
            'Type': entryTypes['note'],
            'Contents': incidents,
            'ContentsFormat': formats['json'],
            'ReadableContentsFormat': formats['markdown'],
            'HumanReadable': tableToMarkdown(title, result_incidents,
                                             headers=['Id', 'Name', 'CreatedDate', 'DiscoveredDate', 'Owner', 'Phase'],
                                             removeNull=True),
            'EntryContext': ec
        }
        return entry
    else:
        return 'No results found.'
def _timeframe_to_seconds(amount, timeframe):
    """Convert *amount* units of *timeframe* into seconds.

    :param int amount: number of units
    :param str timeframe: one of 'days', 'hours' or 'minutes'
    :raises Exception: for an unsupported unit. (Previously an unknown
        unit left the computed offset unbound and crashed with NameError.)
    """
    if timeframe == 'days':
        return 60 * 60 * 24 * amount
    elif timeframe == 'hours':
        return 60 * 60 * amount
    elif timeframe == 'minutes':
        return 60 * amount
    raise Exception('Invalid timeframe: ' + str(timeframe))


def search_incidents(client, args):
    """Build an AND-ed Resilient condition list from *args* and POST the query.

    Supported filters: severity, creation/occurrence date bounds or
    rolling windows, incident type, NIST attack vector, status, and
    due-date windows. Returns the raw API response.
    """
    conditions = []
    if 'severity' in args:
        value = []
        severity = args['severity'].split(',')
        if 'Low' in severity:
            value.append(50)
        if 'Medium' in severity:
            value.append(51)
        if 'High' in severity:
            value.append(52)
        if not value:
            raise Exception('Severity should be given in capital case and comma separated, e.g. Low,Medium,High')
        conditions.append({
            'field_name': 'severity_code',
            'method': 'in',
            'value': value
        })
    if 'date-created-before' in args:
        value = date_to_timestamp(args['date-created-before'], date_format='%Y-%m-%dT%H:%M:%SZ')
        conditions.append({
            'field_name': 'create_date',
            'method': 'lte',
            'value': value
        })
    elif 'date-created-after' in args:
        value = date_to_timestamp(args['date-created-after'], date_format='%Y-%m-%dT%H:%M:%SZ')
        conditions.append({
            'field_name': 'create_date',
            'method': 'gte',
            'value': value
        })
    elif 'date-created-within-the-last' in args:
        if 'timeframe' not in args:
            raise Exception('Timeframe was not given.')
        within_the_last = int(args['date-created-within-the-last'])
        now = int(time.time())
        from_time = now - _timeframe_to_seconds(within_the_last, args['timeframe'])
        # API expects epoch milliseconds.
        conditions.extend((
            {
                'field_name': 'create_date',
                'method': 'lte',
                'value': now * 1000
            },
            {
                'field_name': 'create_date',
                'method': 'gte',
                'value': from_time * 1000
            }))
    if 'date-occurred-before' in args:
        value = date_to_timestamp(args['date-occurred-before'], date_format='%Y-%m-%dT%H:%M:%SZ')
        conditions.append({
            'field_name': 'start_date',
            'method': 'lte',
            'value': value
        })
    elif 'date-occurred-after' in args:
        value = date_to_timestamp(args['date-occurred-after'], date_format='%Y-%m-%dT%H:%M:%SZ')
        conditions.append({
            'field_name': 'start_date',
            'method': 'gte',
            'value': value
        })
    elif 'date-occurred-within-the-last' in args:
        if 'timeframe' not in args:
            raise Exception('Timeframe was not given.')
        within_the_last = int(args['date-occurred-within-the-last'])
        now = int(time.time())
        from_time = now - _timeframe_to_seconds(within_the_last, args['timeframe'])
        conditions.extend((
            {
                'field_name': 'start_date',
                'method': 'lte',
                'value': now * 1000
            },
            {
                'field_name': 'start_date',
                'method': 'gte',
                'value': from_time * 1000
            }))
    if 'incident-type' in args:
        type_id = INCIDENT_TYPE_DICT[args['incident-type']]
        conditions.append({
            'field_name': 'incident_type_ids',
            'method': 'contains',
            'value': [type_id]
        })
    if 'nist' in args:
        nist = NIST_DICT[args['nist']]
        conditions.append({
            'field_name': 'nist_attack_vectors',
            'method': 'contains',
            'value': [nist]
        })
    if 'status' in args:
        status = 'A' if args['status'] == 'Active' else 'C'
        conditions.append({
            'field_name': 'plan_status',
            'method': 'in',
            'value': [status]
        })
    if 'due-in' in args:
        if 'timeframe' not in args:
            raise Exception('Timeframe was not given.')
        due_in = int(args['due-in'])
        now = int(time.time())
        to_time = now + _timeframe_to_seconds(due_in, args['timeframe'])
        conditions.extend((
            {
                'field_name': 'due_date',
                'method': 'lte',
                'value': to_time * 1000
            },
            {
                'field_name': 'due_date',
                'method': 'gte',
                'value': now * 1000
            }))
    data = {
        'filters': [{
            'conditions': conditions
        }]
    }
    response = client.post('/incidents/query', data)
    return response
def extract_data_form_other_fields_argument(other_fields, incident, changes):
    """Parse the ``other-fields`` JSON argument into IBM change entries.

    Args:
        other_fields (str): Field-to-new-value mapping, e.g.
            ``{"name": {"text": "The new name"}}``.
        incident (dict): Current incident, supplying the old values.
        changes (list): Mutated in place; receives one IBM-format change
            dict per field.
    """
    try:
        parsed = json.loads(other_fields)
    except Exception as e:
        raise Exception('The other_fields argument is not a valid json. ' + str(e))
    for field_name, new_value in parsed.items():
        # The API wants {type: value}. The type tag is not returned by the
        # API, so the old value reuses the tag of the requested new value.
        value_type = next(iter(new_value))
        changes.append({
            'field': {'name': field_name},
            'old_value': {value_type: incident[field_name]},
            'new_value': new_value,
        })
def update_incident_command(client, args):
    """Build an IBM change-set from command args and PATCH the incident.

    Each recognized argument contributes one old/new change entry; list
    fields (incident types, NIST vectors) are appended to, not replaced.
    Returns a success message on HTTP 200, otherwise None.
    """
    if len(args.keys()) == 1:
        # Only incident-id was supplied.
        raise Exception('No fields to update were given')
    incident_id = args['incident-id']
    incident = get_incident(client, incident_id, True)
    changes = []
    if 'severity' in args:
        old_value = incident['severity_code']
        severity = args['severity']
        # NOTE(review): a severity other than Low/Medium/High leaves
        # new_value unbound and raises NameError below — confirm intended.
        if severity == 'Low':
            new_value = 4
        elif severity == 'Medium':
            new_value = 5
        elif severity == 'High':
            new_value = 6
        changes.append({
            'field': 'severity_code',
            'old_value': {
                'id': old_value
            },
            'new_value': {
                'id': new_value
            }
        })
    if 'owner' in args:
        # Resolve "First Last" to a user id.
        users = get_users(client)
        old_value = incident['owner_id']
        full_name = args['owner'].split(' ')
        first_name, last_name = full_name[0], full_name[1]
        new_value = -1
        for user in users:
            if first_name == user['fname'] and last_name == user['lname']:
                new_value = user['id']
                break
        if new_value == -1:
            raise Exception('User was not found')
        changes.append({
            'field': 'owner_id',
            'old_value': {
                'id': old_value
            },
            'new_value': {
                'id': new_value
            }
        })
    if 'incident-type' in args:
        old_value = incident['incident_type_ids']
        type_id = INCIDENT_TYPE_DICT[args['incident-type']]
        new_value_list = old_value[:]
        new_value_list.append(type_id)
        changes.append({
            'field': 'incident_type_ids',
            'old_value': {
                'ids': old_value
            },
            'new_value': {
                'ids': new_value_list
            }
        })
    if 'nist' in args:
        old_value = incident['nist_attack_vectors']
        nist_id = NIST_DICT[args['nist']]
        new_value_list = old_value[:]
        new_value_list.append(nist_id)
        changes.append({
            'field': 'nist_attack_vectors',
            'old_value': {
                'ids': old_value
            },
            'new_value': {
                'ids': new_value_list
            }
        })
    if 'resolution' in args:
        old_value = incident['resolution_id']
        new_value = RESOLUTION_TO_ID_DICT[args['resolution']]
        changes.append({
            'field': 'resolution_id',
            'old_value': {
                'id': old_value
            },
            'new_value': {
                'id': new_value
            }
        })
    if 'resolution-summary' in args:
        old_summary = incident['resolution_summary']
        new_summary = args['resolution-summary']
        changes.append({
            'field': 'resolution_summary',
            'old_value': {
                'textarea': old_summary
            },
            'new_value': {
                'textarea': {
                    'format': 'html',
                    'content': new_summary
                }
            }
        })
    if 'description' in args:
        old_description = incident['description']
        new_description = args['description']
        changes.append({
            'field': 'description',
            'old_value': {
                'textarea': old_description
            },
            'new_value': {
                'textarea': {
                    'format': 'html',
                    'content': new_description
                }
            }
        })
    if 'name' in args:
        old_name = incident['name']
        new_name = args['name']
        changes.append({
            'field': 'name',
            'old_value': {
                'text': old_name
            },
            'new_value': {
                'text': new_name
            }
        })
    if args.get('other-fields'):
        extract_data_form_other_fields_argument(args.get('other-fields'), incident, changes)
    data = {
        'changes': changes
    }
    response = update_incident(client, incident_id, data)
    if response.status_code == 200:
        return 'Incident ' + str(args['incident-id']) + ' was updated successfully.'
def update_incident(client, incident_id, data):
    """PATCH the given change-set onto an incident; return the raw response."""
    return client.patch('/incidents/' + str(incident_id), data)
def get_incident_command(client, incident_id):
    """Fetch one incident and render selected fields as a war-room entry."""
    incident = get_incident(client, incident_id)
    # Only this whitelist of raw fields is surfaced to the user.
    wanted_keys = ['create_date', 'discovered_date', 'description', 'due_date', 'id', 'name', 'owner_id',
                   'phase_id', 'severity_code', 'confirmed', 'employee_involved', 'negative_pr_likely',
                   'confirmed', 'start_date', 'due_date', 'negative_pr_likely', 'reporter', 'exposure_type_id',
                   'nist_attack_vectors']
    pretty_incident = dict((k, incident[k]) for k in wanted_keys if k in incident)
    if incident['resolution_id']:
        pretty_incident['resolution'] = RESOLUTION_DICT[incident['resolution_id']]
    if incident['resolution_summary']:
        # Strip the <div> wrapper from the HTML summary.
        pretty_incident['resolution_summary'] = incident['resolution_summary'].replace('<div>', '').replace('</div>',
                                                                                                            '')
    pretty_incident = prettify_incidents(client, [pretty_incident])
    result_incident = createContext(pretty_incident, id=None, keyTransform=underscoreToCamelCase, removeNull=True)
    ec = {
        'Resilient.Incidents(val.Id && val.Id === obj.Id)': result_incident
    }
    hr_incident = result_incident[:]
    if hr_incident[0].get('NistAttackVectors'):
        # Join vectors into one newline-separated cell for the markdown table.
        nist_vectors_str = ''
        for vector in hr_incident[0].get('NistAttackVectors', []):
            nist_vectors_str += vector + '\n'
        hr_incident[0]['NistAttackVectors'] = nist_vectors_str
    title = 'IBM Resilient Systems incident ID ' + str(incident_id)
    entry = {
        'Type': entryTypes['note'],
        'Contents': incident,
        'ContentsFormat': formats['json'],
        'ReadableContentsFormat': formats['markdown'],
        'HumanReadable': tableToMarkdown(title, hr_incident,
                                         headers=['Id', 'Name', 'Description', 'NistAttackVectors', 'Phase',
                                                  'Resolution', 'ResolutionSummary', 'Owner',
                                                  'CreatedDate', 'DateOccurred', 'DiscoveredDate', 'DueDate',
                                                  'NegativePr', 'Confirmed', 'ExposureType',
                                                  'Severity', 'Reporter']),
        'EntryContext': ec
    }
    return entry
def get_incident(client, incident_id, content_format=False):
    """GET a single incident; optionally request HTML-converted text fields."""
    endpoint = '/incidents/' + str(incident_id)
    if content_format:
        endpoint += '?text_content_output_format=objects_convert_html'
    return client.get(endpoint)
def list_open_incidents(client):
    """Return every open incident from the Resilient API."""
    return client.get('/incidents/open')
def get_members_command(client, incident_id):
    """List an incident's members (plus its owner) as a war-room entry."""
    response = get_members(client, incident_id)['members']
    incident = get_incident(client, incident_id)
    # The owner is not part of the members list; include them too.
    response.append(incident['owner_id'])
    users = get_users(client)
    members = []
    for user in users:
        if user['id'] in response:
            members.append({
                'FirstName': user['fname'],
                'LastName': user['lname'],
                'ID': user['id'],
                'Email': user['email']
            })
    ec = {
        'Resilient.Incidents(val.Id && val.Id === obj.Id)': {
            'Id': incident_id,
            'Members': members
        }
    }
    title = 'Members of incident ' + incident_id
    entry = {
        'Type': entryTypes['note'],
        'Contents': members,
        'ContentsFormat': formats['json'],
        'ReadableContentsFormat': formats['markdown'],
        'HumanReadable': tableToMarkdown(title, members, ['ID', 'LastName', 'FirstName', 'Email']),
        'EntryContext': ec
    }
    return entry
def get_members(client, incident_id):
    """Fetch the member list of an incident (*incident_id* is a string)."""
    return client.get('/incidents/' + incident_id + '/members')
def get_users_command(client):
    """List all Resilient users as a markdown war-room entry."""
    raw_users = get_users(client)
    users = [
        {
            'FirstName': user['fname'],
            'LastName': user['lname'],
            'ID': user['id'],
            'Email': user['email'],
        }
        for user in raw_users
    ]
    title = 'IBM Resilient Systems Users'
    return {
        'Type': entryTypes['note'],
        'Contents': users,
        'ContentsFormat': formats['json'],
        'ReadableContentsFormat': formats['markdown'],
        'HumanReadable': tableToMarkdown(title, users, ['ID', 'LastName', 'FirstName', 'Email'])
    }
def get_users(client):
    """Return the full user list from the Resilient API."""
    return client.get('/users')
def get_phases(client):
    """Return the phase definitions from the Resilient API."""
    return client.get('/phases')
def get_tasks_command(client, incident_id):
    """List an incident's tasks as a war-room entry.

    Returns the plain string 'No tasks found for this incident.' when the
    task list is empty.
    """
    response = get_tasks(client, incident_id)
    if response:
        tasks = []
        for task in response:
            task_object = {}
            # incident_name is taken from the last task iterated; every task
            # carries the same inc_name, so any iteration works.
            incident_name = task['inc_name']
            task_object['ID'] = task['id']
            task_object['Name'] = task['name']
            if task['due_date']:
                task_object['DueDate'] = normalize_timestamp(task['due_date'])
            task_object['Status'] = 'Open' if task['status'] == 'O' else 'Closed'
            task_object['Required'] = task['required']
            if task['form']:
                task_object['Form'] = task['form']
            if task['user_notes']:
                task_object['UserNotes'] = task['user_notes']
            task_object['Creator'] = task.get('creator_principal', {}).get('display_name')
            task_object['Category'] = task['cat_name']
            if task['instr_text']:
                task_object['Instructions'] = task['instr_text']
            tasks.append(task_object)
        ec = {
            'Resilient.Incidents(val.Id && val.Id === obj.Id)': {
                'Id': incident_id,
                'Name': incident_name,
                'Tasks': tasks
            }
        }
        title = 'Incident ' + incident_id + ' tasks'
        entry = {
            'Type': entryTypes['note'],
            'Contents': response,
            'ContentsFormat': formats['json'],
            'ReadableContentsFormat': formats['markdown'],
            'HumanReadable': tableToMarkdown(title, tasks,
                                             ['ID', 'Name', 'Category', 'Form', 'Status', 'DueDate', 'Instructions',
                                              'UserNotes', 'Required', 'Creator']),
            'EntryContext': ec
        }
        return entry
    else:
        return 'No tasks found for this incident.'
def get_tasks(client, incident_id):
    """Fetch the task list of an incident (*incident_id* is a string)."""
    return client.get('/incidents/' + incident_id + '/tasks')
def set_member_command(client, incident_id, members):
    """Replace an incident's member list and render the result.

    *members* is a comma-separated string of user ids.
    """
    members = [int(x) for x in members.split(',')]
    incident = get_incident(client, incident_id)
    # The API requires the current incident version for optimistic locking.
    incident_version = incident['vers']
    data = {
        'vers': incident_version,
        'members': members
    }
    response = set_member(client, incident_id, data)
    users = get_users(client)
    entry = {}
    if response:
        # NOTE(review): the response is enriched in place and may be either a
        # dict or a list depending on the API — both shapes are handled below.
        for user in users:
            if user['id'] in members:
                if isinstance(response, dict):
                    response.update({
                        'FirstName': user['fname'],
                        'LastName': user['lname'],
                        'ID': user['id'],
                        'Email': user['email']
                    })
                else:
                    response.append({
                        'FirstName': user['fname'],
                        'LastName': user['lname'],
                        'ID': user['id'],
                        'Email': user['email']
                    })
        ec = {
            'Resilient.Incidents(val.Id && val.Id === obj.Id)': {
                'Id': incident_id,
                'Members': response
            }
        }
        title = 'Members of incident ' + incident_id
        entry = {
            'Type': entryTypes['note'],
            'Contents': response,
            'ContentsFormat': formats['json'],
            'ReadableContentsFormat': formats['markdown'],
            'HumanReadable': tableToMarkdown(title, response),
            'EntryContext': ec
        }
    return entry
def set_member(client, incident_id, data):
    """PUT the new member list onto an incident; return the raw response."""
    return client.put('/incidents/' + incident_id + '/members', data)
def close_incident_command(client, incident_id):
    """Close an incident, requiring resolution fields to be set first.

    Returns a human-readable message on success or when preconditions fail;
    returns None when the API responds with a non-200 status.
    """
    incident = get_incident(client, incident_id)
    if not incident['resolution_id'] or not incident['resolution_summary']:
        return 'Resolution and resolution summary of the incident should be updated before closing an incident.'
    response = close_incident(client, incident_id, incident)
    if response.status_code == 200:
        return 'Incident ' + incident_id + ' was closed.'
def close_incident(client, incident_id, incident):
    """Flip the incident's plan_status to 'C' (closed) via the change-set API."""
    change = {
        'field': 'plan_status',
        'old_value': {'text': incident['plan_status']},
        'new_value': {'text': 'C'}
    }
    return update_incident(client, incident_id, {'changes': [change]})
def create_incident_command(client, args):
    """Create a new incident with the given name and render the result.

    discovered_date is sent as 0 (epoch); only the name is configurable here.
    """
    incident_name = args['name']
    data = {
        "name": incident_name,
        "discovered_date": 0
    }
    response = create_incident(client, data)
    hr = {
        'ID': response['id'],
        'Name': incident_name
    }
    ec = {
        'Resilient.Incidents(val.Id && val.Id === obj.Id)': {
            'Id': str(response['id']),
            'Name': incident_name
        }
    }
    title = 'Incident ' + incident_name + ' was created'
    entry = {
        'Type': entryTypes['note'],
        'Contents': response,
        'ContentsFormat': formats['json'],
        'ReadableContentsFormat': formats['markdown'],
        'HumanReadable': tableToMarkdown(title, hr),
        'EntryContext': ec
    }
    return entry
def create_incident(client, data):
    """POST a new incident payload; return the raw response."""
    return client.post('/incidents', data)
def incident_artifacts_command(client, incident_id):
    """List an incident's artifacts (with attachment details) as an entry.

    Returns the plain string 'No artifacts found.' when the incident has none.
    """
    response = incident_artifacts(client, incident_id)
    if response:
        users = get_users(client)
        ec_artifacts = []
        hr_artifacts = []
        for artifact in response:
            # incident_name is the same on every artifact of this incident.
            incident_name = artifact['inc_name']
            artifact_object = {
                'ID': artifact['id'],
                'Type': get_artifact_type(client, artifact['type']),
                'Value': artifact['value'],
                'CreatedDate': normalize_timestamp(artifact['created']),
                'Creator': artifact['creator']['fname'] + artifact['creator']['lname']
            }
            if artifact['description']:
                artifact_object['Description'] = artifact['description']
            hr_artifact = dict(artifact_object)
            if artifact['attachment']:
                # Context gets a structured dict; markdown gets a flat string.
                artifact_object['Attachments'] = {}
                attachment_string = ''
                artifact_object['Attachments']['ID'] = artifact['attachment']['id']
                attachment_string += 'ID: ' + str(artifact_object['Attachments']['ID']) + '\n'
                artifact_object['Attachments']['Name'] = artifact['attachment']['name']
                attachment_string += 'Name: ' + artifact_object['Attachments']['Name'] + '\n'
                artifact_object['Attachments']['CreatedDate'] = normalize_timestamp(artifact['attachment']['created'])
                attachment_string += 'Created Date: ' + artifact_object['Attachments']['CreatedDate'] + '\n'
                artifact_object['Attachments']['ContentType'] = artifact['attachment']['content_type']
                attachment_string += 'Content Type : ' + artifact_object['Attachments']['ContentType'] + '\n'
                artifact_object['Attachments']['Size'] = artifact['attachment']['size']
                attachment_string += 'Size: ' + str(artifact_object['Attachments']['Size']) + '\n'
                creator_id = artifact['attachment']['creator_id']
                for user in users:
                    if creator_id == user['id']:
                        artifact_object['Attachments']['Creator'] = user['fname'] + ' ' + user['lname']
                        attachment_string += 'Creator: ' + artifact_object['Attachments']['Creator']
                        break
                hr_artifact['Attachments'] = attachment_string
            hr_artifacts.append(hr_artifact)
            ec_artifacts.append(artifact_object)
        ec = {
            'Resilient.Incidents(val.Id && val.Id === obj.Id)': {
                'Id': incident_id,
                'Name': incident_name,
                'Artifacts': ec_artifacts
            }
        }
        title = 'Incident ' + incident_id + ' artifacts'
        entry = {
            'Type': entryTypes['note'],
            'Contents': response,
            'ContentsFormat': formats['json'],
            'ReadableContentsFormat': formats['markdown'],
            'HumanReadable': tableToMarkdown(title, hr_artifacts,
                                             headers=['ID', 'Value', 'Description', 'CreatedDate', 'Creator']),
            'EntryContext': ec
        }
        return entry
    else:
        return 'No artifacts found.'
def incident_artifacts(client, incident_id):
    """Fetch the artifact list of an incident (*incident_id* is a string)."""
    return client.get('/incidents/' + incident_id + '/artifacts')
def get_artifact_type(client, artifact_id):
    """Resolve an artifact type id to its display name."""
    return client.get('/artifact_types/' + str(artifact_id))['name']
def incident_attachments_command(client, incident_id):
    """List an incident's attachments as a war-room entry.

    Returns the plain string 'No attachments found.' when there are none.
    """
    response = incident_attachments(client, incident_id)
    if response:
        attachments = []
        users = get_users(client)
        for attachment in response:
            incident_name = attachment['inc_name']
            attachment_object = {}
            attachment_object['ID'] = attachment['id']
            attachment_object['Name'] = attachment['name']
            attachment_object['CreatedDate'] = normalize_timestamp(attachment['created'])
            attachment_object['Size'] = attachment['size']
            attachment_object['ContentType'] = attachment['content_type']
            attachment_object['Name'] = attachment['name']
            # NOTE(review): if neither creator_id nor inc_owner matches a user,
            # 'Creator' is omitted and incident_owner stays unbound below,
            # raising NameError when building ec — confirm this cannot happen.
            for user in users:
                if attachment['creator_id'] == user['id']:
                    attachment_object['Creator'] = user['fname'] + ' ' + user['lname']
                if attachment['inc_owner'] == user['id']:
                    incident_owner = user['fname'] + ' ' + user['lname']
            attachments.append(attachment_object)
        ec = {
            'Resilient.Incidents(val.Id && val.Id === obj.Id)': {
                'Id': incident_id,
                'Name': incident_name,
                'Owner': incident_owner,
                'Attachments': attachments
            }
        }
        title = 'Incident ' + incident_id + ' attachments'
        entry = {
            'Type': entryTypes['note'],
            'Contents': response,
            'ContentsFormat': formats['json'],
            'ReadableContentsFormat': formats['markdown'],
            'HumanReadable': tableToMarkdown(title, attachments),
            'EntryContext': ec
        }
        return entry
    else:
        return 'No attachments found.'
def incident_attachments(client, incident_id):
    """Fetch the attachment list of an incident (*incident_id* is a string)."""
    return client.get('/incidents/' + incident_id + '/attachments')
def related_incidents_command(client, incident_id):
    """List incidents related to the given one (with shared artifacts).

    Returns the plain string 'No related incidents found.' when empty.
    """
    response = related_incidents(client, incident_id)['incidents']
    if response:
        ec_incidents = []
        hr_incidents = []
        for incident in response:
            incident_object = {
                'ID': incident['id'],
                'Name': incident['name'],
                'Status': 'Active' if incident['plan_status'] == 'A' else 'Closed',
                'CreatedDate': normalize_timestamp(incident['create_date']),
            }
            hr_incident = dict(incident_object)
            if incident['artifacts']:
                # Context gets structured artifact dicts; markdown gets a
                # concatenated string cell.
                hr_incident['Artifacts'] = ''
                artifacts = []
                for artifact in incident['artifacts']:
                    artifact_object = {}
                    artifact_string = ''
                    artifact_object['ID'] = artifact['id']
                    artifact_string += 'ID: ' + str(artifact_object['ID']) + '\n'
                    artifact_object['CreatedDate'] = normalize_timestamp(artifact['created'])
                    artifact_string += 'Created Date: ' + artifact_object['CreatedDate'] + '\n'
                    if artifact['description']:
                        artifact_object['Description'] = artifact['description']
                        artifact_string += 'Description: ' + artifact_object['Description'] + '\n'
                    artifact_object['Creator'] = artifact['creator']['fname'] + ' ' + artifact['creator']['lname']
                    artifact_string += 'Creator: ' + artifact_object['Creator'] + '\n'
                    hr_incident['Artifacts'] += artifact_string
                    artifacts.append(artifact_object)
                incident_object['Artifacts'] = artifacts
            hr_incidents.append(hr_incident)
            ec_incidents.append(incident_object)
        ec = {
            'Resilient.Incidents(val.Id && val.Id === obj.Id)': {
                'Id': incident_id,
                'Related': ec_incidents
            }
        }
        title = 'Incident ' + incident_id + ' related incidents'
        entry = {
            'Type': entryTypes['note'],
            'Contents': response,
            'ContentsFormat': formats['json'],
            'ReadableContentsFormat': formats['markdown'],
            'HumanReadable': tableToMarkdown(title, hr_incidents),
            'EntryContext': ec
        }
        return entry
    else:
        return 'No related incidents found.'
def related_incidents(client, incident_id):
    """Query incidents related to *incident_id*, asking the API to include artifacts."""
    endpoint = '/incidents/' + incident_id + '/related_ex?want_artifacts=true'
    return client.get(endpoint)
def add_note_command(client, incident_id, note):
    """Attach a plain-text comment to an incident and return a war-room entry."""
    payload = {
        'text': {
            'format': 'text',
            'content': note
        }
    }
    response = client.post('/incidents/' + str(incident_id) + '/comments', payload)
    context = {
        'Resilient.incidentNote(val.Id && val.Id === obj.Id)': response
    }
    return {
        'Type': entryTypes['note'],
        'Contents': response,
        'ContentsFormat': formats['json'],
        'EntryContext': context,
        'ReadableContentsFormat': formats['text'],
        'HumanReadable': 'The note was added successfully to incident {0}'.format(incident_id)
    }
def add_artifact_command(client, incident_id, artifact_type, artifact_value, artifact_description):
    """Create an artifact on an incident and return a war-room entry."""
    payload = {
        'type': artifact_type,
        'value': artifact_value,
        'description': {
            'format': 'text',
            'content': artifact_description
        }
    }
    response = client.post('/incidents/' + str(incident_id) + '/artifacts', payload)
    context = {
        'Resilient.incidentArtifact(val.Id && val.Id === obj.Id)': response
    }
    return {
        'Type': entryTypes['note'],
        'Contents': response,
        'ContentsFormat': formats['json'],
        'EntryContext': context,
        'ReadableContentsFormat': formats['text'],
        'HumanReadable': 'The artifact was added successfully to incident {0}'.format(incident_id)
    }
def fetch_incidents(client):
    """Pull Resilient incidents created since the last fetch into Demisto.

    Uses ``demisto.getLastRun()['time']`` (epoch milliseconds) as the
    watermark, falling back to the configured FETCH_TIME on the first run.
    Each new incident is enriched with its artifacts and attachments before
    being serialized as a Demisto incident.
    """
    last_run = demisto.getLastRun() and demisto.getLastRun().get('time')
    if not last_run:
        last_run = date_to_timestamp(FETCH_TIME, date_format='%Y-%m-%dT%H:%M:%SZ')
        args = {'date-created-after': FETCH_TIME}
    else:
        args = {'date-created-after': normalize_timestamp(last_run)}
    resilient_incidents = search_incidents(client, args)
    incidents = []
    if resilient_incidents:
        last_incident_creation_time = resilient_incidents[0].get('create_date')  # the first incident's creation time
        for incident in resilient_incidents:
            incident_creation_time = incident.get('create_date')
            if incident_creation_time > last_run:  # timestamp in milliseconds
                artifacts = incident_artifacts(client, str(incident.get('id', '')))
                if artifacts:
                    incident['artifacts'] = artifacts
                attachments = incident_attachments(client, str(incident.get('id', '')))
                if attachments:
                    incident['attachments'] = attachments
                # Python 2 only (`unicode`): strip the <div> wrapper Resilient
                # puts around rich-text descriptions.
                if isinstance(incident.get('description'), unicode):
                    incident['description'] = incident['description'].replace('<div>', '').replace('</div>', '')
                incident['discovered_date'] = normalize_timestamp(incident.get('discovered_date'))
                incident['create_date'] = normalize_timestamp(incident_creation_time)
                demisto_incident = dict()  # type: dict
                demisto_incident['name'] = 'IBM Resilient Systems incident ID ' + str(incident['id'])
                demisto_incident['occurred'] = incident['create_date']
                demisto_incident['rawJSON'] = json.dumps(incident)
                incidents.append(demisto_incident)
                # updating last creation time if needed
                if incident_creation_time > last_incident_creation_time:
                    last_incident_creation_time = incident_creation_time
        # NOTE(review): setLastRun/incidents are only called when the search
        # returned results; verify an empty fetch need not call demisto.incidents([]).
        demisto.setLastRun({'time': last_incident_creation_time})
        demisto.incidents(incidents)
def test():
    """Validate the configured first-fetch timestamp, if one was supplied.

    Emits 'ok' on success; any other output fails the integration test.
    """
    if not FETCH_TIME:
        demisto.results('ok')
        return
    try:
        datetime.strptime(FETCH_TIME, TIME_FORMAT)
    except ValueError as error:
        return_error('There is something wrong with the fetch date. Error: {}'.format(error))
    demisto.results('ok')
''' EXECUTION CODE '''
def get_client():
    """Build an authenticated Resilient REST client from the integration params.

    E-mail/password credentials take precedence; an API key id/secret pair is
    the fallback. Aborts with an error entry when neither pair is configured.
    """
    opts = {
        'host': SERVER,
        'port': PORT,
        'cafile': os.environ.get('SSL_CERT_FILE') if USE_SSL else 'false',
        'org': ORG_NAME
    }
    if USERNAME and PASSWORD:
        opts['email'] = USERNAME
        opts['password'] = PASSWORD
    elif API_KEY_ID and API_KEY_SECRET:
        opts['api_key_id'] = API_KEY_ID
        opts['api_key_secret'] = API_KEY_SECRET
    else:
        return_error('Credentials were not provided. Configure either the username and password'
                     ' or the API Key and API Secret')
    return resilient.get_client(opts=opts)
def main():
    """Entry point: build the client and dispatch the invoked Demisto command."""
    client = get_client()
    # Disable SDK logging warning messages
    integration_logger = logging.getLogger('resilient')  # type: logging.Logger
    integration_logger.propagate = False
    LOG('command is %s' % (demisto.command(),))
    try:
        args = demisto.args()
        if demisto.command() == 'test-module':
            # Checks if there is an authenticated session
            test()
        elif demisto.command() == 'fetch-incidents':
            fetch_incidents(client)
        elif demisto.command() == 'rs-search-incidents':
            demisto.results(search_incidents_command(client, args))
        elif demisto.command() == 'rs-update-incident':
            demisto.results(update_incident_command(client, args))
        elif demisto.command() == 'rs-incidents-get-members':
            demisto.results(get_members_command(client, args['incident-id']))
        elif demisto.command() == 'rs-get-incident':
            demisto.results(get_incident_command(client, args['incident-id']))
        elif demisto.command() == 'rs-incidents-update-member':
            demisto.results(set_member_command(client, args['incident-id'], args['members']))
        elif demisto.command() == 'rs-incidents-get-tasks':
            demisto.results(get_tasks_command(client, args['incident-id']))
        elif demisto.command() == 'rs-get-users':
            demisto.results(get_users_command(client))
        elif demisto.command() == 'rs-close-incident':
            demisto.results(close_incident_command(client, args['incident-id']))
        elif demisto.command() == 'rs-create-incident':
            demisto.results(create_incident_command(client, args))
        elif demisto.command() == 'rs-incident-artifacts':
            demisto.results(incident_artifacts_command(client, args['incident-id']))
        elif demisto.command() == 'rs-incident-attachments':
            demisto.results(incident_attachments_command(client, args['incident-id']))
        elif demisto.command() == 'rs-related-incidents':
            demisto.results(related_incidents_command(client, args['incident-id']))
        elif demisto.command() == 'rs-add-note':
            demisto.results(add_note_command(client, args['incident-id'], args['note']))
        elif demisto.command() == 'rs-add-artifact':
            demisto.results(add_artifact_command(client, args['incident-id'], args['artifact-type'],
                                                 args['artifact-value'], args.get('artifact-description')))
    except Exception as e:
        # Python 2 idiom: Exception.message does not exist on Python 3.
        LOG(e.message)
        LOG.print_log()
        raise
# Run when executed directly or via Demisto's exec() wrapper, whose module
# name is '__builtin__' on Python 2 and 'builtins' on Python 3.
if __name__ in ('__main__', '__builtin__', 'builtins'):
    main()
| demisto/content | Packs/IBMResilientSystems/Integrations/IBMResilientSystems/IBMResilientSystems.py | Python | mit | 42,818 |
#!/usr/bin/env python
import os
import sys
import warnings
# Display deprecation warnings, which are hidden by default:
# https://docs.python.org/3.7/library/warnings.html#default-warning-filters
warnings.simplefilter('default', DeprecationWarning)
# Suppress noisy warnings from dependencies
# Reported in https://support.newrelic.com/tickets/338064
warnings.filterwarnings('ignore', category=DeprecationWarning, module='newrelic')
# https://github.com/encode/django-rest-framework/issues/6317
warnings.filterwarnings('ignore', category=DeprecationWarning, module='markdown.util')
# "Using or importing the ABCs from 'collections' instead of from 'collections.abc' is deprecated, and in 3.8 it will stop working"
# corsheaders/checks.py -> https://github.com/ottoyiu/django-cors-headers/issues/374
# jinja2/runtime.py -> https://github.com/pallets/jinja/pull/867
# orderedmultidict/orderedmultidict.py -> https://github.com/gruns/orderedmultidict/pull/20
# promise/promise_list.py -> https://github.com/syrusakbary/promise/pull/67
warnings.filterwarnings(
    'ignore', category=DeprecationWarning, message=r'Using or importing the ABCs .*'
)
# "the imp module is deprecated in favour of importlib; see the module's documentation for alternative uses"
warnings.filterwarnings('ignore', category=DeprecationWarning, module='celery.utils.imports')
# Standard Django management entry point. The settings module is assigned
# unconditionally (not setdefault), so the environment cannot override it.
if __name__ == "__main__":
    os.environ["DJANGO_SETTINGS_MODULE"] = "treeherder.config.settings"
    from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)
import logging
from pylons import request, response, session, tmpl_context as c
from zkpylons.lib.helpers import redirect_to
from pylons.decorators import validate
from pylons.decorators.rest import dispatch_on
from formencode import validators, htmlfill, ForEach, Invalid
from formencode.variabledecode import NestedVariables
from zkpylons.lib.base import BaseController, render
from zkpylons.lib.ssl_requirement import enforce_ssl
from zkpylons.lib.validators import BaseSchema, ProductValidator
import zkpylons.lib.helpers as h
from authkit.authorize.pylons_adaptors import authorize
from authkit.permissions import ValidAuthKitUser
from zkpylons.lib.mail import email
from zkpylons.model import meta
from zkpylons.model.product import Product, ProductInclude
from zkpylons.model.product_category import ProductCategory
from zkpylons.config.lca_info import lca_info
log = logging.getLogger(__name__)
class NotExistingProductCategoryValidator(validators.FancyValidator):
    """Schema-level check that the submitted category name is not already
    taken by a *different* category.

    Comparing against ``c.product_category`` lets an edited category keep
    its own name (``edit`` stores the category on ``c`` before validation).
    NOTE(review): for ``new`` this relies on ``c.product_category`` being
    unset/None — confirm against the Pylons tmpl_context behavior.
    """
    def validate_python(self, values, state):
        product_category = ProductCategory.find_by_name(values['product_category']['name'])
        if product_category != None and product_category != c.product_category:
            message = "Duplicate product category name"
            error_dict = {'product_category.name': "Category name already in use"}
            raise Invalid(message, values, state, error_dict=error_dict)
class ProductCategorySchema(BaseSchema):
    """Field validators for a single product category form."""
    name = validators.String(not_empty=True)
    description = validators.String(not_empty=True)
    note = validators.String()
    display = validators.String(not_empty=True)
    display_mode = validators.String()
    display_order = validators.Int(min=0, max=2000000, not_empty=True)
    invoice_free_products = validators.Bool(if_missing=False)
    min_qty = validators.Int(min=0, max=2000000)
    max_qty = validators.Int(min=0, max=2000000)
    # TODO: check that min_qty <= max_qty
class NewProductCategorySchema(BaseSchema):
    """Create form: nested category fields plus a uniqueness check on the name."""
    product_category = ProductCategorySchema()
    pre_validators = [NestedVariables]
    chained_validators = [NotExistingProductCategoryValidator()]
class EditProductCategorySchema(BaseSchema):
    """Edit form: nested category fields; no uniqueness check is chained here."""
    product_category = ProductCategorySchema()
    pre_validators = [NestedVariables]
class ProductCategoryController(BaseController):
    """CRUD controller for product categories (organiser-only, SSL enforced)."""
    @enforce_ssl(required_all=True)
    @authorize(h.auth.has_organiser_role)
    def __before__(self, **kwargs):
        # Access control only; decorators apply to every action.
        pass
    @dispatch_on(POST="_new")
    def new(self):
        """Render the empty create form (POST dispatches to _new)."""
        return render('/product_category/new.mako')
    @validate(schema=NewProductCategorySchema(), form='new', post_only=True, on_get=True, variable_decode=True)
    def _new(self):
        """Create a category from the validated form and redirect to it."""
        results = self.form_result['product_category']
        c.product_category = ProductCategory(**results)
        meta.Session.add(c.product_category)
        meta.Session.commit()
        h.flash("Category created")
        redirect_to(action='view', id=c.product_category.id)
    def view(self, id):
        """Show a single category."""
        c.product_category = ProductCategory.find_by_id(id)
        return render('/product_category/view.mako')
    def stats(self, id):
        """Show sales/usage statistics for a category."""
        c.can_edit = True
        c.product_category = ProductCategory.find_by_id(id)
        c.product_categories = ProductCategory.find_all()
        return render('/product_category/stats.mako')
    def index(self):
        """List all categories."""
        c.can_edit = True
        c.product_category_collection = ProductCategory.find_all()
        return render('/product_category/list.mako')
    @dispatch_on(POST="_edit")
    def edit(self, id):
        """Render the edit form pre-filled with the category's current values."""
        c.product_category = ProductCategory.find_by_id(id)
        defaults = h.object_to_defaults(c.product_category, 'product_category')
        form = render('/product_category/edit.mako')
        return htmlfill.render(form, defaults)
    @validate(schema=EditProductCategorySchema(), form='edit', post_only=True, on_get=True, variable_decode=True)
    def _edit(self, id):
        """Apply the validated form values to the category and redirect."""
        product_category = ProductCategory.find_by_id(id)
        for key in self.form_result['product_category']:
            setattr(product_category, key, self.form_result['product_category'][key])
        # update the objects with the validated form data
        meta.Session.commit()
        h.flash("The product_category has been updated successfully.")
        redirect_to(action='view', id=id)
    @dispatch_on(POST="_delete")
    def delete(self, id):
        """Delete the product_category
        GET will return a form asking for approval.
        POST requests will delete the item.
        """
        c.product_category = ProductCategory.find_by_id(id)
        return render('/product_category/confirm_delete.mako')
    @validate(schema=None, form='delete', post_only=True, on_get=True, variable_decode=True)
    def _delete(self, id):
        """Manually cascade-delete products, their includes, and the category."""
        c.product_category = ProductCategory.find_by_id(id)
        # For some reason cascading isn't working for me. Likely I just don't understand SA so I'll do it this way:
        # first delete all of the products
        for product in c.product_category.products:
            # We also delete all of the productincludes for the products
            for include in ProductInclude.find_by_product(product.id):
                meta.Session.delete(include)
                meta.Session.commit()
            meta.Session.delete(product)
            meta.Session.commit()
        # Also delete any includes of the category
        for include in ProductInclude.find_by_category(id):
            meta.Session.delete(include)
            meta.Session.commit()
        meta.Session.delete(c.product_category)
        meta.Session.commit()
        h.flash("Category has been deleted.")
        redirect_to('index')
| neillc/zookeepr | zkpylons/controllers/product_category.py | Python | gpl-2.0 | 5,696 |
from datetime import datetime, timedelta
from django.core.management import BaseCommand
from corehq.apps.saved_reports.models import ReportNotification
class Command(BaseCommand):
    """Shift the scheduled hour of every saved report in the given domains
    by one hour (backward by default, forward with -F) for DST changes."""

    def add_arguments(self, parser):
        parser.add_argument('domains', nargs='+')
        parser.add_argument('-F', '--forward', action='store_true')

    def handle(self, domains, forward=False, *args, **kwargs):
        for domain in domains:
            print(f'processing domain: {domain}')
            for report in get_reports_by_domain(domain):
                old_hour = report.hour
                updated = adjust_report(report, forward)
                updated.save()
                print(f'Updated hour on report {updated._id} from {old_hour} to {updated.hour}')
def get_reports_by_domain(domain):
    """Return all ReportNotification documents belonging to *domain*."""
    startkey = [domain]
    return ReportNotification.view(
        'reportconfig/user_notifications',
        reduce=False,
        include_docs=True,
        startkey=startkey,
        endkey=startkey + [{}],
    )
DAYS_IN_WEEK = 7


def adjust_report(report, forward=False):
    """Shift *report*'s scheduled trigger time by one hour in place.

    :param report: a report object with ``interval``, ``day``, ``hour`` and
        ``minute`` attributes; ``day`` is a 0-indexed weekday for weekly
        reports and a day-of-month (1-31) for monthly reports.
    :param forward: shift +1 hour when True, -1 hour otherwise.
    :return: the same (mutated) report.

    Bug fixes vs. the previous implementation, which anchored the arithmetic
    on ``datetime.now()``:
    - weekly reports shifted backward across midnight on the 1st of a month
      landed on the previous month's last day-of-month and produced a wrong
      weekday from ``(day - 1) % 7``;
    - monthly reports with ``day`` greater than the current month's length
      (e.g. 31 in February) raised ``ValueError`` from ``replace``.
    Fixed dates are now used as anchors so the result is deterministic.
    """
    delta = timedelta(hours=1) if forward else timedelta(hours=-1)
    if report.interval == 'weekly':
        # Anchor at an arbitrary fixed date; only the offset in whole days
        # matters, and timedelta floor-division handles the backward wrap.
        anchor = datetime(2001, 1, 1)
        trigger = anchor + timedelta(days=report.day, hours=report.hour,
                                     minutes=report.minute) + delta
        report.day = (trigger - anchor).days % DAYS_IN_WEEK
    elif report.interval == 'monthly':
        # Anchor in a 31-day month so any day-of-month 1-31 is valid;
        # crossing a month boundary wraps 1 <-> 31 deterministically.
        trigger = datetime(2001, 1, report.day, report.hour, report.minute) + delta
        report.day = trigger.day
    else:
        # Daily (or any other interval): only the hour changes.
        trigger = datetime(2001, 1, 1, report.hour, report.minute) + delta
    report.hour = trigger.hour
    return report
| dimagi/commcare-hq | corehq/apps/saved_reports/management/commands/daylight_savings.py | Python | bsd-3-clause | 1,620 |
import scipy.sparse as sps
import tensorflow as tf
import numpy as np
import time
from antk.core import loader
import os
import datetime
import matplotlib.pyplot as plt
from pprint import pprint
# ============================================================================================
# ============================CONVENIENCE DICTIONARY==========================================
# ============================================================================================
# Maps the Model constructor's `opt` string to its TensorFlow optimizer class.
OPT = {'adam': tf.train.AdamOptimizer,
       'ada': tf.train.AdagradOptimizer,
       'grad': tf.train.GradientDescentOptimizer,
       'mom': tf.train.MomentumOptimizer}
# ============================================================================================
# ============================GLOBAL MODULE FUNCTIONS=========================================
# ============================================================================================
def get_feed_list(batch, placeholderdict, supplement=None, train=1, debug=False):
    """
    :param batch: A dataset object.
    :param placeholderdict: A dictionary where the keys match keys in batch, and the values are placeholder tensors
    :param supplement: A dictionary of numpy input matrices with keys corresponding to placeholders in placeholderdict, where the row size of the matrices do not correspond to the number of datapoints. For use with input data intended for `embedding_lookup`_.
    :param train: 1 to feed the training-time dropout probabilities and batch-norm flag; any other value feeds 1.0 / inference-time values.
    :param debug: If True, print each placeholder's shape next to the shape of the data fed to it.
    :return: A feed dictionary with keys of placeholder tensors and values of numpy matrices
    """
    ph, dt = [], []  # parallel lists: placeholders and the values fed to them
    datadict = batch.features.copy()
    datadict.update(batch.labels)
    if supplement:
        datadict.update(supplement)
    for desc in placeholderdict:
        ph.append(placeholderdict[desc])
        # Densify sparse matrices and unwrap HotIndex vectors before feeding.
        if sps.issparse(datadict[desc]):
            dt.append(datadict[desc].todense().astype(float, copy=False))
        elif type(datadict[desc]) is loader.HotIndex:
            dt.append(datadict[desc].vec)
        else:
            dt.append(datadict[desc])
        if debug:
            print('%s\n\tph: %s\n\tdt: %s' % (desc,
                                              placeholderdict[desc].get_shape().as_list(),
                                              datadict[desc].shape))
    # Dropout probabilities from the graph: feed the stored training value
    # when train == 1, otherwise 1.0 (no dropout at inference).
    dropouts = tf.get_collection('dropout_prob')
    if dropouts:
        for prob in dropouts:
            ph.append(prob[0])
            if train == 1:
                dt.append(prob[1])
            else:
                dt.append(1.0)
    fd = {i: d for i, d in zip(ph, dt)}
    # Batch-normalization deciders receive the raw train flag.
    bn_deciders = tf.get_collection('bn_deciders')
    if bn_deciders:
        fd.update({decider:[train] for decider in bn_deciders})
    return fd
def parse_summary_val(summary_str):
    """
    Helper function to parse numeric value from tf.scalar_summary
    :param summary_str: Return value from running session on tf.scalar_summary
    :return: A dictionary containing the numeric values.
    """
    # Deserialize the protocol-buffer Summary and map each tag to its scalar.
    summary_proto = tf.Summary()
    summary_proto.ParseFromString(summary_str)
    summaries = {}
    for val in summary_proto.value:
        summaries[val.tag] = val.simple_value
    return summaries
# ============================================================================================
# ============================GENERIC MODEL CLASS=============================================
# ============================================================================================
class Model(object):
    """
    Generic model builder for training and predictions.
    :param objective: Loss function
    :param placeholderdict: A dictionary of placeholders
    :param maxbadcount: For early stopping
    :param momentum: The momentum for tf.MomentumOptimizer
    :param mb: The mini-batch size
    :param verbose: Whether to print dev error, and save_tensor evals
    :param epochs: maximum number of epochs to train for.
    :param learnrate: learnrate for gradient descent
    :param save: Save best model to *best_model_path*.
    :param opt: Optimization strategy. May be 'adam', 'ada', 'grad', 'momentum'
    :param decay: Parameter for decaying learn rate.
    :param evaluate: Evaluation metric
    :param predictions: Predictions selected from feed forward pass.
    :param logdir: Where to put the tensorboard data.
    :param random_seed: Random seed for TensorFlow initializers.
    :param model_name: Name for model
    :param clip_gradients: The limit on gradient size. If 0.0 no clipping is performed.
    :param make_histograms: Whether or not to make histograms for model weights and activations
    :param best_model_path: File to save best model to during training.
    :param save_tensors: A hashmap of str:Tensor mappings. Tensors are evaluated during training. Evaluations of these tensors on best model are accessible via property :any:`evaluated_tensors`.
    :param tensorboard: Whether to make tensorboard histograms of weights and activations, and graphs of dev_error.
    :return: :any:`Model`
    """
    def __init__(self, objective, placeholderdict,
                 maxbadcount=20, momentum=None, mb=1000, verbose=True,
                 epochs=50, learnrate=0.003, save=False, opt='grad',
                 decay=[1, 1.0], evaluate=None, predictions=None,
                 logdir='log/', random_seed=None, model_name='generic',
                 clip_gradients=0.0, make_histograms=False,
                 best_model_path='/tmp/model.ckpt',
                 save_tensors={}, tensorboard=False, train_evaluate=None, debug=False):
        self.objective = objective
        self.debug = debug
        # Fold auxiliary losses registered on the graph (e.g. regularizers)
        # into the training objective.
        for t in tf.get_collection('losses'):
            self.objective += t
        self._placeholderdict = placeholderdict
        self.maxbadcount = maxbadcount
        self.momentum = momentum
        self.mb = mb
        self.verbose = verbose
        self.epochs = epochs
        self.learnrate = learnrate
        self.save = save
        self.opt = opt
        # NOTE(review): `decay` and `save_tensors` are mutable defaults; both
        # are only read in this class, so sharing is harmless but fragile.
        self.decay = decay
        self.epoch_times = []
        self.evaluate = evaluate
        self.train_evaluate = train_evaluate
        self._best_dev_error = float('inf')
        self.predictor = predictions
        self.random_seed = random_seed
        self.session = tf.Session()
        if self.random_seed is not None:
            tf.set_random_seed(self.random_seed)
        self.model_name = model_name
        self.clip_gradients = clip_gradients
        self.tensorboard = tensorboard
        self.make_histograms = make_histograms
        # Histograms are written via the summary writer, so they imply
        # tensorboard output.
        if self.make_histograms:
            self.tensorboard = True
        self.histogram_summaries = []
        if not logdir.endswith('/'):
            self.logdir = logdir + '/'
        else:
            self.logdir = logdir
        # NOTE(review): shells out to mkdir; os.makedirs would avoid the shell.
        os.system('mkdir ' + self.logdir)
        self.save_tensors = save_tensors
        self._completed_epochs = 0.0
        self._best_completed_epochs = 0.0
        self._evaluated_tensors = {}
        self.deverror = []
        self._badcount = 0
        self.batch = tf.Variable(0)
        self.train_eval = []
        self.dev_spot = []
        self.train_spot = []
        # ================================================================
        # ======================For tensorboard===========================
        # ================================================================
        if tensorboard:
            self._init_summaries()
        # =============================================================================
        # ===================OPTIMIZATION STRATEGY=====================================
        # =============================================================================
        optimizer = OPT[self.opt]
        decay_step = self.decay[0]
        decay_rate = self.decay[1]
        global_step = tf.Variable(0, trainable=False) #keeps track of the mini-batch iteration
        if not (decay_step == 1 and decay_rate == 1.0):
            self.learnrate = tf.train.exponential_decay(self.learnrate, self.batch*self.mb,
                                                        decay_step, decay_rate, name='learnrate_decay')
        if self.clip_gradients > 0.0:
            params = tf.trainable_variables()
            self.gradients = tf.gradients(self.objective, params)
            # NOTE(review): this inner check repeats the enclosing condition.
            if self.clip_gradients > 0.0:
                self.gradients, self.gradients_norm = tf.clip_by_global_norm(
                    self.gradients, self.clip_gradients)
            grads_and_vars = zip(self.gradients, params)
            if self.opt == 'mom':
                self.train_step = optimizer(self.learnrate,
                                            self.momentum).apply_gradients(grads_and_vars,
                                                                           global_step=self.batch,
                                                                           name="train")
            else:
                self.train_step = optimizer(self.learnrate).apply_gradients(grads_and_vars,
                                                                            global_step=self.batch,
                                                                            name="train")
        else:
            if self.opt == 'mom':
                self.train_step = optimizer(self.learnrate,
                                            self.momentum).minimize(self.objective,
                                                                    global_step=self.batch)
            else:
                self.train_step = optimizer(self.learnrate).minimize(self.objective,
                                                                     global_step=self.batch)
        # =============================================================================
        # ===================Initialize graph =====================================
        # =============================================================================
        self.session.run(tf.initialize_all_variables())
        if save:
            self.saver = tf.train.Saver()
            self.best_model_path = best_model_path
            self.save_path = self.saver.save(self.session, self.best_model_path)
    # ======================================================================
    # ================Properites============================================
    # ======================================================================
    @property
    def placeholderdict(self):
        '''
        Dictionary of model placeholders
        '''
        return self._placeholderdict
    @property
    def best_dev_error(self):
        """
        The best dev error reached during training.
        """
        return self._best_dev_error
    @property
    def average_secs_per_epoch(self):
        """
        The average number of seconds to complete an epoch.
        """
        return np.sum(np.array(self.epoch_times))/self._completed_epochs
    @property
    def evaluated_tensors(self):
        '''
        A dictionary of evaluations on best model for tensors and keys specified by *save_tensors* argument to constructor.
        '''
        return self._evaluated_tensors
    @property
    def completed_epochs(self):
        '''
        Number of epochs completed during training (fractional)
        '''
        return self._completed_epochs
    @property
    def best_completed_epochs(self):
        '''
        Number of epochs completed during at point of best dev eval during training (fractional)
        '''
        return self._best_completed_epochs
    def plot_train_dev_eval(self, figure_file='testfig.pdf'):
        """Save a matplotlib plot of the recorded train and dev evaluations
        against (fractional) epoch to *figure_file*."""
        plt.plot(self.dev_spot, self.deverror, label='dev')
        plt.plot(self.train_spot, self.train_eval, label='train')
        plt.ylabel('Error')
        plt.xlabel('Epoch')
        plt.legend(loc='upper right')
        plt.savefig(figure_file)
    def predict(self, data, supplement=None):
        """
        :param data: :any:`DataSet` to make predictions from.
        :param supplement: Extra feed matrices forwarded to get_feed_list.
        :return: A set of predictions from feed forward defined by :any:`self.predictions`
        """
        fd = get_feed_list(data, self.placeholderdict, supplement=supplement, train=0, debug=self.debug)
        return self.session.run(self.predictor,
                                feed_dict=fd)
    def eval(self, tensor_in, data, supplement=None):
        """
        Evaluation of model.
        :param tensor_in: The tensor to evaluate.
        :param data: :any:`DataSet` to evaluate on.
        :param supplement: Extra feed matrices forwarded to get_feed_list.
        :return: Result of evaluating on data for :any:`self.evaluate`
        """
        fd = get_feed_list(data, self.placeholderdict, supplement=supplement, train=0, debug=self.debug)
        return self.session.run(tensor_in, feed_dict=fd)
    def train(self, train,
              dev=None, supplement=None,
              eval_schedule='epoch',
              train_dev_eval_factor=3):
        """
        :param train: :any:`DataSet` to train on.
        :param dev: Optional :any:`DataSet` used for early stopping.
        :param supplement: Extra feed matrices forwarded to get_feed_list.
        :param eval_schedule: Number of examples between dev evaluations, or 'epoch' for once per epoch.
        :param train_dev_eval_factor: Evaluate on train once per this many eval_schedules (when train_evaluate is set).
        :return: A trained :any:`Model`
        """
        self._completed_epochs = 0.0
        if self.save:
            self.saver.restore(self.session, self.best_model_path)
        # ========================================================
        # ===========Check data to see if dev eval================
        # ========================================================
        if eval_schedule == 'epoch':
            eval_schedule = train.num_examples
        self._badcount = 0
        start_time = time.time()
        # ============================================================================================
        # =============================TRAINING=======================================================
        # ============================================================================================
        counter = 0
        train_eval_counter = 0
        while self._completed_epochs < self.epochs:  # keeps track of the epoch iteration
            # ==============PER MINI-BATCH=====================================
            newbatch = train.next_batch(self.mb)
            fd = get_feed_list(newbatch, self.placeholderdict, supplement, debug=self.debug)
            self.session.run(self.train_step, feed_dict=fd)
            counter += self.mb
            train_eval_counter += self.mb
            self._completed_epochs += float(self.mb)/float(train.num_examples)
            if self.train_evaluate is not None and train_eval_counter >= train_dev_eval_factor*eval_schedule:
                self.train_eval.append(self.eval(self.evaluate, train, supplement))
                self.train_spot.append(self._completed_epochs)
                if np.isnan(self.train_eval[-1]):
                    print("Aborting training...train evaluates to nan.")
                    break
                if self.verbose:
                    print("epoch: %f train eval: %.10f" % (self._completed_epochs, self.train_eval[-1]))
                train_eval_counter = 0
            if (counter >= eval_schedule or self._completed_epochs >= self.epochs):
                #=================PER eval_schedule==================================
                self._log_summaries(dev, supplement)
                counter = 0
                if dev:
                    self.deverror.append(self.eval(self.evaluate, dev, supplement))
                    self.dev_spot.append(self._completed_epochs)
                    if np.isnan(self.deverror[-1]):
                        print("Aborting training...dev evaluates to nan.")
                        break
                    if self.verbose:
                        print("epoch: %f dev error: %.10f" % (self._completed_epochs, self.deverror[-1]))
                    for tname in self.save_tensors:
                        self._evaluated_tensors[tname] = self.eval(self.save_tensors[tname], dev, supplement)
                        if self.verbose:
                            print("\t%s: %s" % (tname, self._evaluated_tensors[tname]))
                    # ================Early Stopping====================================
                    if self.deverror[-1] < self.best_dev_error:
                        self._badcount = 0
                        self._best_dev_error = self.deverror[-1]
                        if self.save:
                            self.save_path = self.saver.save(self.session, self.best_model_path)
                        self._best_completed_epochs = self._completed_epochs
                    else:
                        self._badcount += 1
                    if self._badcount > self.maxbadcount:
                        print('badcount exceeded: %d' % self._badcount)
                        break
                    # ==================================================================
                self.epoch_times.append(time.time() - start_time)
                start_time = time.time()
    # ================================================================
    # ======================For tensorboard===========================
    # ================================================================
    def _init_summaries(self):
        """Create histogram/scalar summary ops and the SummaryWriter.

        Called from __init__ when tensorboard output is enabled.
        """
        if self.make_histograms:
            self.histogram_summaries.extend(map(tf.histogram_summary,
                                                [var.name for var in tf.trainable_variables()],
                                                tf.trainable_variables()))
            self.histogram_summaries.extend(map(tf.histogram_summary,
                                                ['normalization/'+n.name for n in tf.get_collection('normalized_activations')],
                                                tf.get_collection('normalized_activations')))
            self.histogram_summaries.extend(map(tf.histogram_summary,
                                                ['activation/'+a.name for a in tf.get_collection('activation_layers')],
                                                tf.get_collection('activation_layers')))
        self.loss_summary = tf.scalar_summary('Loss', self.objective)
        self.dev_error_summary = tf.scalar_summary('dev_error', self.evaluate)
        summary_directory = os.path.join(self.logdir,
                                         self.model_name + '-' +
                                         datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S'))
        self._summary_writer = tf.train.SummaryWriter(summary_directory,
                                                      self.session.graph.as_graph_def())
    def _log_summaries(self, dev, supplement):
        """Evaluate and write the configured summaries for the dev set.

        No-op unless tensorboard output is enabled (dev error additionally
        requires a dev set).
        """
        fd = get_feed_list(dev, self.placeholderdict, supplement=supplement, train=0, debug=self.debug)
        if self.tensorboard:
            if self.make_histograms:
                sum_str = self.session.run(self.histogram_summaries, fd)
                for summary in sum_str:
                    self._summary_writer.add_summary(summary, self._completed_epochs)
            loss_sum_str = self.session.run(self.loss_summary, fd)
            self._summary_writer.add_summary(loss_sum_str, self._completed_epochs)
        if dev:
            if self.tensorboard:
                dev_sum_str = self.session.run(self.dev_error_summary, fd)
                self._summary_writer.add_summary(dev_sum_str, self._completed_epochs)
| aarontuor/antk | antk/core/generic_model.py | Python | mit | 19,170 |
import multiprocessing
import socket
from util.ctr.dsp.dsp_predict import OnlineService
def handle(connection, address):
    """Per-connection worker: answer each received payload with a prediction.

    Runs in its own process (see Server.start). Reads up to 1024 bytes per
    request, feeds the payload to the OnlineService singleton, and writes the
    prediction back. On error it logs, best-effort replies with ``-1``, and
    the socket is always closed on exit.
    """
    import logging
    logging.basicConfig(level=logging.DEBUG)
    logger = logging.getLogger("process-%r" % (address,))
    try:
        logger.debug("Connected %r at %r", connection, address)
        while True:
            data = connection.recv(1024)
            if data == b'':
                # Peer performed an orderly shutdown.
                logger.debug("Socket closed remotely")
                break
            predict_service = OnlineService.getInstance()
            predict = predict_service.predict(str(data))
            connection.sendall(str(predict).encode())
    except Exception:
        # Was a bare `except:`; narrowed so KeyboardInterrupt/SystemExit can
        # still terminate the worker process.
        logger.exception("Problem handling request")
        try:
            # Best-effort error reply; the socket may already be unusable.
            connection.sendall(str(-1).encode())
        except Exception:
            logger.debug("Could not send error reply; socket already closed")
    finally:
        logger.debug("Closing socket")
        connection.close()
class Server(object):
    """Minimal TCP server that spawns one daemon process per connection,
    each running handle()."""
    def __init__(self, hostname, port):
        import logging
        self.logger = logging.getLogger("server")
        self.hostname = hostname
        self.port = port
    def start(self):
        """Bind, listen, and dispatch each accepted connection to handle()."""
        self.logger.debug("listening")
        self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.socket.bind((self.hostname, self.port))
        self.socket.listen(1)  # backlog of a single pending connection
        while True:
            conn, address = self.socket.accept()
            self.logger.debug("Got connection")
            # Daemon workers are terminated when the parent exits.
            process = multiprocessing.Process(target=handle, args=(conn, address))
            process.daemon = True
            process.start()
            self.logger.debug("Started process %r", process)
if __name__ == "__main__":
    # Load model weights and learner hyper-parameters once, up front, so the
    # singleton is initialized before any connection is handled.
    # NOTE(review): workers reuse this state only on fork-based platforms;
    # verify behavior where multiprocessing uses spawn.
    predict_service = OnlineService.getInstance()
    print('begin load weights')
    predict_service.load_w('/home/laomie/projects/python/data/dsp_model.csv')
    predict_service.load_learner(.1, .4, .08)
    print('end load weights')
    import logging
    logging.basicConfig(level=logging.DEBUG)
    server = Server("localhost", 50051)
    try:
        logging.info("Listening")
        server.start()
    # NOTE(review): bare except also catches KeyboardInterrupt so Ctrl-C
    # falls through to the cleanup below — possibly intentional.
    except:
        logging.exception("Unexpected exception")
    finally:
        # Terminate and reap all per-connection worker processes.
        logging.info("Shutting down")
        for process in multiprocessing.active_children():
            logging.info("Shutting down process %r", process)
            process.terminate()
            process.join()
        logging.info("All done")
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from django.core.management.base import BaseCommand
from django_telegrambot.apps import DjangoTelegramBot
from django.conf import settings
from bot.models import Alerta, AlertaUsuario, User, Grupo
from django.db.models import Q
import requests
from datetime import datetime, timedelta
from emoji import emojize
from bot.tasks import get_dolar_airtm, get_price_yadio, get_localbitcoin_precio, get_dolar_gobierno
# Per-asset rate endpoints, configured in settings.CRIPTO_MONEDAS.
# Each URL is expected to return the USD exchange rate for one crypto asset
# (Coinbase-style payload: {"data": {"rates": {"USD": ...}}}, see get_price).
URL_BTC_USD = settings.CRIPTO_MONEDAS.get("URL_BTC_USD")
URL_ETH_USD = settings.CRIPTO_MONEDAS.get("URL_ETH_USD")
URL_LTC_USD = settings.CRIPTO_MONEDAS.get("URL_LTC_USD")
URL_BCH_USD = settings.CRIPTO_MONEDAS.get("URL_BCH_USD")
URL_DAS_USD = settings.CRIPTO_MONEDAS.get("URL_DAS_USD")
URL_BTG_USD = settings.CRIPTO_MONEDAS.get("URL_BTG_USD")
URL_XMR_USD = settings.CRIPTO_MONEDAS.get("URL_XMR_USD")
URL_XRP_USD = settings.CRIPTO_MONEDAS.get("URL_XRP_USD")
URL_PRICE_USD = settings.CRIPTO_MONEDAS.get("URL_PRICE_USD")
class Command(BaseCommand):
    # Django management command: fetch the current price for the selected
    # source(s) and push a Telegram alert to every active subscription whose
    # refresh interval elapsed or whose change threshold was crossed.
    help = "Verifica el precio actual del botcoin, si cambio envia un alerta"
    def add_arguments(self, parser):
        """Accept one or more positional source names (e.g. 'bitcoin')."""
        parser.add_argument('comando', nargs='+', type=str)
    def get_price(self, url):
        """Return the USD rate from a Coinbase-style JSON endpoint."""
        return requests.get(url).json().get("data").get("rates").get("USD")
    def obtener_precio_dolar_paralelo_venezuela(self):
        """Return the parallel-market VES/USD transfer rate from DolarToday."""
        rq = requests.get('https://s3.amazonaws.com/dolartoday/data.json')
        devuelto = rq
        response = devuelto.json()['USD']['transferencia']
        return response
    def obtener_precio(self, comando):
        """Dispatch *comando* to its price source; 0 on failure/unknown."""
        if comando == 'bitcoin':
            ultimo_precio = float(self.get_price(URL_BTC_USD))
        elif comando == 'dolartoday':
            ultimo_precio = self.obtener_precio_dolar_paralelo_venezuela()
        elif comando == 'ethereum':
            ultimo_precio = float(self.get_price(URL_ETH_USD))
        elif comando == 'litecoin':
            ultimo_precio = float(self.get_price(URL_LTC_USD))
        elif comando == 'dolarairtm':
            try:
                ultimo_precio = float(get_dolar_airtm())
            except:
                # NOTE(review): bare except hides scraper errors; price
                # falls back to 0, which validar_alarma treats as "no price".
                ultimo_precio = 0
        elif comando == 'dolaryadio':
            try:
                ultimo_precio = float(get_price_yadio())
            except:
                ultimo_precio = 0
        elif comando == 'dolarlocalbitcoin':
            try:
                ultimo_precio = float(get_localbitcoin_precio())
            except:
                ultimo_precio = 0
        elif comando == 'dolarcasasdecambio':
            try:
                ultimo_precio = float(get_dolar_gobierno())
            except:
                ultimo_precio = 0
        else:
            ultimo_precio = 0
        return ultimo_precio
    def validar_alarma(self, comando, chat):
        """Decide whether subscription *chat* should be notified.

        Returns (should_send, rendered_message). A subscription fires when
        its time-based frequency elapsed or the price moved by at least its
        configured percentage relative to the last notified price.
        """
        print(chat.id, chat)
        ultimo_precio = chat.ultimo_precio if chat.ultimo_precio else 0
        precio_actual = self.obtener_precio(comando)
        # Direction marker (emoji alias) for the message text.
        if precio_actual > ultimo_precio:
            alta_o_baja = ":arrow_up:"
        elif precio_actual < ultimo_precio:
            alta_o_baja = ":arrow_down:"
        else:
            alta_o_baja = "Se mantuvo"
        segundos_transcurridos_ultimo_aviso = datetime.now().timestamp() - \
            chat.ultima_actualizacion.timestamp()
        porc_cambio = chat.porcentaje_cambio
        paso = False
        # Time trigger: frecuencia is in minutes.
        if chat.frecuencia:
            if segundos_transcurridos_ultimo_aviso >= (chat.frecuencia * 60):
                paso = True
        # Change trigger: price left the +/- porc_cambio% band.
        if chat.porcentaje_cambio:
            if precio_actual >= (ultimo_precio + (ultimo_precio * (porc_cambio / 100))) or \
                    precio_actual <= (ultimo_precio - (ultimo_precio * (porc_cambio / 100))):
                paso = True
        preparar_mensaje = ":bell: {0} {1} a: {2:0,.2f}".format(
            comando,
            alta_o_baja,
            precio_actual
        )
        mensaje_a_chat = emojize(preparar_mensaje, use_aliases=True)
        return paso, mensaje_a_chat
    def generar_alerta(self, comando):
        """Notify every active subscription for *comando* that qualifies."""
        precio_actual = self.obtener_precio(comando)
        # Skip subscriptions already notified at exactly this price.
        lista_de_alertas = AlertaUsuario.objects.filter(
            alerta__comando=comando, estado="A").exclude(
            ultimo_precio=precio_actual)
        for chat in lista_de_alertas:
            enviar, mensaje_a_chat = self.validar_alarma(comando, chat)
            if enviar:
                # Send the alert
                try:
                    message = DjangoTelegramBot.dispatcher.bot.sendMessage(
                        chat.chat_id,
                        mensaje_a_chat)
                    try:
                        chat_msg_id = message.message_id
                        # DjangoTelegramBot.dispatcher.bot.pinChatMessage(
                        #     chat_id=chat.chat_id,
                        #     message_id=chat_msg_id)
                    except:
                        pass
                except Exception as E:
                    # Send failed (user blocked the bot / chat deleted):
                    # purge every record tied to this chat id and move on.
                    print('Error Alarmas', E)
                    AlertaUsuario.objects.filter(chat_id=chat.chat_id).delete()
                    User.objects.filter(chat_id=chat.chat_id).delete()
                    Grupo.objects.filter(grupo_id=chat.chat_id).delete()
                    continue
                # Update the timestamp and last-notified price
                AlertaUsuario.objects.filter(id=chat.id).update(
                    ultima_actualizacion=datetime.now(),
                    ultimo_precio=precio_actual)
    def handle(self, *args, **options):
        # Only the first matching source is processed per invocation.
        if 'dolartoday' in options.get("comando"):
            self.generar_alerta('dolartoday')
        elif 'bitcoin' in options.get("comando"):
            self.generar_alerta("bitcoin")
        elif 'ethereum' in options.get("comando"):
            self.generar_alerta("ethereum")
        elif 'litecoin' in options.get("comando"):
            self.generar_alerta("litecoin")
        elif 'dolarairtm' in options.get("comando"):
            self.generar_alerta("dolarairtm")
        elif 'dolaryadio' in options.get("comando"):
            self.generar_alerta("dolaryadio")
        elif 'dolarlocalbitcoin' in options.get("comando"):
            self.generar_alerta("dolarlocalbitcoin")
        elif 'dolarcasasdecambio' in options.get("comando"):
            self.generar_alerta("dolarcasasdecambio")
        self.stdout.write('Ejecutando comando')
| foxcarlos/decimemijobot | bot/management/commands/alerta_bitcoin.py | Python | gpl-3.0 | 6,414 |
# -*- coding: utf-8 -*-
from taburet.report import *
def pytest_funcarg__sheet(request):
    """pytest funcarg: a fresh Worksheet named 'some' for each test."""
    worksheet = Worksheet('some')
    return worksheet
def test_cell_access(sheet):
    # Slice-style [row:col] access must write through to the backing cell
    # matrix, for both numeric and non-ASCII string values.
    sheet[0:0].value = 5
    sheet[1:3].value = 'значение'
    assert sheet._cells[0][0].value == 5
    assert sheet._cells[1][3].value == 'значение'
def test_row_access(sheet):
    """sheet[n:] addresses row n."""
    row = sheet[3:]
    assert row.index == 3
def test_column_access(sheet):
    """sheet[:n] addresses column n."""
    column = sheet[:5]
    assert column.index == 5
from __future__ import division
from pandas.compat import range, lrange, zip, reduce
from pandas import compat
import numpy as np
from pandas.core.base import StringMixin
from pandas.util.decorators import cache_readonly
from pandas.core.frame import DataFrame
from pandas.core.panel import Panel
from pandas.core.series import Series
import pandas.stats.common as common
from pandas.stats.math import inv
from pandas.stats.ols import _combine_rhs
class VAR(StringMixin):
    """
    Estimates VAR(p) regression on multivariate time series data
    presented in pandas data structures.
    Parameters
    ----------
    data : DataFrame or dict of Series
    p : lags to include
    intercept : bool, default True
        Whether to include an intercept term in each equation.
    """
    def __init__(self, data, p=1, intercept=True):
        # Import is only a presence check for statsmodels (old or new
        # namespace); the bound name is not used afterwards.
        try:
            import statsmodels.tsa.vector_ar.api as sm_var
        except ImportError:
            import scikits.statsmodels.tsa.var as sm_var
        self._data = DataFrame(_combine_rhs(data))
        self._p = p
        self._columns = self._data.columns
        self._index = self._data.index
        self._intercept = intercept
    @cache_readonly
    def aic(self):
        """Returns the Akaike information criterion."""
        return self._ic['aic']
    @cache_readonly
    def bic(self):
        """Returns the Bayesian information criterion."""
        return self._ic['bic']
    @cache_readonly
    def beta(self):
        """
        Returns a DataFrame, where each column x1 contains the betas
        calculated by regressing the x1 column of the VAR input with
        the lagged input.
        Returns
        -------
        DataFrame
        """
        d = dict([(key, value.beta)
                  for (key, value) in compat.iteritems(self.ols_results)])
        return DataFrame(d)
    def forecast(self, h):
        """
        Returns a DataFrame containing the forecasts for 1, 2, ..., h time
        steps. Each column x1 contains the forecasts of the x1 column.
        Parameters
        ----------
        h: int
            Number of time steps ahead to forecast.
        Returns
        -------
        DataFrame
        """
        forecast = self._forecast_raw(h)[:, 0, :]
        return DataFrame(forecast, index=lrange(1, 1 + h),
                         columns=self._columns)
    def forecast_cov(self, h):
        """
        Returns the covariance of the forecast residuals.
        Returns
        -------
        DataFrame
        """
        return [DataFrame(value, index=self._columns, columns=self._columns)
                for value in self._forecast_cov_raw(h)]
    def forecast_std_err(self, h):
        """
        Returns the standard errors of the forecast residuals.
        Returns
        -------
        DataFrame
        """
        return DataFrame(self._forecast_std_err_raw(h),
                         index=lrange(1, 1 + h), columns=self._columns)
    @cache_readonly
    def granger_causality(self):
        """Returns the f-stats and p-values from the Granger Causality Test.
        If the data consists of columns x1, x2, x3, then we perform the
        following regressions:
        x1 ~ L(x2, x3)
        x1 ~ L(x1, x3)
        x1 ~ L(x1, x2)
        The f-stats of these results are placed in the 'x1' column of the
        returned DataFrame. We then repeat for x2, x3.
        Returns
        -------
        Dict, where 'f-stat' returns the DataFrame containing the f-stats,
        and 'p-value' returns the DataFrame containing the corresponding
        p-values of the f-stats.
        """
        from pandas.stats.api import ols
        from scipy.stats import f
        # d[col] holds the lagged regressors with `col` itself excluded
        # (the "restricted" model for testing whether col Granger-causes).
        d = {}
        for col in self._columns:
            d[col] = {}
            for i in range(1, 1 + self._p):
                lagged_data = self._lagged_data[i].filter(
                    self._columns - [col])
                for key, value in compat.iteritems(lagged_data):
                    d[col][_make_param_name(i, key)] = value
        f_stat_dict = {}
        p_value_dict = {}
        for col, y in compat.iteritems(self._data):
            ssr_full = (self.resid[col] ** 2).sum()
            f_stats = []
            p_values = []
            for col2 in self._columns:
                result = ols(y=y, x=d[col2])
                resid = result.resid
                ssr_reduced = (resid ** 2).sum()
                # Standard restricted-vs-full F test with M restrictions.
                M = self._p
                N = self._nobs
                K = self._k * self._p + 1
                f_stat = ((ssr_reduced - ssr_full) / M) / (ssr_full / (N - K))
                f_stats.append(f_stat)
                p_value = f.sf(f_stat, M, N - K)
                p_values.append(p_value)
            f_stat_dict[col] = Series(f_stats, self._columns)
            p_value_dict[col] = Series(p_values, self._columns)
        f_stat_mat = DataFrame(f_stat_dict)
        p_value_mat = DataFrame(p_value_dict)
        return {
            'f-stat': f_stat_mat,
            'p-value': p_value_mat,
        }
    @cache_readonly
    def ols_results(self):
        """
        Returns the results of the regressions:
        x_1 ~ L(X)
        x_2 ~ L(X)
        ...
        x_k ~ L(X)
        where X = [x_1, x_2, ..., x_k]
        and L(X) represents the columns of X lagged 1, 2, ..., n lags
        (n is the user-provided number of lags).
        Returns
        -------
        dict
        """
        from pandas.stats.api import ols
        d = {}
        for i in range(1, 1 + self._p):
            for col, series in compat.iteritems(self._lagged_data[i]):
                d[_make_param_name(i, col)] = series
        result = dict([(col, ols(y=y, x=d, intercept=self._intercept))
                       for col, y in compat.iteritems(self._data)])
        return result
    @cache_readonly
    def resid(self):
        """
        Returns the DataFrame containing the residuals of the VAR regressions.
        Each column x1 contains the residuals generated by regressing the x1
        column of the input against the lagged input.
        Returns
        -------
        DataFrame
        """
        d = dict([(col, series.resid)
                  for (col, series) in compat.iteritems(self.ols_results)])
        return DataFrame(d, index=self._index)
    @cache_readonly
    def summary(self):
        # Plain-text report; template placeholders filled from params below.
        template = """
%(banner_top)s
Number of Observations: %(nobs)d
AIC: %(aic).3f
BIC: %(bic).3f
%(banner_coef)s
%(coef_table)s
%(banner_end)s
"""
        params = {
            'banner_top': common.banner('Summary of VAR'),
            'banner_coef': common.banner('Summary of Estimated Coefficients'),
            'banner_end': common.banner('End of Summary'),
            'coef_table': self.beta,
            'aic': self.aic,
            'bic': self.bic,
            'nobs': self._nobs,
        }
        return template % params
    @cache_readonly
    def _alpha(self):
        """
        Returns array where the i-th element contains the intercept
        when regressing the i-th column of self._data with the lagged data.
        """
        if self._intercept:
            return self._beta_raw[-1]
        else:
            return np.zeros(self._k)
    @cache_readonly
    def _beta_raw(self):
        return np.array([list(self.beta[col].values()) for col in self._columns]).T
    def _trans_B(self, h):
        """
        Returns 0, 1, ..., (h-1)-th power of transpose of B as defined in
        equation (4) on p. 142 of the Stata 11 Time Series reference book.
        """
        result = [np.eye(1 + self._k * self._p)]
        # B is the companion-form matrix: first row selects the constant,
        # second block row holds [alpha | B_1 ... B_p], the rest shifts lags.
        row1 = np.zeros((1, 1 + self._k * self._p))
        row1[0, 0] = 1
        v = self._alpha.reshape((self._k, 1))
        row2 = np.hstack(tuple([v] + self._lag_betas))
        m = self._k * (self._p - 1)
        row3 = np.hstack((
            np.zeros((m, 1)),
            np.eye(m),
            np.zeros((m, self._k))
        ))
        trans_B = np.vstack((row1, row2, row3)).T
        result.append(trans_B)
        for i in range(2, h):
            result.append(np.dot(trans_B, result[i - 1]))
        return result
    @cache_readonly
    def _x(self):
        # Design matrix: a leading column of ones (intercept) followed by
        # all lagged columns, trimmed of the first p rows lost to lagging.
        values = np.array([
            list(self._lagged_data[i][col].values())
            for i in range(1, 1 + self._p)
            for col in self._columns
        ]).T
        x = np.hstack((np.ones((len(values), 1)), values))[self._p:]
        return x
    @cache_readonly
    def _cov_beta(self):
        cov_resid = self._sigma
        x = self._x
        inv_cov_x = inv(np.dot(x.T, x))
        return np.kron(inv_cov_x, cov_resid)
    def _data_xs(self, i):
        """
        Returns the cross-section of the data at the given timestep.
        """
        return self._data.values[i]
    def _forecast_cov_raw(self, n):
        resid = self._forecast_cov_resid_raw(n)
        # beta = self._forecast_cov_beta_raw(n)
        # return [a + b for a, b in zip(resid, beta)]
        # TODO: ignore the beta forecast std err until it's verified
        return resid
    def _forecast_cov_beta_raw(self, n):
        """
        Returns the covariance of the beta errors for the forecast at
        1, 2, ..., n timesteps.
        """
        p = self._p
        values = self._data.values
        T = len(values) - self._p - 1
        results = []
        for h in range(1, n + 1):
            psi = self._psi(h)
            trans_B = self._trans_B(h)
            sum = 0
            cov_beta = self._cov_beta
            for t in range(T + 1):
                index = t + p
                # Regressor vector at time t: [1, y_t, y_{t-1}, ..., y_{t-p+1}].
                y = values.take(lrange(index, index - p, -1), axis=0).ravel()
                trans_Z = np.hstack(([1], y))
                trans_Z = trans_Z.reshape(1, len(trans_Z))
                sum2 = 0
                for i in range(h):
                    ZB = np.dot(trans_Z, trans_B[h - 1 - i])
                    prod = np.kron(ZB, psi[i])
                    sum2 = sum2 + prod
                sum = sum + chain_dot(sum2, cov_beta, sum2.T)
            results.append(sum / (T + 1))
        return results
    def _forecast_cov_resid_raw(self, h):
        """
        Returns the covariance of the residual errors for the forecast at
        1, 2, ..., h timesteps.
        """
        psi_values = self._psi(h)
        sum = 0
        result = []
        for i in range(h):
            psi = psi_values[i]
            sum = sum + chain_dot(psi, self._sigma, psi.T)
            result.append(sum)
        return result
    def _forecast_raw(self, h):
        """
        Returns the forecast at 1, 2, ..., h timesteps in the future.
        """
        k = self._k
        result = []
        for i in range(h):
            sum = self._alpha.reshape(1, k)
            for j in range(self._p):
                beta = self._lag_betas[j]
                idx = i - j
                # Use earlier forecasts once available, otherwise observed data.
                if idx > 0:
                    y = result[idx - 1]
                else:
                    y = self._data_xs(idx - 1)
                sum = sum + np.dot(beta, y.T).T
            result.append(sum)
        return np.array(result)
    def _forecast_std_err_raw(self, h):
        """
        Returns the standard error of the forecasts
        at 1, 2, ..., n timesteps.
        """
        return np.array([np.sqrt(np.diag(value))
                         for value in self._forecast_cov_raw(h)])
    @cache_readonly
    def _ic(self):
        """
        Returns the Akaike/Bayesian information criteria.
        """
        RSS = self._rss
        k = self._p * (self._k * self._p + 1)
        n = self._nobs * self._k
        return {'aic': 2 * k + n * np.log(RSS / n),
                'bic': n * np.log(RSS / n) + k * np.log(n)}
    @cache_readonly
    def _k(self):
        # Number of endogenous variables (columns).
        return len(self._columns)
    @cache_readonly
    def _lag_betas(self):
        """
        Returns list of B_i, where B_i represents the (k, k) matrix
        with the j-th row containing the betas of regressing the j-th
        column of self._data with self._data lagged i time steps.
        First element is B_1, second element is B_2, etc.
        """
        k = self._k
        b = self._beta_raw
        return [b[k * i: k * (i + 1)].T for i in range(self._p)]
    @cache_readonly
    def _lagged_data(self):
        return dict([(i, self._data.shift(i))
                     for i in range(1, 1 + self._p)])
    @cache_readonly
    def _nobs(self):
        return len(self._data) - self._p
    def _psi(self, h):
        """
        psi value used for calculating standard error.
        Returns [psi_0, psi_1, ..., psi_(h - 1)]
        """
        k = self._k
        result = [np.eye(k)]
        for i in range(1, h):
            result.append(sum(
                [np.dot(result[i - j], self._lag_betas[j - 1])
                 for j in range(1, 1 + i)
                 if j <= self._p]))
        return result
    @cache_readonly
    def _resid_raw(self):
        resid = np.array([self.ols_results[col]._resid_raw
                          for col in self._columns])
        return resid
    @cache_readonly
    def _rss(self):
        """Returns the sum of the squares of the residuals."""
        return (self._resid_raw ** 2).sum()
    @cache_readonly
    def _sigma(self):
        """Returns covariance of resids."""
        k = self._k
        n = self._nobs
        resid = self._resid_raw
        return np.dot(resid, resid.T) / (n - k)
    def __unicode__(self):
        return self.summary
def lag_select(data, max_lags=5, ic=None):
    """
    Select number of lags based on a variety of information criteria
    Parameters
    ----------
    data : DataFrame-like
    max_lags : int
        Maximum number of lags to evaluate
    ic : {None, 'aic', 'bic', ...}
        Choosing None will just display the results
    Returns
    -------
    None
    """
    # NOTE(review): intentionally unimplemented stub kept for API
    # compatibility; calling it is a no-op.
    pass
class PanelVAR(VAR):
    """
    Performs Vector Autoregression on panel data.
    Parameters
    ----------
    data: Panel or dict of DataFrame
    lags: int
    """
    def __init__(self, data, lags, intercept=True):
        # Note: deliberately does not call VAR.__init__ (no statsmodels
        # check, no _index); panel items play the role of columns.
        self._data = _prep_panel_data(data)
        self._p = lags
        self._intercept = intercept
        self._columns = self._data.items
    @cache_readonly
    def _nobs(self):
        """Returns the number of observations."""
        # Usable timesteps per entity, summed over all entities.
        _, timesteps, entities = self._data.values.shape
        return (timesteps - self._p) * entities
    @cache_readonly
    def _rss(self):
        """Returns the sum of the squares of the residuals."""
        return (self.resid.values ** 2).sum()
    def forecast(self, h):
        """
        Returns the forecasts at 1, 2, ..., n timesteps in the future.
        """
        forecast = self._forecast_raw(h).T.swapaxes(1, 2)
        index = lrange(1, 1 + h)
        w = Panel(forecast, items=self._data.items, major_axis=index,
                  minor_axis=self._data.minor_axis)
        return w
    @cache_readonly
    def resid(self):
        """
        Returns the DataFrame containing the residuals of the VAR regressions.
        Each column x1 contains the residuals generated by regressing the x1
        column of the input against the lagged input.
        Returns
        -------
        DataFrame
        """
        d = dict([(key, value.resid)
                  for (key, value) in compat.iteritems(self.ols_results)])
        return Panel.fromDict(d)
    def _data_xs(self, i):
        # Cross-section at timestep i: (entities x items).
        return self._data.values[:, i, :].T
    @cache_readonly
    def _sigma(self):
        """Returns covariance of resids."""
        k = self._k
        # Long format drops rows lost to lagging before computing covariance.
        resid = _drop_incomplete_rows(self.resid.toLong().values)
        n = len(resid)
        return np.dot(resid.T, resid) / (n - k)
def _prep_panel_data(data):
    """Converts the given data into a Panel."""
    return data if isinstance(data, Panel) else Panel.fromDict(data)
def _drop_incomplete_rows(array):
mask = np.isfinite(array).all(1)
indices = np.arange(len(array))[mask]
return array.take(indices, 0)
def _make_param_name(lag, name):
return 'L%d.%s' % (lag, name)
def chain_dot(*matrices):
    """
    Returns the dot product of the given matrices.
    Parameters
    ----------
    matrices: argument list of ndarray

    Multiplication associates right-to-left, matching the original
    ``reduce`` over the reversed argument list bit-for-bit.
    """
    product = matrices[-1]
    for m in matrices[-2::-1]:
        product = np.dot(m, product)
    return product
| Vvucinic/Wander | venv_2_7/lib/python2.7/site-packages/pandas/stats/var.py | Python | artistic-2.0 | 16,319 |
# Copyright (C) 2021 Nippon Telegraph and Telephone Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tacker.sol_refactored.objects import fields
# NFV-SOL 003
# - v2.6.1 9.5.4.3 (API version: 1.3.0)
# - v2.7.1 9.5.4.3 (API version: 1.3.0)
# - v2.8.1 9.5.4.3 (API version: 1.3.0)
# - v3.3.1 9.5.4.3 (API version: 1.4.0)
class GrantedLcmOperationType(fields.BaseTackerEnum):
    # VNF LCM operation types allowed in a Grant request (ETSI NFV-SOL 003,
    # GrantRequest.operation). Entries marked "since 1.4.0" were added with
    # API version 1.4.0 (SOL 003 v3.3.1).
    INSTANTIATE = 'INSTANTIATE'
    SCALE = 'SCALE'
    SCALE_TO_LEVEL = 'SCALE_TO_LEVEL'
    CHANGE_FLAVOUR = 'CHANGE_FLAVOUR'
    TERMINATE = 'TERMINATE'
    HEAL = 'HEAL'
    OPERATE = 'OPERATE'
    CHANGE_EXT_CONN = 'CHANGE_EXT_CONN'
    CHANGE_VNFPKG = 'CHANGE_VNFPKG'  # since 1.4.0
    CREATE_SNAPSHOT = 'CREATE_SNAPSHOT'  # since 1.4.0
    REVERT_TO_SNAPSHOT = 'REVERT_TO_SNAPSHOT'  # since 1.4.0
    # Canonical tuple of every valid value, consumed by the enum machinery.
    ALL = (INSTANTIATE, SCALE, SCALE_TO_LEVEL, CHANGE_FLAVOUR,
           TERMINATE, HEAL, OPERATE, CHANGE_EXT_CONN, CHANGE_VNFPKG,
           CREATE_SNAPSHOT, REVERT_TO_SNAPSHOT)
class GrantedLcmOperationTypeField(fields.BaseEnumField):
    # Object field restricted to GrantedLcmOperationType values.
    AUTO_TYPE = GrantedLcmOperationType()
| openstack/tacker | tacker/sol_refactored/objects/v1/fields.py | Python | apache-2.0 | 1,629 |
# coding=utf-8
from HTMLParser import HTMLParser
__author__ = 'xubinggui'
class MyHTMLParser(HTMLParser):
    """Tracing parser: prints a marker for every markup event it receives."""
    def handle_starttag(self, tag, attrs):
        print('<{0}>'.format(tag))
    def handle_endtag(self, tag):
        print('</{0}>'.format(tag))
    def handle_startendtag(self, tag, attrs):
        print('<{0}/>'.format(tag))
    def handle_data(self, data):
        # The literal word 'data' is printed, not the payload itself.
        print('data')
    def handle_comment(self, data):
        print('<!-- -->')
    def handle_entityref(self, name):
        print('&{0};'.format(name))
    def handle_charref(self, name):
        print('&#{0};'.format(name))
# Drive the tracing parser over a small document; each markup event prints.
parser = MyHTMLParser()
parser.feed('<html><head></head><body><p>Some <a href=\"#\">html</a> tutorial...<br>END</p></body></html>')
# -*- coding: utf-8 -*-
from rest_framework.response import Response
from rest_framework.viewsets import ModelViewSet
from rest_framework_extensions.mixins import NestedViewSetMixin
from rest_framework_extensions.decorators import action, link
from .models import (
DefaultRouterUserModel,
DefaultRouterGroupModel,
DefaultRouterPermissionModel,
)
class UserViewSet(NestedViewSetMixin, ModelViewSet):
    # CRUD endpoints for DefaultRouterUserModel; NestedViewSetMixin filters
    # the queryset by parent lookup kwargs when routed as a nested resource.
    model = DefaultRouterUserModel
class GroupViewSet(NestedViewSetMixin, ModelViewSet):
    # CRUD endpoints for DefaultRouterGroupModel (nestable, see UserViewSet).
    model = DefaultRouterGroupModel
class PermissionViewSet(NestedViewSetMixin, ModelViewSet):
    # CRUD endpoints for DefaultRouterPermissionModel (nestable).
    model = DefaultRouterPermissionModel
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from io import BytesIO
from _shaded_thriftpy._compat import CYTHON
from ..base import TTransportBase
class TBufferedTransport(TTransportBase):
    """Class that wraps another transport and buffers its I/O.

    Reads pull at least ``buf_size`` bytes from the wrapped transport at a
    time; writes accumulate in memory until flush() is called.
    """
    DEFAULT_BUFFER = 4096
    def __init__(self, trans, buf_size=DEFAULT_BUFFER):
        self._trans = trans
        self._wbuf = BytesIO()
        self._rbuf = BytesIO(b"")
        self._buf_size = buf_size
    def is_open(self):
        return self._trans.is_open()
    def open(self):
        return self._trans.open()
    def close(self):
        return self._trans.close()
    def _read(self, sz):
        data = self._rbuf.read(sz)
        missing = sz - len(data)
        if missing == 0:
            return data
        # Buffer miss: fetch a full chunk from the wrapped transport and
        # stash whatever we do not return for subsequent reads.
        chunk = self._trans.read(max(missing, self._buf_size))
        self._rbuf = BytesIO(chunk[missing:])
        return data + chunk[:missing]
    def write(self, buf):
        self._wbuf.write(buf)
    def flush(self):
        pending = self._wbuf.getvalue()
        # Reset the write buffer first so our state stays clean even if the
        # underlying write/flush raises.
        self._wbuf = BytesIO()
        self._trans.write(pending)
        self._trans.flush()
    def getvalue(self):
        return self._trans.getvalue()
class TBufferedTransportFactory(object):
    """Thrift factory API: wraps a transport in a TBufferedTransport."""
    def get_transport(self, trans):
        return TBufferedTransport(trans)
if CYTHON:
from .cybuffered import TCyBufferedTransport, TCyBufferedTransportFactory # noqa
| jwren/intellij-community | python/helpers/third_party/thriftpy/_shaded_thriftpy/transport/buffered/__init__.py | Python | apache-2.0 | 1,680 |
import json
import logging
import os
import requests
from ryu.app import simple_switch
from webob import Response
from ryu.app.wsgi import ControllerBase, WSGIApplication, route
from ryu.app import ofctl_rest
from ryu.lib.mac import haddr_to_bin
from ryu.base import app_manager
from ryu.controller import ofp_event, dpset
from ryu.lib.ofctl_v1_0 import mod_flow_entry, delete_flow_entry
# WSGI registration key under which the Ryu app instance is exposed.
simple_switch_instance_name = 'qchannel_api_app'
# System-wide fallback channel configuration; `qconf` (cwd-relative) wins.
qconf_default = '/var/lib/qcrypt/channels.json'
qconf = 'channels.json'
# Installation prefix for the helper binaries used by the stats endpoints.
#qpath = '/etc/qcrypt/'
qpath = '/usr/share/qcrypt/'
#class QuantumSwitchRest(simple_switch.SimpleSwitch, ofctl_rest.StatsController):
class QuantumSwitchRest(simple_switch.SimpleSwitch):
    # Ryu app: learning switch plus a REST API (QuantumSwitchController)
    # for managing quantum-encrypted channels.
    # Contexts injected by the Ryu framework at startup.
    _CONTEXTS = { 'dpset': dpset.DPSet,
                  'wsgi': WSGIApplication }
    def __init__(self, *args, **kwargs):
        super(QuantumSwitchRest, self).__init__(*args, **kwargs)
        self.switches = {}
        # NOTE(review): local `dpset` shadows the module-level import.
        dpset = kwargs['dpset']
        wsgi = kwargs['wsgi']
        self.data = {}
        self.data['dpset'] = dpset
        self.data['waiters'] = {}
        # Expose this instance to the REST controller under the shared key.
        wsgi.register(QuantumSwitchController, {simple_switch_instance_name : self})
    def add_flow(self, datapath, in_port, dst, actions):
        """Install an OF1.0 flow matching (in_port, dl_dst) -> actions."""
        ofproto = datapath.ofproto
        match = datapath.ofproto_parser.OFPMatch(
            in_port=in_port, dl_dst=haddr_to_bin(dst))
        mod = datapath.ofproto_parser.OFPFlowMod(
            datapath=datapath, match=match, cookie=0,
            command=ofproto.OFPFC_ADD, idle_timeout=0, hard_timeout=0,
            #priority=ofproto.OFP_DEFAULT_PRIORITY,
            priority=100,
            flags=ofproto.OFPFF_SEND_FLOW_REM, actions=actions)
        datapath.send_msg(mod)
class QuantumSwitchController(ControllerBase):
    # REST controller: key distribution (/qkey), configuration (/conf) and
    # channel mode switching (/qchannel) for quantum-encrypted links.
    def __init__(self, req, link, data, **config):
        super(QuantumSwitchController, self).__init__(req, link, data, **config)
        self.name = self.__class__.__name__
        self.logger = logging.getLogger(self.name)
        #self._set_logger()
        #self.logger.info("dir(data): " + `dir(data)`)
        self.simpl_switch_spp = data[simple_switch_instance_name]
        self.dpset = self.simpl_switch_spp.data['dpset']
        # Load channel config from cwd first, then the system-wide default.
        try:
            jsondict = json.load(open(qconf))
            self.logger.info("JSON configuration loaded from %s:" % qconf)
        except IOError:
            try:
                jsondict = json.load(open(qconf_default))
                self.logger.info("JSON configuration loaded from %s:" % qconf_default)
            except IOError:
                self.logger.info("Can't open %s and %s" % (qconf,qconf_default))
                self.dst = {}
                self.channels = {}
        # NOTE(review): if both opens fail, `jsondict` is unbound and the
        # block below raises NameError (not ValueError); also a missing key
        # would raise KeyError, which ValueError does not catch.
        try:
            self.dst = jsondict["dst"]
            self.channels = jsondict["channels"]
            self.logger.info(json.dumps(jsondict))
        except ValueError:
            self.logger.info("JSON syntaxis error in %s" % qconf)
            self.dst = {}
            self.channels = {}
    def set_flows(self, dp, f):
        """Replace all flows on *dp* with a DNAT/SNAT pair for profile *f*.

        Forward: rewrite traffic entering iport for dst nw/tp to f's
        endpoint; reverse: undo the rewrite for replies.
        """
        ofproto = dp.ofproto
        delete_flow_entry(dp)
        flow = {"priority":1000,
                "match":{"dl_type": 0x0800, "nw_proto": 6,"in_port":f["iport"], "nw_dst":self.dst['nw'],"tp_dst":self.dst['tp']},
                "actions":[{"type":"SET_NW_DST","nw_dst":f["nw"]},{"type":"SET_TP_DST","tp_dst":f["tp"]},
                           {"type":"SET_DL_DST","dl_dst":f["dl"]},{"type":"OUTPUT", "port":f["oport"]}]}
        mod_flow_entry(dp, flow, ofproto.OFPFC_ADD)
        flow = {"priority":1000,
                "match":{"dl_type": 0x0800, "nw_proto": 6,"in_port":f["oport"],"nw_src":f["nw"],"tp_src":f["tp"]},
                "actions":[{"type":"SET_NW_SRC","nw_src":self.dst['nw']},{"type":"SET_TP_SRC","tp_src":self.dst['tp']},
                           {"type":"SET_DL_SRC","dl_src":f["dl"]},{"type":"OUTPUT", "port":f["iport"]}]}
        mod_flow_entry(dp, flow, ofproto.OFPFC_ADD)
    @route('qkey', '/qkey/{channel}', methods=['POST'],
           requirements={'channel': r'[0-9]'})
    def post_handler(self, req, **kwargs):
        """Fan the POSTed quantum key out to the channel's key points."""
        # In JSON file channel number is the key (string)
        channel = kwargs['channel']
        c = self.channels[channel]
        dp = self.dpset.get(c['dpid'])
        qkey = req.body
        addr = req.remote_addr
        # NOTE(review): dead store -- this json.dumps result is immediately
        # overwritten by the plain-text body on the next line.
        body = json.dumps({'channel': channel,
                           'qkey': qkey,
                           'addr': addr})
        body = "ADDR: %s QKEY: %s\n" % (addr, qkey)
        self.logger.info("body: " + body)
        for url in c["qcrypt"]["key_poins"]:
            body += "URL %s RESULT:\n" % url
            try:
                r = requests.post(url, qkey)
                body += r.text
            except: #requests.ConnectionError:
                body += "URL %s: can't connect\n" % url
        return Response(content_type='application/json', body=body)
    @route('conf', '/conf', methods=['GET'])
    def get_conf_handler(self, req, **kwargs):
        """Return the current channel configuration as JSON."""
        body = json.dumps({'dst': self.dst,
                           'channels': self.channels})
        return Response(content_type='application/json', body=body)
    @route('conf', '/conf', methods=['POST'])
    def set_conf_handler(self, req, **kwargs):
        """Replace and persist the configuration; responds with the OLD one."""
        body = json.dumps({'dst': self.dst,
                           'channels': self.channels})
        jsondict = json.loads(req.body)
        self.dst = jsondict["dst"]
        self.channels = jsondict["channels"]
        f = open(qconf, "w+")
        f.write(json.dumps({'dst': self.dst,
                            'channels': self.channels}))
        f.close()
        return Response(content_type='application/json', body=body)
    @route('qchannel', '/qchannel/{channel}/{status}', methods=['GET'], requirements={'channel': r'[0-9]', 'status': r'[0-6]'})
    def get_handler(self, req, **kwargs):
        """Switch a channel's mode or query stats, by numeric status code:
        0=static crypto, 1=quantum crypto, 2=reread key, 3=transparent,
        4=delete flows, 5/6=traffic statistics helpers."""
        # In JSON file channel number is the key (string)
        channel = kwargs['channel']
        status = int(kwargs['status'])
        result = -1
        c = self.channels[channel]
        dp = self.dpset.get(c['dpid'])
        if status == 0:
            result = c["scrypt"]
            self.set_flows(dp, result)
        elif status == 1:
            result = c["qcrypt"]
            self.set_flows(dp, result)
        elif status == 2:
            result = 'REREAD' # reread quantum key file
        elif status == 3:
            result = c["transp"]
            self.set_flows(dp, result)
        elif status == 4:
            result = 'DELETE'
            delete_flow_entry(dp)
        elif status == 5:
            result = os.system(qpath + '/bin/crypto_stat eth1 eth2')
        elif status == 6:
            p = os.popen(qpath + '/bin/crypto_stat_get eth1 eth2')
            result = p.readlines()
        body = json.dumps({'channel': channel, 'status': status, 'result': result})
        return Response(content_type='application/json', body=body)
| itmo-infocom/qnet | of-qnet/qcrypt.py | Python | gpl-3.0 | 6,971 |
# coding=utf-8
# @license
# Copyright 2019-2020 Google Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Wrappers for representing a Neuroglancer coordinate space."""
import collections
import numpy as np
# Registry of public names: export() appends to it, so `from module import *`
# exposes exactly the decorated definitions.
__all__ = []
def export(obj):
    """Decorator: register *obj* in ``__all__`` and return it unchanged."""
    __all__.append(obj.__name__)
    return obj
# SI prefix -> decimal exponent (e.g. 'k' -> 3 means 10**3).
si_prefixes = {
    'Y': 24,
    'Z': 21,
    'E': 18,
    'P': 15,
    'T': 12,
    'G': 9,
    'M': 6,
    'k': 3,
    'h': 2,
    '': 0,
    'c': -2,
    'm': -3,
    'u': -6,
    'µ': -6,
    'n': -9,
    'p': -12,
    'f': -15,
    'a': -18,
    'z': -21,
    'y': -24,
}
# Base units for which prefixed spellings are generated.
si_units = ['m', 's', 'rad/s', 'Hz']
# Every '<prefix><unit>' spelling -> (base unit, exponent).
si_units_with_prefixes = {}
for _si_prefix, _si_exponent in si_prefixes.items():
    for _si_unit in si_units:
        si_units_with_prefixes['%s%s' % (_si_prefix, _si_unit)] = (_si_unit, _si_exponent)
del _si_prefix, _si_exponent, _si_unit
# The dimensionless (unit-less) spelling.
si_units_with_prefixes[''] = ('', 0)
def parse_unit(scale, unit):
    """Split *unit* into its base unit and fold the SI prefix into *scale*.

    Negative exponents are applied by division (not a float power) so the
    numeric result matches the original implementation exactly.
    """
    unit, exponent = si_units_with_prefixes[unit]
    if exponent < 0:
        return (scale / 10**(-exponent), unit)
    return (scale * 10**exponent, unit)
@export
class CoordinateArray:
    """Bidirectional mapping between integer coordinates and string labels
    along one dimension of a coordinate space.

    Integer lookup (``ca[3]``) returns the label; string lookup (``ca["x"]``)
    performs the reverse search and returns the coordinate.
    """
    __slots__ = ('_data')
    def __init__(self, json_data=None, labels=None, coordinates=None, mappings=None):
        """Build the coordinate->label mapping.

        Entries accumulate in order from ``mappings``, then ``labels``
        (paired with ``coordinates``, defaulting to ``range(len(labels))``),
        then ``json_data`` ({"coordinates": [...], "labels": [...]}); later
        sources overwrite earlier ones for duplicate coordinates.
        """
        if mappings is None:
            mappings = dict()
        else:
            mappings = dict(mappings)
        if labels is not None:
            if coordinates is None:
                coordinates = range(len(labels))
            for coordinate, label in zip(coordinates, labels):
                mappings[coordinate] = label
        if json_data is not None:
            if not isinstance(json_data,
                              dict) or 'coordinates' not in json_data or 'labels' not in json_data:
                raise ValueError('Expected object with "coordinates" and "labels" properties')
            coordinates = json_data['coordinates']
            labels = json_data['labels']
            for coordinate, label in zip(coordinates, labels):
                mappings[coordinate] = label
        self._data = mappings
    def __len__(self):
        return len(self._data)
    def __iter__(self):
        return iter(self._data)
    def __repr__(self):
        return repr(self._data)
    def __str__(self):
        return str(self._data)
    def __eq__(self, other):
        if not isinstance(other, CoordinateArray): return False
        return self._data == other._data
    def __getitem__(self, k):
        if isinstance(k, str):
            # Reverse lookup: ``k`` is a label, so compare against the
            # mapping VALUES and return the coordinate key. (Comparing the
            # label against the keys -- the coordinates -- was a bug: a
            # label lookup could never match and always raised KeyError.)
            for other_k, other_v in self._data.items():
                if other_v == k: return other_k
            raise KeyError('label not found: %r' % (k, ))
        return self._data[k]
    def to_json(self):
        """JSON form: parallel "coordinates" and "labels" arrays."""
        return dict(coordinates=list(self._data.keys()), labels=list(self._data.values()))
@export
class DimensionScale(collections.namedtuple('DimensionScale',
                                            ['scale', 'unit', 'coordinate_array'])):
    """Physical scale/unit (or a coordinate array) for one dimension."""
    __slots__ = ()
    def __new__(cls, scale=1, unit='', coordinate_array=None):
        return super(DimensionScale, cls).__new__(cls, scale, unit, coordinate_array)
    @staticmethod
    def from_json(json):
        """Build a DimensionScale from JSON: an existing instance is passed
        through, a ``[scale, unit]`` pair becomes a plain scale, and any
        other object is interpreted as coordinate-array JSON."""
        if isinstance(json, DimensionScale):
            return json
        if not isinstance(json, list):
            return DimensionScale(scale=None, unit=None,
                                  coordinate_array=CoordinateArray(json_data=json))
        if len(json) != 2:
            raise ValueError('Expected [scale, unit], but received: %r' % (json, ))
        return DimensionScale(scale=json[0], unit=json[1], coordinate_array=None)
@export
class CoordinateSpace(object):
    """Ordered collection of named dimensions, each with a scale/unit pair
    or a coordinate array (the Neuroglancer coordinate-space model)."""
    __slots__ = ('names', 'scales', 'units', 'coordinate_arrays')
    def __init__(self, json=None, names=None, scales=None, units=None, coordinate_arrays=None):
        """Construct from either a JSON dict (name -> [scale, unit] or
        coordinate-array object) or parallel names/scales/units sequences.
        SI prefixes in units are folded into the stored scales."""
        if json is None:
            if names is not None:
                self.names = tuple(names)
                scales = np.array(scales, dtype=np.float64)
                # A single unit string is broadcast across all dimensions.
                if isinstance(units, str):
                    units = tuple(units for _ in names)
                # Normalize e.g. (4, 'nm') -> (4e-9, 'm').
                scales_and_units = tuple(
                    parse_unit(scale, unit) for scale, unit in zip(scales, units))
                scales = np.array([s[0] for s in scales_and_units], dtype=np.float64)
                units = tuple(s[1] for s in scales_and_units)
                if coordinate_arrays is None:
                    coordinate_arrays = tuple(None for x in units)
                else:
                    coordinate_arrays = tuple(coordinate_arrays)
                self.units = units
                self.scales = scales
                self.coordinate_arrays = coordinate_arrays
            else:
                # Rank-0 (empty) coordinate space.
                self.names = ()
                self.scales = np.zeros(0, dtype=np.float64)
                self.units = ()
                self.coordinate_arrays = ()
        else:
            if not isinstance(json, dict): raise TypeError
            self.names = tuple(json.keys())
            values = tuple(DimensionScale.from_json(v) for v in json.values())
            self.scales = np.array([v.scale for v in values], dtype=np.float64)
            self.units = tuple(v.unit for v in values)
            self.coordinate_arrays = tuple(v.coordinate_array for v in values)
        # Freeze the scales array so the space is effectively immutable.
        self.scales.setflags(write=False)
    @property
    def rank(self):
        # Number of dimensions.
        return len(self.names)
    def __getitem__(self, i):
        """Index by dimension name, position, or slice; returns
        DimensionScale (or a list of them for a slice)."""
        if isinstance(i, str):
            idx = self.names.index(i)
            return DimensionScale(scale=self.scales[idx],
                                  unit=self.units[idx],
                                  coordinate_array=self.coordinate_arrays[idx])
        if isinstance(i, slice):
            idxs = range(self.rank)[i]
            return [
                DimensionScale(scale=self.scales[j],
                               unit=self.units[j],
                               coordinate_array=self.coordinate_arrays[j]) for j in idxs
            ]
        return DimensionScale(scale=self.scales[i],
                              unit=self.units[i],
                              coordinate_array=self.coordinate_arrays[i])
    def __repr__(self):
        return 'CoordinateSpace(%r)' % (self.to_json(), )
    def to_json(self):
        """JSON dict: name -> [scale, unit] or coordinate-array object."""
        d = collections.OrderedDict()
        for name, scale, unit, coordinate_array in zip(self.names, self.scales, self.units,
                                                       self.coordinate_arrays):
            if coordinate_array is None:
                d[name] = [scale, unit]
            else:
                d[name] = coordinate_array.to_json()
        return d
| janelia-flyem/neuroglancer | python/neuroglancer/coordinate_space.py | Python | apache-2.0 | 7,132 |
import copy
import itertools
import re
import operator
from datetime import datetime, timedelta
from collections import defaultdict
import numpy as np
from pandas.core.base import PandasObject
from pandas.core.common import (_possibly_downcast_to_dtype, isnull,
_NS_DTYPE, _TD_DTYPE, ABCSeries, is_list_like,
ABCSparseSeries, _infer_dtype_from_scalar,
is_null_datelike_scalar, _maybe_promote,
is_timedelta64_dtype, is_datetime64_dtype,
array_equivalent, _maybe_convert_string_to_object,
is_categorical)
from pandas.core.index import Index, MultiIndex, _ensure_index
from pandas.core.indexing import maybe_convert_indices, length_of_indexer
from pandas.core.categorical import Categorical, maybe_to_categorical
import pandas.core.common as com
from pandas.sparse.array import _maybe_to_sparse, SparseArray
import pandas.lib as lib
import pandas.tslib as tslib
import pandas.computation.expressions as expressions
from pandas.util.decorators import cache_readonly
from pandas.tslib import Timestamp, Timedelta
from pandas import compat
from pandas.compat import range, map, zip, u
from pandas.tseries.timedeltas import _coerce_scalar_to_timedelta_type
from pandas.lib import BlockPlacement
class Block(PandasObject):
"""
Canonical n-dimensional unit of homogeneous dtype contained in a pandas
data structure
Index-ignorant; let the container take care of that
"""
__slots__ = ['_mgr_locs', 'values', 'ndim']
is_numeric = False
is_float = False
is_integer = False
is_complex = False
is_datetime = False
is_timedelta = False
is_bool = False
is_object = False
is_categorical = False
is_sparse = False
_can_hold_na = False
_downcast_dtype = None
_can_consolidate = True
_verify_integrity = True
_validate_ndim = True
_ftype = 'dense'
_holder = None
    def __init__(self, values, placement, ndim=None, fastpath=False):
        # values: ndarray of block data; placement: locations of these items
        # within the owning container. ndim defaults to values.ndim;
        # `fastpath` is accepted for API symmetry but unused here.
        if ndim is None:
            ndim = values.ndim
        elif values.ndim != ndim:
            raise ValueError('Wrong number of dimensions')
        self.ndim = ndim
        # Assigning mgr_locs goes through the property setter, which wraps
        # the value in a BlockPlacement.
        self.mgr_locs = placement
        self.values = values
        # Sanity check: one placement entry per item held in the block.
        if len(self.mgr_locs) != len(self.values):
            raise ValueError('Wrong number of items passed %d,'
                             ' placement implies %d' % (
                len(self.values), len(self.mgr_locs)))
@property
def _consolidate_key(self):
return (self._can_consolidate, self.dtype.name)
@property
def _is_single_block(self):
return self.ndim == 1
@property
def is_view(self):
""" return a boolean if I am possibly a view """
return self.values.base is not None
    @property
    def is_datelike(self):
        """ return True if I am datelike (datetime or timedelta) """
        return self.is_datetime or self.is_timedelta
def is_categorical_astype(self, dtype):
"""
validate that we have a astypeable to categorical,
returns a boolean if we are a categorical
"""
if com.is_categorical_dtype(dtype):
if dtype == com.CategoricalDtype():
return True
# this is a pd.Categorical, but is not
# a valid type for astypeing
raise TypeError("invalid type {0} for astype".format(dtype))
return False
def to_dense(self):
return self.values.view()
@property
def fill_value(self):
return np.nan
@property
def mgr_locs(self):
return self._mgr_locs
@property
def array_dtype(self):
""" the dtype to return if I want to construct this block as an array """
return self.dtype
def make_block_same_class(self, values, placement, copy=False, fastpath=True,
**kwargs):
"""
Wrap given values in a block of same type as self.
`kwargs` are used in SparseBlock override.
"""
if copy:
values = values.copy()
return make_block(values, placement, klass=self.__class__,
fastpath=fastpath, **kwargs)
@mgr_locs.setter
def mgr_locs(self, new_mgr_locs):
if not isinstance(new_mgr_locs, BlockPlacement):
new_mgr_locs = BlockPlacement(new_mgr_locs)
self._mgr_locs = new_mgr_locs
def __unicode__(self):
# don't want to print out all of the items here
name = com.pprint_thing(self.__class__.__name__)
if self._is_single_block:
result = '%s: %s dtype: %s' % (
name, len(self), self.dtype)
else:
shape = ' x '.join([com.pprint_thing(s) for s in self.shape])
result = '%s: %s, %s, dtype: %s' % (
name, com.pprint_thing(self.mgr_locs.indexer), shape,
self.dtype)
return result
def __len__(self):
return len(self.values)
def __getstate__(self):
return self.mgr_locs.indexer, self.values
def __setstate__(self, state):
self.mgr_locs = BlockPlacement(state[0])
self.values = state[1]
self.ndim = self.values.ndim
def _slice(self, slicer):
""" return a slice of my values """
return self.values[slicer]
def reshape_nd(self, labels, shape, ref_items):
"""
Parameters
----------
labels : list of new axis labels
shape : new shape
ref_items : new ref_items
return a new block that is transformed to a nd block
"""
return _block2d_to_blocknd(
values=self.get_values().T,
placement=self.mgr_locs,
shape=shape,
labels=labels,
ref_items=ref_items)
def getitem_block(self, slicer, new_mgr_locs=None):
"""
Perform __getitem__-like, return result as block.
As of now, only supports slices that preserve dimensionality.
"""
if new_mgr_locs is None:
if isinstance(slicer, tuple):
axis0_slicer = slicer[0]
else:
axis0_slicer = slicer
new_mgr_locs = self.mgr_locs[axis0_slicer]
new_values = self._slice(slicer)
if self._validate_ndim and new_values.ndim != self.ndim:
raise ValueError("Only same dim slicing is allowed")
return self.make_block_same_class(new_values, new_mgr_locs)
@property
def shape(self):
return self.values.shape
@property
def itemsize(self):
return self.values.itemsize
@property
def dtype(self):
return self.values.dtype
@property
def ftype(self):
return "%s:%s" % (self.dtype, self._ftype)
def merge(self, other):
return _merge_blocks([self, other])
def reindex_axis(self, indexer, method=None, axis=1, fill_value=None,
limit=None, mask_info=None):
"""
Reindex using pre-computed indexer information
"""
if axis < 1:
raise AssertionError('axis must be at least 1, got %d' % axis)
if fill_value is None:
fill_value = self.fill_value
new_values = com.take_nd(self.values, indexer, axis,
fill_value=fill_value, mask_info=mask_info)
return make_block(new_values,
ndim=self.ndim, fastpath=True,
placement=self.mgr_locs)
def get(self, item):
loc = self.items.get_loc(item)
return self.values[loc]
def iget(self, i):
return self.values[i]
def set(self, locs, values, check=False):
"""
Modify Block in-place with new item value
Returns
-------
None
"""
self.values[locs] = values
def delete(self, loc):
"""
Delete given loc(-s) from block in-place.
"""
self.values = np.delete(self.values, loc, 0)
self.mgr_locs = self.mgr_locs.delete(loc)
def apply(self, func, **kwargs):
""" apply the function to my values; return a block if we are not one """
result = func(self.values, **kwargs)
if not isinstance(result, Block):
result = make_block(values=_block_shape(result), placement=self.mgr_locs,)
return result
    def fillna(self, value, limit=None, inplace=False, downcast=None):
        """Fill missing entries with `value`; returns a list of blocks.

        `limit` caps the number of fills per column; `downcast` is forwarded
        to _maybe_downcast.
        """
        # Blocks that cannot hold NA have nothing to fill.
        if not self._can_hold_na:
            if inplace:
                return [self]
            else:
                return [self.copy()]
        mask = isnull(self.values)
        if limit is not None:
            if self.ndim > 2:
                raise NotImplementedError
            # cumsum along the last axis counts missing values seen so far;
            # positions past the first `limit` NAs are removed from the mask.
            mask[mask.cumsum(self.ndim-1)>limit]=False
        value = self._try_fill(value)
        blocks = self.putmask(mask, value, inplace=inplace)
        return self._maybe_downcast(blocks, downcast)
def _maybe_downcast(self, blocks, downcast=None):
# no need to downcast our float
# unless indicated
if downcast is None and self.is_float:
return blocks
elif downcast is None and (self.is_timedelta or self.is_datetime):
return blocks
result_blocks = []
for b in blocks:
result_blocks.extend(b.downcast(downcast))
return result_blocks
def downcast(self, dtypes=None):
""" try to downcast each item to the dict of dtypes if present """
# turn it off completely
if dtypes is False:
return [self]
values = self.values
# single block handling
if self._is_single_block:
# try to cast all non-floats here
if dtypes is None:
dtypes = 'infer'
nv = _possibly_downcast_to_dtype(values, dtypes)
return [make_block(nv, ndim=self.ndim,
fastpath=True, placement=self.mgr_locs)]
# ndim > 1
if dtypes is None:
return [self]
if not (dtypes == 'infer' or isinstance(dtypes, dict)):
raise ValueError("downcast must have a dictionary or 'infer' as "
"its argument")
# item-by-item
# this is expensive as it splits the blocks items-by-item
blocks = []
for i, rl in enumerate(self.mgr_locs):
if dtypes == 'infer':
dtype = 'infer'
else:
raise AssertionError("dtypes as dict is not supported yet")
dtype = dtypes.get(item, self._downcast_dtype)
if dtype is None:
nv = _block_shape(values[i], ndim=self.ndim)
else:
nv = _possibly_downcast_to_dtype(values[i], dtype)
nv = _block_shape(nv, ndim=self.ndim)
blocks.append(make_block(nv,
ndim=self.ndim, fastpath=True,
placement=[rl]))
return blocks
def astype(self, dtype, copy=False, raise_on_error=True, values=None, **kwargs):
return self._astype(dtype, copy=copy, raise_on_error=raise_on_error,
values=values, **kwargs)
def _astype(self, dtype, copy=False, raise_on_error=True, values=None,
klass=None, **kwargs):
"""
Coerce to the new type (if copy=True, return a new copy)
raise on an except if raise == True
"""
# may need to convert to categorical
# this is only called for non-categoricals
if self.is_categorical_astype(dtype):
return make_block(Categorical(self.values, **kwargs),
ndim=self.ndim,
placement=self.mgr_locs)
# astype processing
dtype = np.dtype(dtype)
if self.dtype == dtype:
if copy:
return self.copy()
return self
if klass is None:
if dtype == np.object_:
klass = ObjectBlock
try:
# force the copy here
if values is None:
# _astype_nansafe works fine with 1-d only
values = com._astype_nansafe(self.values.ravel(), dtype, copy=True)
values = values.reshape(self.values.shape)
newb = make_block(values,
ndim=self.ndim, placement=self.mgr_locs,
fastpath=True, dtype=dtype, klass=klass)
except:
if raise_on_error is True:
raise
newb = self.copy() if copy else self
if newb.is_numeric and self.is_numeric:
if newb.shape != self.shape:
raise TypeError("cannot set astype for copy = [%s] for dtype "
"(%s [%s]) with smaller itemsize that current "
"(%s [%s])" % (copy, self.dtype.name,
self.itemsize, newb.dtype.name,
newb.itemsize))
return newb
def convert(self, copy=True, **kwargs):
""" attempt to coerce any object types to better types
return a copy of the block (if copy = True)
by definition we are not an ObjectBlock here! """
return [self.copy()] if copy else [self]
def _can_hold_element(self, value):
raise NotImplementedError()
def _try_cast(self, value):
raise NotImplementedError()
def _try_cast_result(self, result, dtype=None):
""" try to cast the result to our original type,
we may have roundtripped thru object in the mean-time """
if dtype is None:
dtype = self.dtype
if self.is_integer or self.is_bool or self.is_datetime:
pass
elif self.is_float and result.dtype == self.dtype:
# protect against a bool/object showing up here
if isinstance(dtype, compat.string_types) and dtype == 'infer':
return result
if not isinstance(dtype, type):
dtype = dtype.type
if issubclass(dtype, (np.bool_, np.object_)):
if issubclass(dtype, np.bool_):
if isnull(result).all():
return result.astype(np.bool_)
else:
result = result.astype(np.object_)
result[result == 1] = True
result[result == 0] = False
return result
else:
return result.astype(np.object_)
return result
# may need to change the dtype here
return _possibly_downcast_to_dtype(result, dtype)
def _try_operate(self, values):
""" return a version to operate on as the input """
return values
def _try_coerce_args(self, values, other):
""" provide coercion to our input arguments """
return values, other
def _try_coerce_result(self, result):
""" reverse of try_coerce_args """
return result
def _try_coerce_and_cast_result(self, result, dtype=None):
result = self._try_coerce_result(result)
result = self._try_cast_result(result, dtype=dtype)
return result
def _try_fill(self, value):
return value
def to_native_types(self, slicer=None, na_rep='', **kwargs):
""" convert to our native types format, slicing if desired """
values = self.values
if slicer is not None:
values = values[:, slicer]
values = np.array(values, dtype=object)
mask = isnull(values)
values[mask] = na_rep
return values.tolist()
# block actions ####
def copy(self, deep=True):
values = self.values
if deep:
values = values.copy()
return make_block(values, ndim=self.ndim,
klass=self.__class__, fastpath=True,
placement=self.mgr_locs)
def replace(self, to_replace, value, inplace=False, filter=None,
regex=False):
""" replace the to_replace value with value, possible to create new
blocks here this is just a call to putmask. regex is not used here.
It is used in ObjectBlocks. It is here for API
compatibility."""
mask = com.mask_missing(self.values, to_replace)
if filter is not None:
filtered_out = ~self.mgr_locs.isin(filter)
mask[filtered_out.nonzero()[0]] = False
if not mask.any():
if inplace:
return [self]
return [self.copy()]
return self.putmask(mask, value, inplace=inplace)
def setitem(self, indexer, value):
""" set the value inplace; return a new block (of a possibly different
dtype)
indexer is a direct slice/positional indexer; value must be a
compatible shape
"""
# coerce None values, if appropriate
if value is None:
if self.is_numeric:
value = np.nan
# coerce args
values, value = self._try_coerce_args(self.values, value)
arr_value = np.array(value)
# cast the values to a type that can hold nan (if necessary)
if not self._can_hold_element(value):
dtype, _ = com._maybe_promote(arr_value.dtype)
values = values.astype(dtype)
transf = (lambda x: x.T) if self.ndim == 2 else (lambda x: x)
values = transf(values)
l = len(values)
# length checking
# boolean with truth values == len of the value is ok too
if isinstance(indexer, (np.ndarray, list)):
if is_list_like(value) and len(indexer) != len(value):
if not (isinstance(indexer, np.ndarray) and
indexer.dtype == np.bool_ and
len(indexer[indexer]) == len(value)):
raise ValueError("cannot set using a list-like indexer "
"with a different length than the value")
# slice
elif isinstance(indexer, slice):
if is_list_like(value) and l:
if len(value) != length_of_indexer(indexer, values):
raise ValueError("cannot set using a slice indexer with a "
"different length than the value")
try:
def _is_scalar_indexer(indexer):
# return True if we are all scalar indexers
if arr_value.ndim == 1:
if not isinstance(indexer, tuple):
indexer = tuple([indexer])
return all([ np.isscalar(idx) for idx in indexer ])
return False
def _is_empty_indexer(indexer):
# return a boolean if we have an empty indexer
if arr_value.ndim == 1:
if not isinstance(indexer, tuple):
indexer = tuple([indexer])
return all([ isinstance(idx, np.ndarray) and len(idx) == 0 for idx in indexer ])
return False
# empty indexers
# 8669 (empty)
if _is_empty_indexer(indexer):
pass
# setting a single element for each dim and with a rhs that could be say a list
# GH 6043
elif _is_scalar_indexer(indexer):
values[indexer] = value
# if we are an exact match (ex-broadcasting),
# then use the resultant dtype
elif len(arr_value.shape) and arr_value.shape[0] == values.shape[0] and np.prod(arr_value.shape) == np.prod(values.shape):
values[indexer] = value
values = values.astype(arr_value.dtype)
# set
else:
values[indexer] = value
# coerce and try to infer the dtypes of the result
if np.isscalar(value):
dtype, _ = _infer_dtype_from_scalar(value)
else:
dtype = 'infer'
values = self._try_coerce_and_cast_result(values, dtype)
block = make_block(transf(values),
ndim=self.ndim, placement=self.mgr_locs,
fastpath=True)
# may have to soft convert_objects here
if block.is_object and not self.is_object:
block = block.convert(convert_numeric=False)
return block
except (ValueError, TypeError) as detail:
raise
except Exception as detail:
pass
return [self]
def putmask(self, mask, new, align=True, inplace=False):
""" putmask the data to the block; it is possible that we may create a
new dtype of block
return the resulting block(s)
Parameters
----------
mask : the condition to respect
new : a ndarray/object
align : boolean, perform alignment on other/cond, default is True
inplace : perform inplace modification, default is False
Returns
-------
a new block(s), the result of the putmask
"""
new_values = self.values if inplace else self.values.copy()
# may need to align the new
if hasattr(new, 'reindex_axis'):
new = new.values.T
# may need to align the mask
if hasattr(mask, 'reindex_axis'):
mask = mask.values.T
# if we are passed a scalar None, convert it here
if not is_list_like(new) and isnull(new) and not self.is_object:
new = self.fill_value
if self._can_hold_element(new):
new = self._try_cast(new)
# pseudo-broadcast
if isinstance(new, np.ndarray) and new.ndim == self.ndim - 1:
new = np.repeat(new, self.shape[-1]).reshape(self.shape)
np.putmask(new_values, mask, new)
# maybe upcast me
elif mask.any():
# need to go column by column
new_blocks = []
if self.ndim > 1:
for i, ref_loc in enumerate(self.mgr_locs):
m = mask[i]
v = new_values[i]
# need a new block
if m.any():
n = new[i] if isinstance(
new, np.ndarray) else np.array(new)
# type of the new block
dtype, _ = com._maybe_promote(n.dtype)
# we need to exiplicty astype here to make a copy
n = n.astype(dtype)
nv = _putmask_smart(v, m, n)
else:
nv = v if inplace else v.copy()
# Put back the dimension that was taken from it and make
# a block out of the result.
block = make_block(values=nv[np.newaxis],
placement=[ref_loc],
fastpath=True)
new_blocks.append(block)
else:
nv = _putmask_smart(new_values, mask, new)
new_blocks.append(make_block(values=nv,
placement=self.mgr_locs,
fastpath=True))
return new_blocks
if inplace:
return [self]
return [make_block(new_values,
placement=self.mgr_locs, fastpath=True)]
def interpolate(self, method='pad', axis=0, index=None,
values=None, inplace=False, limit=None,
fill_value=None, coerce=False, downcast=None, **kwargs):
def check_int_bool(self, inplace):
# Only FloatBlocks will contain NaNs.
# timedelta subclasses IntBlock
if (self.is_bool or self.is_integer) and not self.is_timedelta:
if inplace:
return self
else:
return self.copy()
# a fill na type method
try:
m = com._clean_fill_method(method)
except:
m = None
if m is not None:
r = check_int_bool(self, inplace)
if r is not None:
return r
return self._interpolate_with_fill(method=m,
axis=axis,
inplace=inplace,
limit=limit,
fill_value=fill_value,
coerce=coerce,
downcast=downcast)
# try an interp method
try:
m = com._clean_interp_method(method, **kwargs)
except:
m = None
if m is not None:
r = check_int_bool(self, inplace)
if r is not None:
return r
return self._interpolate(method=m,
index=index,
values=values,
axis=axis,
limit=limit,
fill_value=fill_value,
inplace=inplace,
downcast=downcast,
**kwargs)
raise ValueError("invalid method '{0}' to interpolate.".format(method))
def _interpolate_with_fill(self, method='pad', axis=0, inplace=False,
limit=None, fill_value=None, coerce=False,
downcast=None):
""" fillna but using the interpolate machinery """
# if we are coercing, then don't force the conversion
# if the block can't hold the type
if coerce:
if not self._can_hold_na:
if inplace:
return [self]
else:
return [self.copy()]
fill_value = self._try_fill(fill_value)
values = self.values if inplace else self.values.copy()
values = self._try_operate(values)
values = com.interpolate_2d(values,
method=method,
axis=axis,
limit=limit,
fill_value=fill_value,
dtype=self.dtype)
values = self._try_coerce_result(values)
blocks = [make_block(values,
ndim=self.ndim, klass=self.__class__,
fastpath=True, placement=self.mgr_locs)]
return self._maybe_downcast(blocks, downcast)
def _interpolate(self, method=None, index=None, values=None,
fill_value=None, axis=0, limit=None,
inplace=False, downcast=None, **kwargs):
""" interpolate using scipy wrappers """
data = self.values if inplace else self.values.copy()
# only deal with floats
if not self.is_float:
if not self.is_integer:
return self
data = data.astype(np.float64)
if fill_value is None:
fill_value = self.fill_value
if method in ('krogh', 'piecewise_polynomial', 'pchip'):
if not index.is_monotonic:
raise ValueError("{0} interpolation requires that the "
"index be monotonic.".format(method))
# process 1-d slices in the axis direction
def func(x):
# process a 1-d slice, returning it
# should the axis argument be handled below in apply_along_axis?
# i.e. not an arg to com.interpolate_1d
return com.interpolate_1d(index, x, method=method, limit=limit,
fill_value=fill_value,
bounds_error=False, **kwargs)
# interp each column independently
interp_values = np.apply_along_axis(func, axis, data)
blocks = [make_block(interp_values,
ndim=self.ndim, klass=self.__class__,
fastpath=True, placement=self.mgr_locs)]
return self._maybe_downcast(blocks, downcast)
def take_nd(self, indexer, axis, new_mgr_locs=None, fill_tuple=None):
"""
Take values according to indexer and return them as a block.bb
"""
if fill_tuple is None:
fill_value = self.fill_value
new_values = com.take_nd(self.get_values(), indexer, axis=axis,
allow_fill=False)
else:
fill_value = fill_tuple[0]
new_values = com.take_nd(self.get_values(), indexer, axis=axis,
allow_fill=True, fill_value=fill_value)
if new_mgr_locs is None:
if axis == 0:
slc = lib.indexer_as_slice(indexer)
if slc is not None:
new_mgr_locs = self.mgr_locs[slc]
else:
new_mgr_locs = self.mgr_locs[indexer]
else:
new_mgr_locs = self.mgr_locs
if new_values.dtype != self.dtype:
return make_block(new_values, new_mgr_locs)
else:
return self.make_block_same_class(new_values, new_mgr_locs)
def get_values(self, dtype=None):
return self.values
def diff(self, n):
""" return block for the diff of the values """
new_values = com.diff(self.values, n, axis=1)
return [make_block(values=new_values,
ndim=self.ndim, fastpath=True,
placement=self.mgr_locs)]
def shift(self, periods, axis=0):
""" shift the block by periods, possibly upcast """
# convert integer to float if necessary. need to do a lot more than
# that, handle boolean etc also
new_values, fill_value = com._maybe_upcast(self.values)
# make sure array sent to np.roll is c_contiguous
f_ordered = new_values.flags.f_contiguous
if f_ordered:
new_values = new_values.T
axis = new_values.ndim - axis - 1
if np.prod(new_values.shape):
new_values = np.roll(new_values, com._ensure_platform_int(periods), axis=axis)
axis_indexer = [ slice(None) ] * self.ndim
if periods > 0:
axis_indexer[axis] = slice(None,periods)
else:
axis_indexer[axis] = slice(periods,None)
new_values[tuple(axis_indexer)] = fill_value
# restore original order
if f_ordered:
new_values = new_values.T
return [make_block(new_values,
ndim=self.ndim, fastpath=True,
placement=self.mgr_locs)]
def eval(self, func, other, raise_on_error=True, try_cast=False):
"""
evaluate the block; return result block from the result
Parameters
----------
func : how to combine self, other
other : a ndarray/object
raise_on_error : if True, raise when I can't perform the function,
False by default (and just return the data that we had coming in)
Returns
-------
a new block, the result of the func
"""
values = self.values
if hasattr(other, 'reindex_axis'):
other = other.values
# make sure that we can broadcast
is_transposed = False
if hasattr(other, 'ndim') and hasattr(values, 'ndim'):
if values.ndim != other.ndim:
is_transposed = True
else:
if values.shape == other.shape[::-1]:
is_transposed = True
elif values.shape[0] == other.shape[-1]:
is_transposed = True
else:
# this is a broadcast error heree
raise ValueError("cannot broadcast shape [%s] with block "
"values [%s]" % (values.T.shape,
other.shape))
transf = (lambda x: x.T) if is_transposed else (lambda x: x)
# coerce/transpose the args if needed
values, other = self._try_coerce_args(transf(values), other)
# get the result, may need to transpose the other
def get_result(other):
return self._try_coerce_result(func(values, other))
# error handler if we have an issue operating with the function
def handle_error():
if raise_on_error:
raise TypeError('Could not operate %s with block values %s'
% (repr(other), str(detail)))
else:
# return the values
result = np.empty(values.shape, dtype='O')
result.fill(np.nan)
return result
# get the result
try:
result = get_result(other)
# if we have an invalid shape/broadcast error
# GH4576, so raise instead of allowing to pass through
except ValueError as detail:
raise
except Exception as detail:
result = handle_error()
# technically a broadcast error in numpy can 'work' by returning a
# boolean False
if not isinstance(result, np.ndarray):
if not isinstance(result, np.ndarray):
# differentiate between an invalid ndarray-ndarray comparison
# and an invalid type comparison
if isinstance(values, np.ndarray) and is_list_like(other):
raise ValueError('Invalid broadcasting comparison [%s] '
'with block values' % repr(other))
raise TypeError('Could not compare [%s] with block values'
% repr(other))
# transpose if needed
result = transf(result)
# try to cast if requested
if try_cast:
result = self._try_cast_result(result)
return [make_block(result, ndim=self.ndim,
fastpath=True, placement=self.mgr_locs)]
def where(self, other, cond, align=True, raise_on_error=True,
try_cast=False):
"""
evaluate the block; return result block(s) from the result
Parameters
----------
other : a ndarray/object
cond : the condition to respect
align : boolean, perform alignment on other/cond
raise_on_error : if True, raise when I can't perform the function,
False by default (and just return the data that we had coming in)
Returns
-------
a new block(s), the result of the func
"""
values = self.values
# see if we can align other
if hasattr(other, 'reindex_axis'):
other = other.values
# make sure that we can broadcast
is_transposed = False
if hasattr(other, 'ndim') and hasattr(values, 'ndim'):
if values.ndim != other.ndim or values.shape == other.shape[::-1]:
# if its symmetric are ok, no reshaping needed (GH 7506)
if (values.shape[0] == np.array(values.shape)).all():
pass
# pseodo broadcast (its a 2d vs 1d say and where needs it in a
# specific direction)
elif (other.ndim >= 1 and values.ndim - 1 == other.ndim and
values.shape[0] != other.shape[0]):
other = _block_shape(other).T
else:
values = values.T
is_transposed = True
# see if we can align cond
if not hasattr(cond, 'shape'):
raise ValueError(
"where must have a condition that is ndarray like")
if hasattr(cond, 'reindex_axis'):
cond = cond.values
# may need to undo transpose of values
if hasattr(values, 'ndim'):
if values.ndim != cond.ndim or values.shape == cond.shape[::-1]:
values = values.T
is_transposed = not is_transposed
other = _maybe_convert_string_to_object(other)
# our where function
def func(c, v, o):
if c.ravel().all():
return v
v, o = self._try_coerce_args(v, o)
try:
return self._try_coerce_result(
expressions.where(c, v, o, raise_on_error=True)
)
except Exception as detail:
if raise_on_error:
raise TypeError('Could not operate [%s] with block values '
'[%s]' % (repr(o), str(detail)))
else:
# return the values
result = np.empty(v.shape, dtype='float64')
result.fill(np.nan)
return result
# see if we can operate on the entire block, or need item-by-item
# or if we are a single block (ndim == 1)
result = func(cond, values, other)
if self._can_hold_na or self.ndim == 1:
if not isinstance(result, np.ndarray):
raise TypeError('Could not compare [%s] with block values'
% repr(other))
if is_transposed:
result = result.T
# try to cast if requested
if try_cast:
result = self._try_cast_result(result)
return make_block(result,
ndim=self.ndim, placement=self.mgr_locs)
# might need to separate out blocks
axis = cond.ndim - 1
cond = cond.swapaxes(axis, 0)
mask = np.array([cond[i].all() for i in range(cond.shape[0])],
dtype=bool)
result_blocks = []
for m in [mask, ~mask]:
if m.any():
r = self._try_cast_result(
result.take(m.nonzero()[0], axis=axis))
result_blocks.append(make_block(r.T,
placement=self.mgr_locs[m]))
return result_blocks
def equals(self, other):
if self.dtype != other.dtype or self.shape != other.shape: return False
return array_equivalent(self.values, other.values)
class NonConsolidatableMixIn(object):
    """ hold methods for the nonconsolidatable blocks """
    _can_consolidate = False
    _verify_integrity = False
    _validate_ndim = False
    # subclasses set _holder to the container type they wrap (e.g. a sparse
    # or categorical array class)
    _holder = None
    def __init__(self, values, placement,
                 ndim=None, fastpath=False,):
        # Placement must be converted to BlockPlacement via property setter
        # before ndim logic, because placement may be a slice which doesn't
        # have a length.
        self.mgr_locs = placement
        # kludgetastic
        # NOTE(review): ndim inference looks surprising (multiple placements
        # -> ndim 1, single placement -> ndim 2) but is preserved as-is.
        if ndim is None:
            if len(self.mgr_locs) != 1:
                ndim = 1
            else:
                ndim = 2
        self.ndim = ndim
        if not isinstance(values, self._holder):
            raise TypeError("values must be {0}".format(self._holder.__name__))
        self.values = values
    def get_values(self, dtype=None):
        """Densify self.values and pad to self.ndim dimensions."""
        values = self.values.to_dense()
        if values.ndim == self.ndim - 1:
            # prepend a length-1 axis so the result matches the block ndim
            values = values.reshape((1,) + values.shape)
        return values
    def iget(self, col):
        # These blocks hold exactly one item, so the column index (when
        # present) must be 0.
        if self.ndim == 2 and isinstance(col, tuple):
            col, loc = col
            if col != 0:
                raise IndexError("{0} only contains one item".format(self))
            return self.values[loc]
        else:
            if col != 0:
                raise IndexError("{0} only contains one item".format(self))
            return self.values
    def should_store(self, value):
        # only values of the wrapped container type may be stored in-place
        return isinstance(value, self._holder)
    def set(self, locs, values, check=False):
        # single-item block: the only valid location set is [0]
        assert locs.tolist() == [0]
        self.values = values
    def get(self, item):
        if self.ndim == 1:
            loc = self.items.get_loc(item)
            return self.values[loc]
        else:
            # 2-d single-item block: return the whole container
            return self.values
    def _slice(self, slicer):
        """ return a slice of my values (but densify first) """
        return self.get_values()[slicer]
    def _try_cast_result(self, result, dtype=None):
        # no casting for non-consolidatable results
        return result
class NumericBlock(Block):
    """Base class for blocks holding numeric dtypes; can hold NA values."""
    __slots__ = ()
    is_numeric = True
    _can_hold_na = True
class FloatOrComplexBlock(NumericBlock):
    """Shared behaviour for float and complex blocks."""
    __slots__ = ()

    def equals(self, other):
        """Element-wise equality where NaNs in the same position compare equal."""
        if self.dtype != other.dtype:
            return False
        if self.shape != other.shape:
            return False
        left = self.values
        right = other.values
        both_nan = np.isnan(left) & np.isnan(right)
        return ((left == right) | both_nan).all()
class FloatBlock(FloatOrComplexBlock):
    """Block holding float-dtype values."""
    __slots__ = ()
    is_float = True
    # dtype that downcasting may demote floats to
    _downcast_dtype = 'int64'
    def _can_hold_element(self, element):
        """Return True if `element` can be stored without upcasting the block."""
        if is_list_like(element):
            element = np.array(element)
            tipo = element.dtype.type
            # numeric arrays are fine, but not datetime/timedelta arrays
            return issubclass(tipo, (np.floating, np.integer)) and not issubclass(
                tipo, (np.datetime64, np.timedelta64))
        # scalar: numeric, but exclude bools and date-like scalars
        return isinstance(element, (float, int, np.float_, np.int_)) and not isinstance(
            element, (bool, np.bool_, datetime, timedelta, np.datetime64, np.timedelta64))
    def _try_cast(self, element):
        # best-effort cast to float; return the element unchanged on failure
        try:
            return float(element)
        except:  # pragma: no cover
            return element
    def to_native_types(self, slicer=None, na_rep='', float_format=None, decimal='.',
                        **kwargs):
        """ convert to our native types format, slicing if desired """
        values = self.values
        if slicer is not None:
            values = values[:, slicer]
        values = np.array(values, dtype=object)
        mask = isnull(values)
        values[mask] = na_rep
        # choose a formatter: an explicit float_format and/or a non-'.'
        # decimal separator each require per-value string formatting
        # (only the first '.' is replaced, preserving any exponent text)
        if float_format and decimal != '.':
            formatter = lambda v : (float_format % v).replace('.',decimal,1)
        elif decimal != '.':
            formatter = lambda v : ('%g' % v).replace('.',decimal,1)
        elif float_format:
            formatter = lambda v : float_format % v
        else:
            formatter = None
        if formatter:
            # only format the non-NA positions; NA slots already hold na_rep
            imask = (~mask).ravel()
            values.flat[imask] = np.array(
                [formatter(val) for val in values.ravel()[imask]])
        return values.tolist()
    def should_store(self, value):
        # when inserting a column should not coerce integers to floats
        # unnecessarily
        return (issubclass(value.dtype.type, np.floating) and
                value.dtype == self.dtype)
class ComplexBlock(FloatOrComplexBlock):
    """ Block holding complex data. """
    __slots__ = ()
    is_complex = True

    def _can_hold_element(self, element):
        # arrays: any float/int/complex dtype is acceptable
        if is_list_like(element):
            element = np.array(element)
            return issubclass(element.dtype.type, (np.floating, np.integer, np.complexfloating))
        # BUG FIX: the original tested ``isinstance(bool, np.bool_)`` -- i.e.
        # whether the *type* ``bool`` is an np.bool_ instance, which is always
        # False -- so the ``not`` clause never rejected anything and plain
        # booleans slipped through.  Test the element itself, mirroring
        # FloatBlock's exclusion of bools.
        return (isinstance(element, (float, int, complex, np.float_, np.int_)) and
                not isinstance(element, (bool, np.bool_)))

    def _try_cast(self, element):
        # catch only what complex() can raise rather than a bare except
        try:
            return complex(element)
        except (TypeError, ValueError, OverflowError):  # pragma: no cover
            return element

    def should_store(self, value):
        return issubclass(value.dtype.type, np.complexfloating)
class IntBlock(NumericBlock):
    """ Block holding integer data (cannot hold NaN). """
    __slots__ = ()
    is_integer = True
    _can_hold_na = False

    def _can_hold_element(self, element):
        # integers only; datetime64/timedelta64 also subclass np.integer in
        # numpy's scalar hierarchy, so they must be excluded explicitly
        if is_list_like(element):
            element = np.array(element)
            tipo = element.dtype.type
            return issubclass(tipo, np.integer) and not issubclass(tipo, (np.datetime64, np.timedelta64))
        return com.is_integer(element)

    def _try_cast(self, element):
        # int() raises TypeError/ValueError for non-numeric input and
        # OverflowError for infinities; catching only those avoids a bare
        # ``except:`` swallowing KeyboardInterrupt/SystemExit
        try:
            return int(element)
        except (TypeError, ValueError, OverflowError):  # pragma: no cover
            return element

    def should_store(self, value):
        return com.is_integer_dtype(value) and value.dtype == self.dtype
class TimeDeltaBlock(IntBlock):
    """ Block holding timedelta64[ns] data; NaT (tslib.iNaT) is the NA
    sentinel, stored in the underlying i8 values. """
    __slots__ = ()
    is_timedelta = True
    _can_hold_na = True
    is_numeric = False
    @property
    def fill_value(self):
        # the raw i8 NaT sentinel
        return tslib.iNaT
    def _try_fill(self, value):
        """ if we are a NaT, return the actual fill value """
        if isinstance(value, type(tslib.NaT)) or np.array(isnull(value)).all():
            value = tslib.iNaT
        elif isinstance(value, Timedelta):
            value = value.value
        elif isinstance(value, np.timedelta64):
            pass
        elif com.is_integer(value):
            # coerce to seconds of timedelta
            value = np.timedelta64(int(value * 1e9))
        elif isinstance(value, timedelta):
            value = np.timedelta64(value)
        return value
    def _try_coerce_args(self, values, other):
        """ provide coercion to our input arguments
            we are going to compare vs i8, so coerce to floats,
            representing NaT with np.nan so nans propagate
            values is always ndarray like, other may not be """
        def masker(v):
            # view the i8 payload as float64 with NaT positions set to NaN
            mask = isnull(v)
            v = v.view('i8').astype('float64')
            v[mask] = np.nan
            return v
        values = masker(values)
        if is_null_datelike_scalar(other):
            other = np.nan
        elif isinstance(other, (np.timedelta64, Timedelta, timedelta)):
            # scalar timedelta -> raw integer nanoseconds (or NaN for NaT)
            other = _coerce_scalar_to_timedelta_type(other, unit='s', box=False).item()
            if other == tslib.iNaT:
                other = np.nan
        else:
            other = masker(other)
        return values, other
    def _try_operate(self, values):
        """ return a version to operate on """
        return values.view('i8')
    def _try_coerce_result(self, result):
        """ reverse of try_coerce_args / try_operate """
        if isinstance(result, np.ndarray):
            mask = isnull(result)
            if result.dtype.kind in ['i', 'f', 'O']:
                result = result.astype('m8[ns]')
            result[mask] = tslib.iNaT
        elif isinstance(result, np.integer):
            result = lib.Timedelta(result)
        return result
    def should_store(self, value):
        return issubclass(value.dtype.type, np.timedelta64)
    def to_native_types(self, slicer=None, na_rep=None, **kwargs):
        """ convert to our native types format, slicing if desired """
        values = self.values
        if slicer is not None:
            values = values[:, slicer]
        mask = isnull(values)
        rvalues = np.empty(values.shape, dtype=object)
        if na_rep is None:
            na_rep = 'NaT'
        rvalues[mask] = na_rep
        imask = (~mask).ravel()
        #### FIXME ####
        # should use the core.format.Timedelta64Formatter here
        # to figure what format to pass to the Timedelta
        # e.g. to not show the decimals say
        rvalues.flat[imask] = np.array([Timedelta(val)._repr_base(format='all')
                                        for val in values.ravel()[imask]],
                                       dtype=object)
        return rvalues.tolist()
    def get_values(self, dtype=None):
        # return object dtypes as Timedelta
        if dtype == object:
            return lib.map_infer(self.values.ravel(), lib.Timedelta
                                 ).reshape(self.values.shape)
        return self.values
class BoolBlock(NumericBlock):
    """Block holding plain boolean data (no NA support)."""
    __slots__ = ()
    is_bool = True
    _can_hold_na = False

    def _can_hold_element(self, element):
        # arrays of any integer dtype are acceptable; scalars must be
        # ints or bools
        if is_list_like(element):
            return issubclass(np.array(element).dtype.type, np.integer)
        return isinstance(element, (int, bool))

    def _try_cast(self, element):
        # best-effort cast to bool; hand the element back untouched on failure
        try:
            return bool(element)
        except:  # pragma: no cover
            return element

    def should_store(self, value):
        return issubclass(value.dtype.type, np.bool_)

    def replace(self, to_replace, value, inplace=False, filter=None,
                regex=False):
        # nothing in a bool block can match a value that is not castable to
        # bool, so short-circuit in that case
        candidates = np.atleast_1d(to_replace)
        if not np.can_cast(candidates, bool):
            return self
        return super(BoolBlock, self).replace(to_replace, value,
                                              inplace=inplace, filter=filter,
                                              regex=regex)
class ObjectBlock(Block):
    """ Block holding arbitrary python objects (dtype=object). """
    __slots__ = ()
    is_object = True
    _can_hold_na = True
    def __init__(self, values, ndim=2, fastpath=False,
                 placement=None):
        # string arrays are stored as plain object ndarrays
        if issubclass(values.dtype.type, compat.string_types):
            values = np.array(values, dtype=object)
        super(ObjectBlock, self).__init__(values, ndim=ndim,
                                          fastpath=fastpath,
                                          placement=placement)
    @property
    def is_bool(self):
        """ we can be a bool if we have only bool values but are of type
        object
        """
        return lib.is_bool_array(self.values.ravel())
    def convert(self, convert_dates=True, convert_numeric=True, convert_timedeltas=True,
                copy=True, by_item=True):
        """ attempt to coerce any object types to better types
            return a copy of the block (if copy = True)
            by definition we ARE an ObjectBlock!!!!!
            can return multiple blocks!
        """
        # attempt to create new type blocks
        blocks = []
        if by_item and not self._is_single_block:
            # convert each item independently so mixed content can split
            # into differently-typed blocks
            for i, rl in enumerate(self.mgr_locs):
                values = self.iget(i)
                values = com._possibly_convert_objects(
                    values.ravel(), convert_dates=convert_dates,
                    convert_numeric=convert_numeric,
                    convert_timedeltas=convert_timedeltas,
                ).reshape(values.shape)
                values = _block_shape(values, ndim=self.ndim)
                newb = make_block(values,
                                  ndim=self.ndim, placement=[rl])
                blocks.append(newb)
        else:
            # NOTE(review): this path does not forward convert_timedeltas,
            # unlike the by-item path above -- confirm whether intentional
            values = com._possibly_convert_objects(
                self.values.ravel(), convert_dates=convert_dates,
                convert_numeric=convert_numeric
            ).reshape(self.values.shape)
            blocks.append(make_block(values,
                                     ndim=self.ndim, placement=self.mgr_locs))
        return blocks
    def set(self, locs, values, check=False):
        """
        Modify Block in-place with new item value

        Returns
        -------
        None
        """
        # GH6026
        if check:
            # skip the assignment entirely when values are already equal
            try:
                if (self.values[locs] == values).all():
                    return
            except:
                pass
        try:
            self.values[locs] = values
        except (ValueError):
            # broadcasting error
            # see GH6171
            new_shape = list(values.shape)
            new_shape[0] = len(self.items)
            self.values = np.empty(tuple(new_shape),dtype=self.dtype)
            self.values.fill(np.nan)
            self.values[locs] = values
    def _maybe_downcast(self, blocks, downcast=None):
        # for object blocks "downcast" means re-running date conversion,
        # but only when no explicit downcast was requested
        if downcast is not None:
            return blocks
        # split and convert the blocks
        result_blocks = []
        for blk in blocks:
            result_blocks.extend(blk.convert(convert_dates=True,
                                             convert_numeric=False))
        return result_blocks
    def _can_hold_element(self, element):
        # object blocks can hold anything
        return True
    def _try_cast(self, element):
        return element
    def should_store(self, value):
        # keep anything that is not already a more specific dtype
        return not (issubclass(value.dtype.type,
                               (np.integer, np.floating, np.complexfloating,
                                np.datetime64, np.bool_)) or com.is_categorical_dtype(value))
    def replace(self, to_replace, value, inplace=False, filter=None,
                regex=False):
        """ dispatch on the scalar/list/regex combinations of to_replace and
        value; list cases reduce to repeated single replacements """
        blk = [self]
        to_rep_is_list = com.is_list_like(to_replace)
        value_is_list = com.is_list_like(value)
        both_lists = to_rep_is_list and value_is_list
        either_list = to_rep_is_list or value_is_list
        if not either_list and com.is_re(to_replace):
            # a compiled regex forces the regex path
            blk[0], = blk[0]._replace_single(to_replace, value,
                                             inplace=inplace, filter=filter,
                                             regex=True)
        elif not (either_list or regex):
            # plain scalar -> scalar replace
            blk = super(ObjectBlock, self).replace(to_replace, value,
                                                   inplace=inplace,
                                                   filter=filter, regex=regex)
        elif both_lists:
            for to_rep, v in zip(to_replace, value):
                blk[0], = blk[0]._replace_single(to_rep, v, inplace=inplace,
                                                 filter=filter, regex=regex)
        elif to_rep_is_list and regex:
            for to_rep in to_replace:
                blk[0], = blk[0]._replace_single(to_rep, value,
                                                 inplace=inplace,
                                                 filter=filter, regex=regex)
        else:
            blk[0], = blk[0]._replace_single(to_replace, value,
                                             inplace=inplace, filter=filter,
                                             regex=regex)
        return blk
    def _replace_single(self, to_replace, value, inplace=False, filter=None,
                        regex=False):
        """ replace a single (possibly regex) to_replace value; always
        returns a one-element list of blocks """
        # to_replace is regex compilable
        to_rep_re = regex and com.is_re_compilable(to_replace)
        # regex is regex compilable
        regex_re = com.is_re_compilable(regex)
        # only one will survive
        if to_rep_re and regex_re:
            raise AssertionError('only one of to_replace and regex can be '
                                 'regex compilable')
        # if regex was passed as something that can be a regex (rather than a
        # boolean)
        if regex_re:
            to_replace = regex
        regex = regex_re or to_rep_re
        # try to get the pattern attribute (compiled re) or it's a string
        try:
            pattern = to_replace.pattern
        except AttributeError:
            pattern = to_replace
        # if the pattern is not empty and to_replace is either a string or a
        # regex
        if regex and pattern:
            rx = re.compile(to_replace)
        else:
            # if the thing to replace is not a string or compiled regex call
            # the superclass method -> to_replace is some kind of object
            result = super(ObjectBlock, self).replace(to_replace, value,
                                                      inplace=inplace,
                                                      filter=filter,
                                                      regex=regex)
            if not isinstance(result, list):
                result = [result]
            return result
        new_values = self.values if inplace else self.values.copy()
        # deal with replacing values with objects (strings) that match but
        # whose replacement is not a string (numeric, nan, object)
        if isnull(value) or not isinstance(value, compat.string_types):
            def re_replacer(s):
                # whole-value substitution on a regex match; non-strings
                # raise TypeError in rx.search and are passed through
                try:
                    return value if rx.search(s) is not None else s
                except TypeError:
                    return s
        else:
            # value is guaranteed to be a string here, s can be either a string
            # or null if it's null it gets returned
            def re_replacer(s):
                try:
                    return rx.sub(value, s)
                except TypeError:
                    return s
        f = np.vectorize(re_replacer, otypes=[self.dtype])
        if filter is None:
            filt = slice(None)
        else:
            # restrict the replacement to the requested item locations
            filt = self.mgr_locs.isin(filter).nonzero()[0]
        new_values[filt] = f(new_values[filt])
        return [self if inplace else
                make_block(new_values,
                           fastpath=True, placement=self.mgr_locs)]
class CategoricalBlock(NonConsolidatableMixIn, ObjectBlock):
    """ Block backed by a Categorical; non-consolidatable and always holds a
    single item. """
    __slots__ = ()
    is_categorical = True
    _can_hold_na = True
    _holder = Categorical
    def __init__(self, values, placement,
                 fastpath=False, **kwargs):
        # coerce to categorical if we can
        super(CategoricalBlock, self).__init__(maybe_to_categorical(values),
                                               fastpath=True, placement=placement,
                                               **kwargs)
    @property
    def is_view(self):
        """ I am never a view """
        return False
    def to_dense(self):
        return self.values.to_dense().view()
    @property
    def shape(self):
        return (len(self.mgr_locs), len(self.values))
    @property
    def array_dtype(self):
        """ the dtype to return if I want to construct this block as an array """
        return np.object_
    def _slice(self, slicer):
        """ return a slice of my values """
        # slice the category
        # return same dims as we currently have
        return self.values._slice(slicer)
    def fillna(self, value, limit=None, inplace=False, downcast=None):
        # we may need to upcast our fill to match our dtype
        if limit is not None:
            raise NotImplementedError
        values = self.values if inplace else self.values.copy()
        return [self.make_block_same_class(values=values.fillna(fill_value=value,
                                                                limit=limit),
                                           placement=self.mgr_locs)]
    def interpolate(self, method='pad', axis=0, inplace=False,
                    limit=None, fill_value=None, **kwargs):
        # NOTE(review): unlike fillna this returns a bare block rather than a
        # list -- confirm callers expect that
        values = self.values if inplace else self.values.copy()
        return self.make_block_same_class(values=values.fillna(fill_value=fill_value,
                                                               method=method,
                                                               limit=limit),
                                          placement=self.mgr_locs)
    def take_nd(self, indexer, axis=0, new_mgr_locs=None, fill_tuple=None):
        """
        Take values according to indexer and return them as a block.
        """
        if fill_tuple is None:
            fill_value = None
        else:
            fill_value = fill_tuple[0]
        # axis doesn't matter; we are really a single-dim object
        # but are passed the axis depending on the calling routing
        # if its REALLY axis 0, then this will be a reindex and not a take
        new_values = self.values.take_nd(indexer, fill_value=fill_value)
        # if we are a 1-dim object, then always place at 0
        if self.ndim == 1:
            new_mgr_locs = [0]
        else:
            if new_mgr_locs is None:
                new_mgr_locs = self.mgr_locs
        return self.make_block_same_class(new_values, new_mgr_locs)
    def putmask(self, mask, new, align=True, inplace=False):
        """ putmask the data to the block; it is possible that we may create a
        new dtype of block

        return the resulting block(s)

        Parameters
        ----------
        mask  : the condition to respect
        new : a ndarray/object
        align : boolean, perform alignment on other/cond, default is True
        inplace : perform inplace modification, default is False

        Returns
        -------
        a new block(s), the result of the putmask
        """
        new_values = self.values if inplace else self.values.copy()
        new_values[mask] = new
        return [self.make_block_same_class(values=new_values, placement=self.mgr_locs)]
    def _astype(self, dtype, copy=False, raise_on_error=True, values=None,
                klass=None):
        """
        Coerce to the new type (if copy=True, return a new copy)
        raise on an except if raise == True
        """
        if self.is_categorical_astype(dtype):
            values = self.values
        else:
            # densify to a plain ndarray before converting
            values = np.asarray(self.values).astype(dtype, copy=False)
        if copy:
            values = values.copy()
        return make_block(values,
                          ndim=self.ndim,
                          placement=self.mgr_locs)
    def to_native_types(self, slicer=None, na_rep='', **kwargs):
        """ convert to our native types format, slicing if desired """
        values = self.values
        if slicer is not None:
            # Categorical is always one dimension
            values = values[slicer]
        values = np.array(values, dtype=object)
        mask = isnull(values)
        values[mask] = na_rep
        # Blocks.to_native_type returns list of lists, but we are always only a list
        return [values.tolist()]
class DatetimeBlock(Block):
    """ Block holding datetime64[ns] data; NaT (tslib.iNaT) is the NA
    sentinel, stored in the underlying i8 values. """
    __slots__ = ()
    is_datetime = True
    _can_hold_na = True
    def __init__(self, values, placement,
                 fastpath=False, **kwargs):
        # normalize any datetime64 input to nanosecond resolution
        if values.dtype != _NS_DTYPE:
            values = tslib.cast_to_nanoseconds(values)
        super(DatetimeBlock, self).__init__(values,
                                            fastpath=True, placement=placement,
                                            **kwargs)
    def _can_hold_element(self, element):
        if is_list_like(element):
            element = np.array(element)
            return element.dtype == _NS_DTYPE or element.dtype == np.int64
        return (com.is_integer(element) or
                isinstance(element, datetime) or
                isnull(element))
    def _try_cast(self, element):
        # datetimes are operated on as raw i8 integers
        try:
            return int(element)
        except:
            return element
    def _try_operate(self, values):
        """ return a version to operate on """
        return values.view('i8')
    def _try_coerce_args(self, values, other):
        """ provide coercion to our input arguments
            we are going to compare vs i8, so coerce to integer
            values is always ndarray like, other may not be """
        values = values.view('i8')
        if is_null_datelike_scalar(other):
            other = tslib.iNaT
        elif isinstance(other, datetime):
            other = lib.Timestamp(other).asm8.view('i8')
        else:
            other = other.view('i8')
        return values, other
    def _try_coerce_result(self, result):
        """ reverse of try_coerce_args """
        if isinstance(result, np.ndarray):
            if result.dtype.kind in ['i', 'f', 'O']:
                result = result.astype('M8[ns]')
        elif isinstance(result, (np.integer, np.datetime64)):
            result = lib.Timestamp(result)
        return result
    @property
    def fill_value(self):
        # the raw i8 NaT sentinel
        return tslib.iNaT
    def _try_fill(self, value):
        """ if we are a NaT, return the actual fill value """
        if isinstance(value, type(tslib.NaT)) or np.array(isnull(value)).all():
            value = tslib.iNaT
        return value
    def fillna(self, value, limit=None,
               inplace=False, downcast=None):
        # straight putmask here
        values = self.values if inplace else self.values.copy()
        mask = isnull(self.values)
        value = self._try_fill(value)
        if limit is not None:
            if self.ndim > 2:
                raise NotImplementedError
            # stop filling once more than ``limit`` NA positions have been
            # seen along the last axis
            mask[mask.cumsum(self.ndim-1)>limit]=False
        np.putmask(values, mask, value)
        return [self if inplace else
                make_block(values,
                           fastpath=True, placement=self.mgr_locs)]
    def to_native_types(self, slicer=None, na_rep=None, date_format=None,
                        **kwargs):
        """ convert to our native types format, slicing if desired """
        values = self.values
        if slicer is not None:
            values = values[:, slicer]
        mask = isnull(values)
        rvalues = np.empty(values.shape, dtype=object)
        if na_rep is None:
            na_rep = 'NaT'
        rvalues[mask] = na_rep
        imask = (~mask).ravel()
        # format non-NA positions either with the default Timestamp repr or
        # an explicit strftime format
        if date_format is None:
            date_formatter = lambda x: Timestamp(x)._repr_base
        else:
            date_formatter = lambda x: Timestamp(x).strftime(date_format)
        rvalues.flat[imask] = np.array([date_formatter(val) for val in
                                        values.ravel()[imask]], dtype=object)
        return rvalues.tolist()
    def should_store(self, value):
        return issubclass(value.dtype.type, np.datetime64)
    def set(self, locs, values, check=False):
        """
        Modify Block in-place with new item value

        Returns
        -------
        None
        """
        if values.dtype != _NS_DTYPE:
            # Workaround for numpy 1.6 bug
            values = tslib.cast_to_nanoseconds(values)
        self.values[locs] = values
    def get_values(self, dtype=None):
        # return object dtype as Timestamps
        if dtype == object:
            return lib.map_infer(self.values.ravel(), lib.Timestamp)\
                .reshape(self.values.shape)
        return self.values
class SparseBlock(NonConsolidatableMixIn, Block):
    """ implement as a list of sparse arrays of the same dtype """
    __slots__ = ()
    is_sparse = True
    is_numeric = True
    _can_hold_na = True
    _ftype = 'sparse'
    _holder = SparseArray
    @property
    def shape(self):
        return (len(self.mgr_locs), self.sp_index.length)
    @property
    def itemsize(self):
        return self.dtype.itemsize
    @property
    def fill_value(self):
        #return np.nan
        return self.values.fill_value
    @fill_value.setter
    def fill_value(self, v):
        # we may need to upcast our fill to match our dtype
        if issubclass(self.dtype.type, np.floating):
            v = float(v)
        self.values.fill_value = v
    @property
    def sp_values(self):
        return self.values.sp_values
    @sp_values.setter
    def sp_values(self, v):
        # reset the sparse values
        self.values = SparseArray(v, sparse_index=self.sp_index,
                                  kind=self.kind, dtype=v.dtype,
                                  fill_value=self.values.fill_value,
                                  copy=False)
    @property
    def sp_index(self):
        return self.values.sp_index
    @property
    def kind(self):
        return self.values.kind
    def __len__(self):
        # NOTE(review): bare except means length is reported as 0 whenever
        # sp_index access fails (e.g. before values are set) -- confirm
        try:
            return self.sp_index.length
        except:
            return 0
    def copy(self, deep=True):
        return self.make_block_same_class(values=self.values,
                                          sparse_index=self.sp_index,
                                          kind=self.kind, copy=deep,
                                          placement=self.mgr_locs)
    def make_block_same_class(self, values, placement,
                              sparse_index=None, kind=None, dtype=None,
                              fill_value=None, copy=False, fastpath=True):
        """ return a new block """
        if dtype is None:
            dtype = self.dtype
        if fill_value is None:
            fill_value = self.values.fill_value
        # if not isinstance(values, SparseArray) and values.ndim != self.ndim:
        #     raise ValueError("ndim mismatch")
        if values.ndim == 2:
            nitems = values.shape[0]
            if nitems == 0:
                # kludgy, but SparseBlocks cannot handle slices, where the
                # output is 0-item, so let's convert it to a dense block: it
                # won't take space since there's 0 items, plus it will preserve
                # the dtype.
                return make_block(np.empty(values.shape, dtype=dtype),
                                  placement, fastpath=True,)
            elif nitems > 1:
                raise ValueError("Only 1-item 2d sparse blocks are supported")
            else:
                values = values.reshape(values.shape[1])
        new_values = SparseArray(values, sparse_index=sparse_index,
                                 kind=kind or self.kind, dtype=dtype,
                                 fill_value=fill_value, copy=copy)
        return make_block(new_values, ndim=self.ndim,
                          fastpath=fastpath, placement=placement)
    def interpolate(self, method='pad', axis=0, inplace=False,
                    limit=None, fill_value=None, **kwargs):
        # interpolate on the densified values
        values = com.interpolate_2d(
            self.values.to_dense(), method, axis, limit, fill_value)
        return self.make_block_same_class(values=values,
                                          placement=self.mgr_locs)
    def fillna(self, value, limit=None, inplace=False, downcast=None):
        # we may need to upcast our fill to match our dtype
        if limit is not None:
            raise NotImplementedError
        if issubclass(self.dtype.type, np.floating):
            value = float(value)
        values = self.values if inplace else self.values.copy()
        return [self.make_block_same_class(values=values.get_values(value),
                                           fill_value=value,
                                           placement=self.mgr_locs)]
    def shift(self, periods, axis=0):
        """ shift the block by periods """
        N = len(self.values.T)
        indexer = np.zeros(N, dtype=int)
        if periods > 0:
            indexer[periods:] = np.arange(N - periods)
        else:
            indexer[:periods] = np.arange(-periods, N)
        new_values = self.values.to_dense().take(indexer)
        # convert integer to float if necessary. need to do a lot more than
        # that, handle boolean etc also
        new_values, fill_value = com._maybe_upcast(new_values)
        if periods > 0:
            new_values[:periods] = fill_value
        else:
            new_values[periods:] = fill_value
        return [self.make_block_same_class(new_values, placement=self.mgr_locs)]
    def reindex_axis(self, indexer, method=None, axis=1, fill_value=None,
                     limit=None, mask_info=None):
        """
        Reindex using pre-computed indexer information
        """
        if axis < 1:
            raise AssertionError('axis must be at least 1, got %d' % axis)
        # taking on the 0th axis always here
        if fill_value is None:
            fill_value = self.fill_value
        return self.make_block_same_class(self.values.take(indexer),
                                          fill_value=fill_value,
                                          placement=self.mgr_locs)
    def sparse_reindex(self, new_index):
        """ sparse reindex and return a new block
            current reindex only works for float64 dtype! """
        values = self.values
        values = values.sp_index.to_int_index().reindex(
            values.sp_values.astype('float64'), values.fill_value, new_index)
        return self.make_block_same_class(values, sparse_index=new_index,
                                          placement=self.mgr_locs)
def make_block(values, placement, klass=None, ndim=None,
               dtype=None, fastpath=False):
    """Construct the Block subclass appropriate for ``values``.

    If ``klass`` is not given, it is inferred from ``dtype`` (defaulting to
    ``values.dtype``); otherwise the supplied class is used as-is.
    """
    if klass is None:
        blk_dtype = dtype or values.dtype
        scalar_type = blk_dtype.type
        if isinstance(values, SparseArray):
            klass = SparseBlock
        elif issubclass(scalar_type, np.floating):
            klass = FloatBlock
        elif (issubclass(scalar_type, np.timedelta64) and
              issubclass(scalar_type, np.integer)):
            # np.timedelta64 subclasses np.integer in numpy's scalar
            # hierarchy, so it must be picked off before the plain-int case
            klass = TimeDeltaBlock
        elif (not issubclass(scalar_type, np.datetime64) and
              issubclass(scalar_type, np.integer)):
            klass = IntBlock
        elif blk_dtype == np.bool_:
            klass = BoolBlock
        elif issubclass(scalar_type, np.datetime64):
            klass = DatetimeBlock
        elif issubclass(scalar_type, np.complexfloating):
            klass = ComplexBlock
        elif is_categorical(values):
            klass = CategoricalBlock
        else:
            klass = ObjectBlock
    return klass(values, ndim=ndim, fastpath=fastpath,
                 placement=placement)
# TODO: flexible with index=None and/or items=None
class BlockManager(PandasObject):
"""
Core internal data structure to implement DataFrame
Manage a bunch of labeled 2D mixed-type ndarrays. Essentially it's a
lightweight blocked set of labeled data to be manipulated by the DataFrame
public API class
Attributes
----------
shape
ndim
axes
values
items
Methods
-------
set_axis(axis, new_labels)
copy(deep=True)
get_dtype_counts
get_ftype_counts
get_dtypes
get_ftypes
apply(func, axes, block_filter_fn)
get_bool_data
get_numeric_data
get_slice(slice_like, axis)
get(label)
iget(loc)
get_scalar(label_tup)
take(indexer, axis)
reindex_axis(new_labels, axis)
reindex_indexer(new_labels, indexer, axis)
delete(label)
insert(loc, label, value)
set(label, value)
Parameters
----------
Notes
-----
This is *not* a public API class
"""
__slots__ = ['axes', 'blocks', '_ndim', '_shape', '_known_consolidated',
'_is_consolidated', '_blknos', '_blklocs']
    def __init__(self, blocks, axes, do_integrity_check=True, fastpath=True):
        # axes[0] is the items axis; remaining axes describe block shape
        self.axes = [_ensure_index(ax) for ax in axes]
        self.blocks = tuple(blocks)
        for block in blocks:
            if block.is_sparse:
                # sparse blocks are 1-d and must map to exactly one item
                if len(block.mgr_locs) != 1:
                    raise AssertionError("Sparse block refers to multiple items")
            else:
                if self.ndim != block.ndim:
                    raise AssertionError(('Number of Block dimensions (%d) must '
                                          'equal number of axes (%d)')
                                         % (block.ndim, self.ndim))
        if do_integrity_check:
            self._verify_integrity()
        # record consolidation state and build the item -> (block number,
        # location-within-block) maps
        self._consolidate_check()
        self._rebuild_blknos_and_blklocs()
    def make_empty(self, axes=None):
        """ return an empty BlockManager with the items axis of len 0 """
        if axes is None:
            axes = [_ensure_index([])] + [
                _ensure_index(a) for a in self.axes[1:]
            ]
        # preserve dtype if possible
        if self.ndim == 1:
            blocks = np.array([], dtype=self.array_dtype)
        else:
            blocks = []
        return self.__class__(blocks, axes)
    def __nonzero__(self):
        # a manager is always truthy, even when empty
        return True
    # Python3 compat
    __bool__ = __nonzero__
    @property
    def shape(self):
        # lengths of each axis, items axis first
        return tuple(len(ax) for ax in self.axes)
    @property
    def ndim(self):
        return len(self.axes)
    def set_axis(self, axis, new_labels):
        # replace the labels of one axis in-place; lengths must match
        new_labels = _ensure_index(new_labels)
        old_len = len(self.axes[axis])
        new_len = len(new_labels)
        if new_len != old_len:
            raise ValueError('Length mismatch: Expected axis has %d elements, '
                             'new values have %d elements' % (old_len, new_len))
        self.axes[axis] = new_labels
    def rename_axis(self, mapper, axis, copy=True):
        """
        Rename one of axes.

        Parameters
        ----------
        mapper : unary callable
        axis : int
        copy : boolean, default True
        """
        obj = self.copy(deep=copy)
        obj.set_axis(axis, _transform_index(self.axes[axis], mapper))
        return obj
    def add_prefix(self, prefix):
        # prepend ``prefix`` to every item label (items axis is axis 0);
        # ``'p%s'.__mod__`` formats each existing label after the prefix
        f = (str(prefix) + '%s').__mod__
        return self.rename_axis(f, axis=0)
    def add_suffix(self, suffix):
        # append ``suffix`` to every item label
        f = ('%s' + str(suffix)).__mod__
        return self.rename_axis(f, axis=0)
    @property
    def _is_single_block(self):
        # True when exactly one block spans all items contiguously, in order
        if self.ndim == 1:
            return True
        if len(self.blocks) != 1:
            return False
        blk = self.blocks[0]
        return (blk.mgr_locs.is_slice_like and
                blk.mgr_locs.as_slice == slice(0, len(self), 1))
    def _rebuild_blknos_and_blklocs(self):
        """
        Update mgr._blknos / mgr._blklocs.
        """
        # _blknos[i] is the index of the block holding item i;
        # _blklocs[i] is item i's position within that block
        new_blknos = np.empty(self.shape[0], dtype=np.int64)
        new_blklocs = np.empty(self.shape[0], dtype=np.int64)
        new_blknos.fill(-1)
        new_blklocs.fill(-1)
        for blkno, blk in enumerate(self.blocks):
            rl = blk.mgr_locs
            new_blknos[rl.indexer] = blkno
            new_blklocs[rl.indexer] = np.arange(len(rl))
        if (new_blknos == -1).any():
            # some item was not claimed by any block
            raise AssertionError("Gaps in blk ref_locs")
        self._blknos = new_blknos
        self._blklocs = new_blklocs
    # make items read only for now
    def _get_items(self):
        return self.axes[0]
    items = property(fget=_get_items)
    def _get_counts(self, f):
        """ return a dict of the counts of the function in BlockManager """
        self._consolidate_inplace()
        counts = dict()
        for b in self.blocks:
            # each block contributes one count per item it holds
            v = f(b)
            counts[v] = counts.get(v, 0) + b.shape[0]
        return counts
    def get_dtype_counts(self):
        # mapping of dtype name -> number of items with that dtype
        return self._get_counts(lambda b: b.dtype.name)
    def get_ftype_counts(self):
        return self._get_counts(lambda b: b.ftype)
    def get_dtypes(self):
        # per-item dtype, in items order
        dtypes = np.array([blk.dtype for blk in self.blocks])
        return com.take_1d(dtypes, self._blknos, allow_fill=False)
    def get_ftypes(self):
        ftypes = np.array([blk.ftype for blk in self.blocks])
        return com.take_1d(ftypes, self._blknos, allow_fill=False)
    def __getstate__(self):
        # pickle as (axes, block values, block items, extra); the versioned
        # entry in extra_state carries the authoritative mgr_locs
        block_values = [b.values for b in self.blocks]
        block_items = [self.items[b.mgr_locs.indexer] for b in self.blocks]
        axes_array = [ax for ax in self.axes]
        extra_state = {
            '0.14.1': {
                'axes': axes_array,
                'blocks': [dict(values=b.values,
                                mgr_locs=b.mgr_locs.indexer)
                           for b in self.blocks]
            }
        }
        # First three elements of the state are to maintain forward
        # compatibility with 0.13.1.
        return axes_array, block_values, block_items, extra_state
def __setstate__(self, state):
def unpickle_block(values, mgr_locs):
# numpy < 1.7 pickle compat
if values.dtype == 'M8[us]':
values = values.astype('M8[ns]')
return make_block(values, placement=mgr_locs)
if (isinstance(state, tuple) and len(state) >= 4
and '0.14.1' in state[3]):
state = state[3]['0.14.1']
self.axes = [_ensure_index(ax) for ax in state['axes']]
self.blocks = tuple(
unpickle_block(b['values'], b['mgr_locs'])
for b in state['blocks'])
else:
# discard anything after 3rd, support beta pickling format for a
# little while longer
ax_arrays, bvalues, bitems = state[:3]
self.axes = [_ensure_index(ax) for ax in ax_arrays]
if len(bitems) == 1 and self.axes[0].equals(bitems[0]):
# This is a workaround for pre-0.14.1 pickles that didn't
# support unpickling multi-block frames/panels with non-unique
# columns/items, because given a manager with items ["a", "b",
# "a"] there's no way of knowing which block's "a" is where.
#
# Single-block case can be supported under the assumption that
# block items corresponded to manager items 1-to-1.
all_mgr_locs = [slice(0, len(bitems[0]))]
else:
all_mgr_locs = [self.axes[0].get_indexer(blk_items)
for blk_items in bitems]
self.blocks = tuple(
unpickle_block(values, mgr_locs)
for values, mgr_locs in zip(bvalues, all_mgr_locs))
self._post_setstate()
def _post_setstate(self):
self._is_consolidated = False
self._known_consolidated = False
self._rebuild_blknos_and_blklocs()
    def __len__(self):
        # number of items (length of axis 0)
        return len(self.items)
    def __unicode__(self):
        # repr: class name, each axis, then each block
        output = com.pprint_thing(self.__class__.__name__)
        for i, ax in enumerate(self.axes):
            if i == 0:
                output += u('\nItems: %s') % ax
            else:
                output += u('\nAxis %d: %s') % (i, ax)
        for block in self.blocks:
            output += u('\n%s') % com.pprint_thing(block)
        return output
    def _verify_integrity(self):
        # every non-sparse block's non-items shape must match the manager's,
        # and the block placements together must account for all items
        mgr_shape = self.shape
        tot_items = sum(len(x.mgr_locs) for x in self.blocks)
        for block in self.blocks:
            if not block.is_sparse and block.shape[1:] != mgr_shape[1:]:
                construction_error(tot_items, block.shape[1:], self.axes)
        if len(self.items) != tot_items:
            raise AssertionError('Number of manager items must equal union of '
                                 'block items\n# manager items: {0}, # '
                                 'tot_items: {1}'.format(len(self.items),
                                                         tot_items))
    def apply(self, f, axes=None, filter=None, do_integrity_check=False, **kwargs):
        """
        iterate over the blocks, collect and create a new block manager

        Parameters
        ----------
        f : the callable or function name to operate on at the block level
        axes : optional (if not supplied, use self.axes)
        filter : list, if supplied, only call the block if the filter is in
            the block
        do_integrity_check : boolean, default False. Do the block manager integrity check

        Returns
        -------
        Block Manager (new object)
        """
        result_blocks = []
        # filter kwarg is used in replace-* family of methods
        if filter is not None:
            filter_locs = set(self.items.get_indexer_for(filter))
            if len(filter_locs) == len(self.items):
                # All items are included, as if there were no filtering
                filter = None
            else:
                kwargs['filter'] = filter_locs
        # certain operations carry arguments that must be re-aligned to each
        # block's own items before dispatching
        if f == 'where' and kwargs.get('align', True):
            align_copy = True
            align_keys = ['other', 'cond']
        elif f == 'putmask' and kwargs.get('align', True):
            align_copy = False
            align_keys = ['new', 'mask']
        elif f == 'eval':
            align_copy = False
            align_keys = ['other']
        elif f == 'fillna':
            # fillna internally does putmask, maybe it's better to do this
            # at mgr, not block level?
            align_copy = False
            align_keys = ['value']
        else:
            align_keys = []
        aligned_args = dict((k, kwargs[k]) for k in align_keys
                            if hasattr(kwargs[k], 'reindex_axis'))
        for b in self.blocks:
            if filter is not None:
                # pass through unchanged any block with no filtered items
                if not b.mgr_locs.isin(filter_locs).any():
                    result_blocks.append(b)
                    continue
            if aligned_args:
                b_items = self.items[b.mgr_locs.indexer]
                for k, obj in aligned_args.items():
                    axis = getattr(obj, '_info_axis_number', 0)
                    kwargs[k] = obj.reindex_axis(b_items, axis=axis,
                                                 copy=align_copy)
            applied = getattr(b, f)(**kwargs)
            # block methods may return a single block or a list of blocks
            if isinstance(applied, list):
                result_blocks.extend(applied)
            else:
                result_blocks.append(applied)
        if len(result_blocks) == 0:
            return self.make_empty(axes or self.axes)
        bm = self.__class__(result_blocks, axes or self.axes,
                            do_integrity_check=do_integrity_check)
        bm._consolidate_inplace()
        return bm
    # each of the following fans the named block-level operation out to every
    # block via ``apply`` and wraps the results in a new BlockManager
    def isnull(self, **kwargs):
        # NOTE(review): dispatches to the blocks' generic 'apply' with the
        # actual function passed through kwargs -- confirm against Block.apply
        return self.apply('apply', **kwargs)
    def where(self, **kwargs):
        return self.apply('where', **kwargs)
    def eval(self, **kwargs):
        return self.apply('eval', **kwargs)
    def setitem(self, **kwargs):
        return self.apply('setitem', **kwargs)
    def putmask(self, **kwargs):
        return self.apply('putmask', **kwargs)
    def diff(self, **kwargs):
        return self.apply('diff', **kwargs)
    def interpolate(self, **kwargs):
        return self.apply('interpolate', **kwargs)
    def shift(self, **kwargs):
        return self.apply('shift', **kwargs)
    def fillna(self, **kwargs):
        return self.apply('fillna', **kwargs)
    def downcast(self, **kwargs):
        return self.apply('downcast', **kwargs)
    def astype(self, dtype, **kwargs):
        return self.apply('astype', dtype=dtype, **kwargs)
    def convert(self, **kwargs):
        return self.apply('convert', **kwargs)
    def replace(self, **kwargs):
        return self.apply('replace', **kwargs)
    def replace_list(self, src_list, dest_list, inplace=False, regex=False):
        """ do a list replace """
        # figure out our mask a-priori to avoid repeated replacements
        values = self.as_matrix()
        def comp(s):
            # elementwise mask of positions equal to s (NA positions if s is NA)
            if isnull(s):
                return isnull(values)
            return _possibly_compare(values, getattr(s, 'asm8', s),
                                     operator.eq)
        masks = [comp(s) for i, s in enumerate(src_list)]
        result_blocks = []
        for blk in self.blocks:
            # its possible to get multiple result blocks here
            # replace ALWAYS will return a list
            rb = [blk if inplace else blk.copy()]
            for i, (s, d) in enumerate(zip(src_list, dest_list)):
                new_rb = []
                for b in rb:
                    if b.dtype == np.object_:
                        # object blocks go through the full (possibly regex)
                        # replace machinery
                        result = b.replace(s, d, inplace=inplace,
                                           regex=regex)
                        if isinstance(result, list):
                            new_rb.extend(result)
                        else:
                            new_rb.append(result)
                    else:
                        # get our mask for this element, sized to this
                        # particular block
                        m = masks[i][b.mgr_locs.indexer]
                        if m.any():
                            new_rb.extend(b.putmask(m, d, inplace=True))
                        else:
                            new_rb.append(b)
                rb = new_rb
            result_blocks.extend(rb)
        bm = self.__class__(result_blocks, self.axes)
        bm._consolidate_inplace()
        return bm
    def reshape_nd(self, axes, **kwargs):
        """ a 2d-nd reshape operation on a BlockManager """
        # delegated to each block via the apply() dispatcher
        return self.apply('reshape_nd', axes=axes, **kwargs)
def is_consolidated(self):
"""
Return True if more than one block with the same dtype
"""
if not self._known_consolidated:
self._consolidate_check()
return self._is_consolidated
def _consolidate_check(self):
ftypes = [blk.ftype for blk in self.blocks]
self._is_consolidated = len(ftypes) == len(set(ftypes))
self._known_consolidated = True
    @property
    def is_mixed_type(self):
        # Warning, consolidation needs to get checked upstairs
        # True when, after consolidation, more than one block (dtype) remains
        self._consolidate_inplace()
        return len(self.blocks) > 1
    @property
    def is_numeric_mixed_type(self):
        # Warning, consolidation needs to get checked upstairs
        # True only when every (consolidated) block is numeric
        self._consolidate_inplace()
        return all([block.is_numeric for block in self.blocks])
    @property
    def is_datelike_mixed_type(self):
        # Warning, consolidation needs to get checked upstairs
        # True when any (consolidated) block is datetime-like
        self._consolidate_inplace()
        return any([block.is_datelike for block in self.blocks])
    @property
    def is_view(self):
        """ return a boolean if we are a single block and are a view """
        if len(self.blocks) == 1:
            return self.blocks[0].is_view
        # It is technically possible to figure out which blocks are views
        # e.g. [ b.values.base is not None for b in self.blocks ]
        # but then we have the case of possibly some blocks being a view
        # and some blocks not. setting in theory is possible on the non-view
        # blocks w/o causing a SettingWithCopy raise/warn. But this is a bit
        # complicated
        return False
    def get_bool_data(self, copy=False):
        """
        Select only the boolean blocks as a new manager.

        Parameters
        ----------
        copy : boolean, default False
            Whether to copy the blocks
        """
        self._consolidate_inplace()
        return self.combine([b for b in self.blocks if b.is_bool], copy)
    def get_numeric_data(self, copy=False):
        """
        Select only the numeric blocks as a new manager.

        Parameters
        ----------
        copy : boolean, default False
            Whether to copy the blocks
        """
        self._consolidate_inplace()
        return self.combine([b for b in self.blocks if b.is_numeric], copy)
    def combine(self, blocks, copy=True):
        """ return a new manager with the blocks """
        if len(blocks) == 0:
            return self.make_empty()
        # FIXME: optimization potential
        # sorted union of the selected blocks' item positions
        indexer = np.sort(np.concatenate([b.mgr_locs.as_array for b in blocks]))
        inv_indexer = lib.get_reverse_indexer(indexer, self.shape[0])
        new_items = self.items.take(indexer)
        new_blocks = []
        for b in blocks:
            b = b.copy(deep=copy)
            # remap old manager locations into positions within the subset
            b.mgr_locs = com.take_1d(inv_indexer, b.mgr_locs.as_array, axis=0,
                                     allow_fill=False)
            new_blocks.append(b)
        new_axes = list(self.axes)
        new_axes[0] = new_items
        return self.__class__(new_blocks, new_axes, do_integrity_check=False)
    def get_slice(self, slobj, axis=0):
        """Return a new manager sliced by ``slobj`` along ``axis``."""
        if axis >= self.ndim:
            raise IndexError("Requested axis not found in manager")
        if axis == 0:
            # items axis: may split/shift block placements
            new_blocks = self._slice_take_blocks_ax0(slobj)
        else:
            # non-items axis: slice every block uniformly
            slicer = [slice(None)] * (axis + 1)
            slicer[axis] = slobj
            slicer = tuple(slicer)
            new_blocks = [blk.getitem_block(slicer) for blk in self.blocks]
        new_axes = list(self.axes)
        new_axes[axis] = new_axes[axis][slobj]
        bm = self.__class__(new_blocks, new_axes, do_integrity_check=False,
                            fastpath=True)
        bm._consolidate_inplace()
        return bm
    def __contains__(self, item):
        # membership is defined on the items (axis 0) labels
        return item in self.items
    @property
    def nblocks(self):
        # number of underlying blocks
        return len(self.blocks)
    def copy(self, deep=True):
        """
        Make deep or shallow copy of BlockManager

        Parameters
        ----------
        deep : boolean or string, default True
            If False, return shallow copy (do not copy data)
            If 'all', copy data and a deep copy of the index

        Returns
        -------
        copy : BlockManager
        """
        # this preserves the notion of view copying of axes
        if deep:
            # 'all' deep-copies the axes too; plain True only takes views of
            # the axes (block data is still copied via apply below)
            if deep == 'all':
                copy = lambda ax: ax.copy(deep=True)
            else:
                copy = lambda ax: ax.view()
            new_axes = [ copy(ax) for ax in self.axes]
        else:
            new_axes = list(self.axes)
        return self.apply('copy', axes=new_axes, deep=deep,
                          do_integrity_check=False)
    def as_matrix(self, items=None):
        """Return the data as a single ndarray, optionally reindexed to
        ``items`` along axis 0."""
        if len(self.blocks) == 0:
            # no data: empty float array of the right shape
            return np.empty(self.shape, dtype=float)
        if items is not None:
            mgr = self.reindex_axis(items, axis=0)
        else:
            mgr = self
        if self._is_single_block or not self.is_mixed_type:
            # single dtype: the lone block already holds the full array
            return mgr.blocks[0].get_values()
        else:
            # mixed dtypes: interleave into one common-dtype array
            return mgr._interleave()
    def _interleave(self):
        """
        Return ndarray from blocks with specified item order
        Items must be contained in the blocks
        """
        # common dtype all blocks can be upcast into
        dtype = _interleaved_dtype(self.blocks)
        result = np.empty(self.shape, dtype=dtype)
        if result.shape[0] == 0:
            # Workaround for numpy 1.7 bug:
            #
            # >>> a = np.empty((0,10))
            # >>> a[slice(0,0)]
            # array([], shape=(0, 10), dtype=float64)
            # >>> a[[]]
            # Traceback (most recent call last):
            #   File "<stdin>", line 1, in <module>
            # IndexError: index 0 is out of bounds for axis 0 with size 0
            return result
        # tracks which item rows have been written (float 0/1 flags)
        itemmask = np.zeros(self.shape[0])
        for blk in self.blocks:
            rl = blk.mgr_locs
            result[rl.indexer] = blk.get_values(dtype)
            itemmask[rl.indexer] = 1
        if not itemmask.all():
            raise AssertionError('Some items were not contained in blocks')
        return result
    def xs(self, key, axis=1, copy=True, takeable=False):
        """Return a cross-section along ``axis`` (>= 1) as a new manager.

        Parameters
        ----------
        key : label, or position when ``takeable`` is True
        axis : int, default 1
        copy : bool, default True
            Copy single-block values instead of returning a view.
        takeable : bool, default False
        """
        if axis < 1:
            raise AssertionError('Can only take xs across axis >= 1, got %d'
                                 % axis)
        # take by position
        if takeable:
            loc = key
        else:
            loc = self.axes[axis].get_loc(key)
        slicer = [slice(None, None) for _ in range(self.ndim)]
        slicer[axis] = loc
        slicer = tuple(slicer)
        new_axes = list(self.axes)
        # could be an array indexer!
        if isinstance(loc, (slice, np.ndarray)):
            new_axes[axis] = new_axes[axis][loc]
        else:
            # scalar location drops the axis entirely
            new_axes.pop(axis)
        new_blocks = []
        if len(self.blocks) > 1:
            # we must copy here as we are mixed type
            for blk in self.blocks:
                newb = make_block(values=blk.values[slicer],
                                  klass=blk.__class__, fastpath=True,
                                  placement=blk.mgr_locs)
                new_blocks.append(newb)
        elif len(self.blocks) == 1:
            block = self.blocks[0]
            vals = block.values[slicer]
            if copy:
                vals = vals.copy()
            new_blocks = [make_block(values=vals, placement=block.mgr_locs,
                                     klass=block.__class__, fastpath=True,)]
        return self.__class__(new_blocks, new_axes)
    def fast_xs(self, loc):
        """
        get a cross sectional for a given location in the
        items ; handle dups

        return the result, is *could* be a view in the case of a
        single block
        """
        if len(self.blocks) == 1:
            return self.blocks[0].values[:, loc]
        items = self.items
        # non-unique (GH4726)
        if not items.is_unique:
            # fall back to the full interleave to preserve duplicate rows
            result = self._interleave()
            if self.ndim == 2:
                result = result.T
            return result[loc]
        # unique
        dtype = _interleaved_dtype(self.blocks)
        n = len(items)
        result = np.empty(n, dtype=dtype)
        for blk in self.blocks:
            # Such assignment may incorrectly coerce NaT to None
            # result[blk.mgr_locs] = blk._slice((slice(None), loc))
            for i, rl in enumerate(blk.mgr_locs):
                result[rl] = blk._try_coerce_result(blk.iget((i, loc)))
        return result
def consolidate(self):
"""
Join together blocks having same dtype
Returns
-------
y : BlockManager
"""
if self.is_consolidated():
return self
bm = self.__class__(self.blocks, self.axes)
bm._consolidate_inplace()
return bm
    def _consolidate_inplace(self):
        # merge same-dtype blocks in place, then refresh the cached flags and
        # the blkno/blkloc lookup tables
        if not self.is_consolidated():
            self.blocks = tuple(_consolidate(self.blocks))
            self._is_consolidated = True
            self._known_consolidated = True
            self._rebuild_blknos_and_blklocs()
    def get(self, item, fastpath=True):
        """
        Return values for selected item (ndarray or BlockManager).
        """
        if self.items.is_unique:
            if not isnull(item):
                loc = self.items.get_loc(item)
            else:
                # null label: locate the (single) null position in items
                indexer = np.arange(len(self.items))[isnull(self.items)]
                # allow a single nan location indexer
                if not np.isscalar(indexer):
                    if len(indexer) == 1:
                        loc = indexer.item()
                    else:
                        raise ValueError("cannot label index with a null key")
                # NOTE(review): if 'indexer' were scalar here, 'loc' would be
                # unbound below (NameError); presumably np.arange(...)[mask]
                # is always an array — confirm.
            return self.iget(loc, fastpath=fastpath)
        else:
            if isnull(item):
                raise ValueError("cannot label index with a null key")
            # duplicate labels: reindex to all matching positions
            indexer = self.items.get_indexer_for([item])
            return self.reindex_indexer(new_axis=self.items[indexer],
                                        indexer=indexer, axis=0, allow_dups=True)
    def iget(self, i, fastpath=True):
        """
        Return the data as a SingleBlockManager if fastpath=True and possible

        Otherwise return as a ndarray
        """
        block = self.blocks[self._blknos[i]]
        values = block.iget(self._blklocs[i])
        if not fastpath or block.is_sparse or values.ndim != 1:
            return values
        # fastpath shortcut for select a single-dim from a 2-dim BM
        return SingleBlockManager([ block.make_block_same_class(values,
                                    placement=slice(0, len(values)),
                                    ndim=1,
                                    fastpath=True) ],
                                  self.axes[1])
    def get_scalar(self, tup):
        """
        Retrieve single item

        Parameters
        ----------
        tup : tuple of labels, one per axis
        """
        full_loc = list(ax.get_loc(x)
                        for ax, x in zip(self.axes, tup))
        blk = self.blocks[self._blknos[full_loc[0]]]
        # translate the items-axis location into the block-local location
        full_loc[0] = self._blklocs[full_loc[0]]
        # FIXME: this may return non-upcasted types?
        return blk.values[tuple(full_loc)]
    def delete(self, item):
        """
        Delete selected item (items if non-unique) in-place.
        """
        indexer = self.items.get_loc(item)
        is_deleted = np.zeros(self.shape[0], dtype=np.bool_)
        is_deleted[indexer] = True
        # how far each surviving location shifts left after the deletion
        ref_loc_offset = -is_deleted.cumsum()
        is_blk_deleted = [False] * len(self.blocks)
        if isinstance(indexer, int):
            affected_start = indexer
        else:
            affected_start = is_deleted.nonzero()[0][0]
        # only blocks at/after the first deleted position need adjusting
        for blkno, _ in _fast_count_smallints(self._blknos[affected_start:]):
            blk = self.blocks[blkno]
            bml = blk.mgr_locs
            blk_del = is_deleted[bml.indexer].nonzero()[0]
            if len(blk_del) == len(bml):
                # every item of this block was deleted: drop the whole block
                is_blk_deleted[blkno] = True
                continue
            elif len(blk_del) != 0:
                blk.delete(blk_del)
                bml = blk.mgr_locs
            blk.mgr_locs = bml.add(ref_loc_offset[bml.indexer])
        # FIXME: use Index.delete as soon as it uses fastpath=True
        self.axes[0] = self.items[~is_deleted]
        self.blocks = tuple(b for blkno, b in enumerate(self.blocks)
                            if not is_blk_deleted[blkno])
        self._shape = None
        self._rebuild_blknos_and_blklocs()
    def set(self, item, value, check=False):
        """
        Set new item in-place. Does not consolidate. Adds new Block if not
        contained in the current set of items

        if check, then validate that we are not setting the same data in-place
        """
        # FIXME: refactor, clearly separate broadcasting & zip-like assignment
        #        can prob also fix the various if tests for sparse/categorical
        value_is_sparse = isinstance(value, SparseArray)
        value_is_cat = is_categorical(value)
        value_is_nonconsolidatable = value_is_sparse or value_is_cat
        # value_getitem maps a BlockPlacement to the slice of 'value' that
        # should be stored at those positions
        if value_is_sparse:
            # sparse
            assert self.ndim == 2
            def value_getitem(placement):
                return value
        elif value_is_cat:
            # categorical
            def value_getitem(placement):
                return value
        else:
            if value.ndim == self.ndim - 1:
                # broadcast a single row across every target position
                value = value.reshape((1,) + value.shape)
                def value_getitem(placement):
                    return value
            else:
                def value_getitem(placement):
                    return value[placement.indexer]
            if value.shape[1:] != self.shape[1:]:
                raise AssertionError('Shape of new values must be compatible '
                                     'with manager shape')
        try:
            loc = self.items.get_loc(item)
        except KeyError:
            # This item wasn't present, just insert at end
            self.insert(len(self.items), item, value)
            return
        if isinstance(loc, int):
            loc = [loc]
        blknos = self._blknos[loc]
        blklocs = self._blklocs[loc].copy()
        unfit_mgr_locs = []
        unfit_val_locs = []
        removed_blknos = []
        for blkno, val_locs in _get_blkno_placements(blknos, len(self.blocks),
                                                     group=True):
            blk = self.blocks[blkno]
            blk_locs = blklocs[val_locs.indexer]
            if blk.should_store(value):
                # dtype-compatible: store directly into the existing block
                blk.set(blk_locs, value_getitem(val_locs), check=check)
            else:
                unfit_mgr_locs.append(blk.mgr_locs.as_array[blk_locs])
                unfit_val_locs.append(val_locs)
                # If all block items are unfit, schedule the block for removal.
                if len(val_locs) == len(blk.mgr_locs):
                    removed_blknos.append(blkno)
                else:
                    self._blklocs[blk.mgr_locs.indexer] = -1
                    blk.delete(blk_locs)
                    self._blklocs[blk.mgr_locs.indexer] = np.arange(len(blk))
        if len(removed_blknos):
            # Remove blocks & update blknos accordingly
            is_deleted = np.zeros(self.nblocks, dtype=np.bool_)
            is_deleted[removed_blknos] = True
            new_blknos = np.empty(self.nblocks, dtype=np.int64)
            new_blknos.fill(-1)
            new_blknos[~is_deleted] = np.arange(self.nblocks -
                                                len(removed_blknos))
            self._blknos = com.take_1d(new_blknos, self._blknos, axis=0,
                                       allow_fill=False)
            self.blocks = tuple(blk for i, blk in enumerate(self.blocks)
                                if i not in set(removed_blknos))
        if unfit_val_locs:
            unfit_mgr_locs = np.concatenate(unfit_mgr_locs)
            unfit_count = len(unfit_mgr_locs)
            new_blocks = []
            if value_is_nonconsolidatable:
                # This code (ab-)uses the fact that sparse blocks contain only
                # one item.
                new_blocks.extend(
                    make_block(values=value.copy(), ndim=self.ndim,
                               placement=slice(mgr_loc, mgr_loc + 1))
                    for mgr_loc in unfit_mgr_locs)
                self._blknos[unfit_mgr_locs] = (np.arange(unfit_count) +
                                                len(self.blocks))
                self._blklocs[unfit_mgr_locs] = 0
            else:
                # unfit_val_locs contains BlockPlacement objects
                unfit_val_items = unfit_val_locs[0].append(unfit_val_locs[1:])
                new_blocks.append(
                    make_block(values=value_getitem(unfit_val_items),
                               ndim=self.ndim, placement=unfit_mgr_locs))
                self._blknos[unfit_mgr_locs] = len(self.blocks)
                self._blklocs[unfit_mgr_locs] = np.arange(unfit_count)
            self.blocks += tuple(new_blocks)
            # Newly created block's dtype may already be present.
            self._known_consolidated = False
    def insert(self, loc, item, value, allow_duplicates=False):
        """
        Insert item at selected position.

        Parameters
        ----------
        loc : int
        item : hashable
        value : array_like
        allow_duplicates: bool
            If False, trying to insert non-unique item will raise
        """
        if not allow_duplicates and item in self.items:
            # Should this be a different kind of error??
            raise ValueError('cannot insert %s, already exists' % item)
        if not isinstance(loc, int):
            raise TypeError("loc must be int")
        block = make_block(values=value,
                           ndim=self.ndim,
                           placement=slice(loc, loc+1))
        # shift manager locations of blocks holding items at/after 'loc'
        for blkno, count in _fast_count_smallints(self._blknos[loc:]):
            blk = self.blocks[blkno]
            if count == len(blk.mgr_locs):
                # whole block sits at/after the insertion point
                blk.mgr_locs = blk.mgr_locs.add(1)
            else:
                new_mgr_locs = blk.mgr_locs.as_array.copy()
                new_mgr_locs[new_mgr_locs >= loc] += 1
                blk.mgr_locs = new_mgr_locs
        if loc == self._blklocs.shape[0]:
            # np.append is a lot faster (at least in numpy 1.7.1), let's use it
            # if we can.
            self._blklocs = np.append(self._blklocs, 0)
            self._blknos = np.append(self._blknos, len(self.blocks))
        else:
            self._blklocs = np.insert(self._blklocs, loc, 0)
            self._blknos = np.insert(self._blknos, loc, len(self.blocks))
        self.axes[0] = self.items.insert(loc, item)
        self.blocks += (block,)
        self._shape = None
        self._known_consolidated = False
        if len(self.blocks) > 100:
            # keep the number of blocks bounded
            self._consolidate_inplace()
    def reindex_axis(self, new_index, axis, method=None, limit=None,
                     fill_value=None, copy=True):
        """
        Conform block manager to new index.

        Computes the indexer via Index.reindex, then delegates to
        reindex_indexer.
        """
        new_index = _ensure_index(new_index)
        new_index, indexer = self.axes[axis].reindex(
            new_index, method=method, limit=limit)
        return self.reindex_indexer(new_index, indexer, axis=axis,
                                    fill_value=fill_value, copy=copy)
    def reindex_indexer(self, new_axis, indexer, axis, fill_value=None,
                        allow_dups=False, copy=True):
        """
        Realign along ``axis`` using a precomputed indexer.

        Parameters
        ----------
        new_axis : Index
        indexer : ndarray of int64 or None
            pandas-indexer with -1's only.
        axis : int
        fill_value : object
        allow_dups : bool
            Permit reindexing from an axis with duplicate labels.
        """
        if indexer is None:
            # identity reindex: view or copy of self with the axis swapped
            if new_axis is self.axes[axis] and not copy:
                return self
            result = self.copy(deep=copy)
            result.axes = list(self.axes)
            result.axes[axis] = new_axis
            return result
        self._consolidate_inplace()
        # trying to reindex on an axis with duplicates
        if (not allow_dups and not self.axes[axis].is_unique
            and len(indexer)):
            raise ValueError("cannot reindex from a duplicate axis")
        if axis >= self.ndim:
            raise IndexError("Requested axis not found in manager")
        if axis == 0:
            new_blocks = self._slice_take_blocks_ax0(
                indexer, fill_tuple=(fill_value,))
        else:
            new_blocks = [blk.take_nd(indexer, axis=axis,
                                      fill_tuple=(fill_value if fill_value is not None else
                                                  blk.fill_value,))
                          for blk in self.blocks]
        new_axes = list(self.axes)
        new_axes[axis] = new_axis
        return self.__class__(new_blocks, new_axes)
    def _slice_take_blocks_ax0(self, slice_or_indexer, fill_tuple=None):
        """
        Slice/take blocks along axis=0.

        Overloaded for SingleBlock

        Parameters
        ----------
        slice_or_indexer : slice, boolean mask or integer indexer
        fill_tuple : tuple, optional
            (fill_value,); its presence enables -1 fill positions.

        Returns
        -------
        new_blocks : list of Block
        """
        allow_fill = fill_tuple is not None
        sl_type, slobj, sllen = _preprocess_slice_or_indexer(
            slice_or_indexer, self.shape[0], allow_fill=allow_fill)
        if self._is_single_block:
            blk = self.blocks[0]
            if sl_type in ('slice', 'mask'):
                return [blk.getitem_block(slobj,
                                          new_mgr_locs=slice(0, sllen))]
            elif not allow_fill or self.ndim == 1:
                if allow_fill and fill_tuple[0] is None:
                    _, fill_value = com._maybe_promote(blk.dtype)
                    fill_tuple = (fill_value,)
                return [blk.take_nd(slobj, axis=0,
                                    new_mgr_locs=slice(0, sllen),
                                    fill_tuple=fill_tuple)]
        # multi-block: map each requested position to (block no, block loc)
        if sl_type in ('slice', 'mask'):
            blknos = self._blknos[slobj]
            blklocs = self._blklocs[slobj]
        else:
            blknos = com.take_1d(self._blknos, slobj, fill_value=-1,
                                 allow_fill=allow_fill)
            blklocs = com.take_1d(self._blklocs, slobj, fill_value=-1,
                                  allow_fill=allow_fill)
        # When filling blknos, make sure blknos is updated before appending to
        # blocks list, that way new blkno is exactly len(blocks).
        #
        # FIXME: mgr_groupby_blknos must return mgr_locs in ascending order,
        # pytables serialization will break otherwise.
        blocks = []
        for blkno, mgr_locs in _get_blkno_placements(blknos, len(self.blocks),
                                                     group=True):
            if blkno == -1:
                # If we've got here, fill_tuple was not None.
                fill_value = fill_tuple[0]
                blocks.append(self._make_na_block(
                    placement=mgr_locs, fill_value=fill_value))
            else:
                blk = self.blocks[blkno]
                # Otherwise, slicing along items axis is necessary.
                if not blk._can_consolidate:
                    # A non-consolidatable block, it's easy, because there's only one item
                    # and each mgr loc is a copy of that single item.
                    for mgr_loc in mgr_locs:
                        newblk = blk.copy(deep=True)
                        newblk.mgr_locs = slice(mgr_loc, mgr_loc + 1)
                        blocks.append(newblk)
                else:
                    blocks.append(blk.take_nd(
                        blklocs[mgr_locs.indexer], axis=0,
                        new_mgr_locs=mgr_locs, fill_tuple=None))
        return blocks
    def _make_na_block(self, placement, fill_value=None):
        """Build a block of ``fill_value`` (default NaN) for the given
        placement, inferring the dtype from the fill value."""
        # TODO: infer dtypes other than float64 from fill_value
        if fill_value is None:
            fill_value = np.nan
        block_shape = list(self.shape)
        block_shape[0] = len(placement)
        dtype, fill_value = com._infer_dtype_from_scalar(fill_value)
        block_values = np.empty(block_shape, dtype=dtype)
        block_values.fill(fill_value)
        return make_block(block_values, placement=placement)
    def take(self, indexer, axis=1, verify=True, convert=True):
        """
        Take items along any axis.
        """
        self._consolidate_inplace()
        # normalize slices / arbitrary sequences to an int64 ndarray
        indexer = np.arange(indexer.start, indexer.stop, indexer.step,
                            dtype='int64') if isinstance(indexer, slice) \
            else np.asanyarray(indexer, dtype='int64')
        n = self.shape[axis]
        if convert:
            indexer = maybe_convert_indices(indexer, n)
        if verify:
            # after conversion, indices must lie in [0, n) and not be -1
            if ((indexer == -1) | (indexer >= n)).any():
                raise Exception('Indices must be nonzero and less than '
                                'the axis length')
        new_labels = self.axes[axis].take(indexer)
        return self.reindex_indexer(new_axis=new_labels, indexer=indexer,
                                    axis=axis, allow_dups=True)
    def merge(self, other, lsuffix='', rsuffix=''):
        """Concatenate another manager's items onto this one (same non-item
        axes required); overlapping item labels get the given suffixes."""
        if not self._is_indexed_like(other):
            raise AssertionError('Must have same axes to merge managers')
        l, r = items_overlap_with_suffix(left=self.items, lsuffix=lsuffix,
                                         right=other.items, rsuffix=rsuffix)
        new_items = _concat_indexes([l, r])
        new_blocks = [blk.copy(deep=False)
                      for blk in self.blocks]
        # other's blocks are appended after ours: offset their locations
        offset = self.shape[0]
        for blk in other.blocks:
            blk = blk.copy(deep=False)
            blk.mgr_locs = blk.mgr_locs.add(offset)
            new_blocks.append(blk)
        new_axes = list(self.axes)
        new_axes[0] = new_items
        return self.__class__(_consolidate(new_blocks), new_axes)
    def _is_indexed_like(self, other):
        """
        Check all axes except items
        """
        if self.ndim != other.ndim:
            raise AssertionError(('Number of dimensions must agree '
                                  'got %d and %d') % (self.ndim, other.ndim))
        for ax, oax in zip(self.axes[1:], other.axes[1:]):
            if not ax.equals(oax):
                return False
        return True
def equals(self, other):
self_axes, other_axes = self.axes, other.axes
if len(self_axes) != len(other_axes):
return False
if not all (ax1.equals(ax2) for ax1, ax2 in zip(self_axes, other_axes)):
return False
self._consolidate_inplace()
other._consolidate_inplace()
return all(block.equals(oblock) for block, oblock in
zip(self.blocks, other.blocks))
class SingleBlockManager(BlockManager):
    """ manage a single block with a single axis (e.g. backing a Series) """
    ndim = 1
    # one block is consolidated by definition
    _is_consolidated = True
    _known_consolidated = True
    __slots__ = ()
    def __init__(self, block, axis, do_integrity_check=False, fastpath=False):
        """
        Parameters
        ----------
        block : Block, array-like, or single-element list thereof
        axis : Index or single-element list of Index
        do_integrity_check : bool, default False
            Accepted for BlockManager signature compatibility.
        fastpath : bool, default False
            Skip index coercion and consolidation when the caller guarantees
            a proper Index and a single Block.
        """
        if isinstance(axis, list):
            if len(axis) != 1:
                raise ValueError(
                    "cannot create SingleBlockManager with more than 1 axis")
            axis = axis[0]
        # passed from constructor, single block, single axis
        if fastpath:
            self.axes = [axis]
            if isinstance(block, list):
                # empty block
                if len(block) == 0:
                    block = [np.array([])]
                elif len(block) != 1:
                    raise ValueError('Cannot create SingleBlockManager with '
                                     'more than 1 block')
                block = block[0]
        else:
            self.axes = [_ensure_index(axis)]
            # create the block here
            if isinstance(block, list):
                # provide consolidation to the interleaved_dtype
                if len(block) > 1:
                    dtype = _interleaved_dtype(block)
                    block = [b.astype(dtype) for b in block]
                    block = _consolidate(block)
                if len(block) != 1:
                    raise ValueError('Cannot create SingleBlockManager with '
                                     'more than 1 block')
                block = block[0]
        if not isinstance(block, Block):
            block = make_block(block,
                               placement=slice(0, len(axis)),
                               ndim=1, fastpath=True)
        self.blocks = [block]
    def _post_setstate(self):
        # nothing to rebuild after unpickling in the single-block case
        pass
    @property
    def _block(self):
        # the sole block
        return self.blocks[0]
    @property
    def _values(self):
        # the sole block's underlying values
        return self._block.values
    def reindex(self, new_axis, indexer=None, method=None, fill_value=None,
                limit=None, copy=True):
        """Conform to ``new_axis``, filling/interpolating as requested."""
        # if we are the same and don't copy, just return
        if self.index.equals(new_axis):
            if copy:
                return self.copy(deep=True)
            else:
                return self
        values = self._block.get_values()
        if indexer is None:
            indexer = self.items.get_indexer_for(new_axis)
        if fill_value is None:
            # FIXME: is fill_value used correctly in sparse blocks?
            if not self._block.is_sparse:
                fill_value = self._block.fill_value
            else:
                fill_value = np.nan
        new_values = com.take_1d(values, indexer,
                                 fill_value=fill_value)
        # fill if needed
        if method is not None or limit is not None:
            new_values = com.interpolate_2d(new_values, method=method,
                                            limit=limit, fill_value=fill_value)
        if self._block.is_sparse:
            # keep the sparse block subclass on reconstruction; otherwise the
            # module-level make_block factory is used below
            make_block = self._block.make_block_same_class
        block = make_block(new_values, copy=copy,
                           placement=slice(0, len(new_axis)))
        mgr = SingleBlockManager(block, new_axis)
        mgr._consolidate_inplace()
        return mgr
    def get_slice(self, slobj, axis=0):
        # slice both the block values and the axis in one step
        if axis >= self.ndim:
            raise IndexError("Requested axis not found in manager")
        return self.__class__(self._block._slice(slobj),
                              self.index[slobj], fastpath=True)
    @property
    def index(self):
        # the single axis
        return self.axes[0]
    def convert(self, **kwargs):
        """ convert the whole block as one """
        kwargs['by_item'] = False
        return self.apply('convert', **kwargs)
    @property
    def dtype(self):
        return self._values.dtype
    @property
    def array_dtype(self):
        return self._block.array_dtype
    @property
    def ftype(self):
        return self._block.ftype
    def get_dtype_counts(self):
        # single block -> exactly one dtype
        return {self.dtype.name: 1}
    def get_ftype_counts(self):
        return {self.ftype: 1}
    def get_dtypes(self):
        return np.array([self._block.dtype])
    def get_ftypes(self):
        return np.array([self._block.ftype])
    @property
    def values(self):
        return self._values.view()
    def get_values(self):
        """ return a dense type view """
        return np.array(self._block.to_dense(),copy=False)
    @property
    def itemsize(self):
        return self._values.itemsize
    @property
    def _can_hold_na(self):
        return self._block._can_hold_na
    def is_consolidated(self):
        # a single block is always consolidated
        return True
    def _consolidate_check(self):
        pass
    def _consolidate_inplace(self):
        pass
    def delete(self, item):
        """
        Delete single item from SingleBlockManager.

        Ensures that self.blocks doesn't become empty.
        """
        loc = self.items.get_loc(item)
        self._block.delete(loc)
        self.axes[0] = self.axes[0].delete(loc)
    def fast_xs(self, loc):
        """
        fast path for getting a cross-section
        return a view of the data
        """
        return self._block.values[loc]
def construction_error(tot_items, block_shape, axes, e=None):
    """Raise a helpful error describing a manager construction failure.

    Parameters
    ----------
    tot_items : int
        Number of items the caller tried to place.
    block_shape : sequence of int
        Shape of the block data beyond the items axis.
    axes : sequence of Index
        Axes the caller supplied.
    e : Exception, optional
        Original exception; re-raised verbatim when the shapes actually
        agree (a shape-mismatch message would be misleading then).
    """
    passed = tuple(int(v) for v in [tot_items] + list(block_shape))
    implied = tuple(int(len(ax)) for ax in axes)
    if passed == implied and e is not None:
        raise e
    raise ValueError("Shape of passed values is {0}, indices imply {1}".format(
        passed, implied))
def create_block_manager_from_blocks(blocks, axes):
    """Build a consolidated BlockManager from blocks (or one raw values
    array) and axes, translating shape failures into a helpful error."""
    try:
        if len(blocks) == 1 and not isinstance(blocks[0], Block):
            # It's OK if a single block is passed as values, its placement is
            # basically "all items", but if there're many, don't bother
            # converting, it's an error anyway.
            blocks = [make_block(values=blocks[0],
                                 placement=slice(0, len(axes[0])))]
        mgr = BlockManager(blocks, axes)
        mgr._consolidate_inplace()
        return mgr
    except (ValueError) as e:
        blocks = [getattr(b, 'values', b) for b in blocks]
        tot_items = sum(b.shape[0] for b in blocks)
        construction_error(tot_items, blocks[0].shape[1:], axes, e)
def create_block_manager_from_arrays(arrays, names, axes):
    """Build a consolidated BlockManager from per-item arrays, translating
    shape failures into a helpful error."""
    try:
        blocks = form_blocks(arrays, names, axes)
        mgr = BlockManager(blocks, axes)
        mgr._consolidate_inplace()
        return mgr
    except (ValueError) as e:
        construction_error(len(arrays), arrays[0].shape, axes, e)
def form_blocks(arrays, names, axes):
    """Group per-item arrays into dtype-homogeneous blocks.

    Parameters
    ----------
    arrays : sequence of array-likes, one per name
    names : sequence of item labels, parallel to ``arrays``
    axes : list of Index; ``axes[0]`` is the items axis

    Returns
    -------
    list of Block

    Fix: the cat_items list-comprehension previously used ``names`` as its
    loop variable, shadowing (and, under Python 2's leaking comprehension
    scope, rebinding) the ``names`` parameter; renamed to ``_``.
    """
    # put "leftover" items in float bucket, where else?
    # generalize?
    float_items = []
    complex_items = []
    int_items = []
    bool_items = []
    object_items = []
    sparse_items = []
    datetime_items = []
    cat_items = []
    extra_locs = []
    names_idx = Index(names)
    if names_idx.equals(axes[0]):
        names_indexer = np.arange(len(names_idx))
    else:
        assert names_idx.intersection(axes[0]).is_unique
        names_indexer = names_idx.get_indexer_for(axes[0])
    for i, name_idx in enumerate(names_indexer):
        if name_idx == -1:
            # item present in axes[0] but not in names: fill with NaN later
            extra_locs.append(i)
            continue
        k = names[name_idx]
        v = arrays[name_idx]
        if isinstance(v, (SparseArray, ABCSparseSeries)):
            sparse_items.append((i, k, v))
        elif issubclass(v.dtype.type, np.floating):
            float_items.append((i, k, v))
        elif issubclass(v.dtype.type, np.complexfloating):
            complex_items.append((i, k, v))
        elif issubclass(v.dtype.type, np.datetime64):
            if v.dtype != _NS_DTYPE:
                v = tslib.cast_to_nanoseconds(v)
            if hasattr(v, 'tz') and v.tz is not None:
                # tz-aware datetimes are kept as objects
                object_items.append((i, k, v))
            else:
                datetime_items.append((i, k, v))
        elif issubclass(v.dtype.type, np.integer):
            if v.dtype == np.uint64:
                # HACK #2355 definite overflow
                if (v > 2 ** 63 - 1).any():
                    object_items.append((i, k, v))
                    continue
            int_items.append((i, k, v))
        elif v.dtype == np.bool_:
            bool_items.append((i, k, v))
        elif is_categorical(v):
            cat_items.append((i, k, v))
        else:
            object_items.append((i, k, v))
    blocks = []
    if len(float_items):
        float_blocks = _multi_blockify(float_items)
        blocks.extend(float_blocks)
    if len(complex_items):
        complex_blocks = _simple_blockify(
            complex_items, np.complex128)
        blocks.extend(complex_blocks)
    if len(int_items):
        int_blocks = _multi_blockify(int_items)
        blocks.extend(int_blocks)
    if len(datetime_items):
        datetime_blocks = _simple_blockify(
            datetime_items, _NS_DTYPE)
        blocks.extend(datetime_blocks)
    if len(bool_items):
        bool_blocks = _simple_blockify(
            bool_items, np.bool_)
        blocks.extend(bool_blocks)
    if len(object_items) > 0:
        object_blocks = _simple_blockify(
            object_items, np.object_)
        blocks.extend(object_blocks)
    if len(sparse_items) > 0:
        sparse_blocks = _sparse_blockify(sparse_items)
        blocks.extend(sparse_blocks)
    if len(cat_items) > 0:
        # one block per categorical item (categoricals don't consolidate)
        cat_blocks = [ make_block(array,
                                  klass=CategoricalBlock,
                                  fastpath=True,
                                  placement=[i]
                                  ) for i, _, array in cat_items ]
        blocks.extend(cat_blocks)
    if len(extra_locs):
        shape = (len(extra_locs),) + tuple(len(x) for x in axes[1:])
        # empty items -> dtype object
        block_values = np.empty(shape, dtype=object)
        block_values.fill(np.nan)
        na_block = make_block(block_values, placement=extra_locs)
        blocks.append(na_block)
    return blocks
def _simple_blockify(tuples, dtype):
    """ return a single array of a block that has a single dtype; if dtype is
    not None, coerce to this dtype
    """
    values, placement = _stack_arrays(tuples, dtype)
    # CHECK DTYPE?
    if dtype is not None and values.dtype != dtype:  # pragma: no cover
        values = values.astype(dtype)
    block = make_block(values, placement=placement)
    # always a single block, but return a list for API symmetry
    return [block]
def _multi_blockify(tuples, dtype=None):
    """ return an array of blocks that potentially have different dtypes """
    # group by dtype
    # NOTE(review): itertools.groupby only groups *adjacent* entries, so this
    # assumes tuples arrive ordered/grouped by dtype — confirm with callers.
    grouper = itertools.groupby(tuples, lambda x: x[2].dtype)
    new_blocks = []
    for dtype, tup_block in grouper:
        values, placement = _stack_arrays(
            list(tup_block), dtype)
        block = make_block(values, placement=placement)
        new_blocks.append(block)
    return new_blocks
def _sparse_blockify(tuples, dtype=None):
    """ return an array of blocks that potentially have different dtypes (and
    are sparse)
    """
    # NOTE: 'dtype' and the per-tuple name are accepted but unused here
    new_blocks = []
    for i, names, array in tuples:
        array = _maybe_to_sparse(array)
        # one single-item SparseBlock per array (sparse never consolidates)
        block = make_block(
            array, klass=SparseBlock, fastpath=True,
            placement=[i])
        new_blocks.append(block)
    return new_blocks
def _stack_arrays(tuples, dtype):
    """Stack the arrays from ``(placement, name, array)`` tuples into a
    single 2-d ndarray of ``dtype``; returns (stacked, placement)."""
    def _asarray_compat(x):
        # Series -> underlying ndarray; anything else through np.asarray
        if isinstance(x, ABCSeries):
            return x.values
        else:
            return np.asarray(x)
    def _shape_compat(x):
        # Series report their length as a 1-tuple shape
        if isinstance(x, ABCSeries):
            return len(x),
        else:
            return x.shape
    placement, names, arrays = zip(*tuples)
    first = arrays[0]
    shape = (len(arrays),) + _shape_compat(first)
    stacked = np.empty(shape, dtype=dtype)
    for i, arr in enumerate(arrays):
        stacked[i] = _asarray_compat(arr)
    return stacked, placement
def _interleaved_dtype(blocks):
    """Determine a single numpy dtype able to hold all given blocks.

    Returns None for an empty block list.
    """
    if not len(blocks):
        return None
    # bucket blocks by their Block subclass
    counts = defaultdict(lambda: [])
    for x in blocks:
        counts[type(x)].append(x)
    def _lcd_dtype(l):
        """ find the lowest dtype that can accommodate the given types """
        m = l[0].dtype
        for x in l[1:]:
            if x.dtype.itemsize > m.itemsize:
                m = x.dtype
        return m
    have_int = len(counts[IntBlock]) > 0
    have_bool = len(counts[BoolBlock]) > 0
    have_object = len(counts[ObjectBlock]) > 0
    have_float = len(counts[FloatBlock]) > 0
    have_complex = len(counts[ComplexBlock]) > 0
    have_dt64 = len(counts[DatetimeBlock]) > 0
    have_td64 = len(counts[TimeDeltaBlock]) > 0
    have_cat = len(counts[CategoricalBlock]) > 0
    # NOTE: have_sparse is computed but never consulted below
    have_sparse = len(counts[SparseBlock]) > 0
    have_numeric = have_float or have_complex or have_int
    has_non_numeric = have_dt64 or have_td64 or have_cat
    # anything mixed with object/datetime/timedelta/categorical -> object
    if (have_object or
        (have_bool and (have_numeric or have_dt64 or have_td64)) or
        (have_numeric and has_non_numeric) or
        have_cat or
        have_dt64 or
        have_td64):
        return np.dtype(object)
    elif have_bool:
        return np.dtype(bool)
    elif have_int and not have_float and not have_complex:
        # if we are mixing unsigned and signed, then return
        # the next biggest int type (if we can)
        lcd = _lcd_dtype(counts[IntBlock])
        kinds = set([i.dtype.kind for i in counts[IntBlock]])
        if len(kinds) == 1:
            return lcd
        if lcd == 'uint64' or lcd == 'int64':
            return np.dtype('int64')
        # return 1 bigger on the itemsize if unsigned
        if lcd.kind == 'u':
            return np.dtype('int%s' % (lcd.itemsize * 8 * 2))
        return lcd
    elif have_complex:
        return np.dtype('c16')
    else:
        # only floats (and sparse) remain: widest float dtype wins
        return _lcd_dtype(counts[FloatBlock] + counts[SparseBlock])
def _consolidate(blocks):
    """
    Merge blocks having same dtype, exclude non-consolidating blocks
    """
    # sort by _can_consolidate, dtype
    gkey = lambda x: x._consolidate_key
    grouper = itertools.groupby(sorted(blocks, key=gkey), gkey)
    new_blocks = []
    for (_can_consolidate, dtype), group_blocks in grouper:
        # _merge_blocks returns a single Block when merging succeeded, or
        # the original list for non-consolidatable groups
        merged_blocks = _merge_blocks(list(group_blocks), dtype=dtype,
                                      _can_consolidate=_can_consolidate)
        if isinstance(merged_blocks, list):
            new_blocks.extend(merged_blocks)
        else:
            new_blocks.append(merged_blocks)
    return new_blocks
def _merge_blocks(blocks, dtype=None, _can_consolidate=True):
    # Combine several like-typed blocks into one; returns the input list
    # unchanged when consolidation is disallowed (non-consolidating blocks).
    if len(blocks) == 1:
        return blocks[0]
    if _can_consolidate:
        if dtype is None:
            # A single merged block requires all inputs to share one dtype.
            if len(set([b.dtype for b in blocks])) != 1:
                raise AssertionError("_merge_blocks are invalid!")
            dtype = blocks[0].dtype
        # FIXME: optimization potential in case all mgrs contain slices and
        # combination of those slices is a slice, too.
        new_mgr_locs = np.concatenate([b.mgr_locs.as_array for b in blocks])
        new_values = _vstack([b.values for b in blocks], dtype)
        # Re-sort rows so values line up with ascending manager locations.
        argsort = np.argsort(new_mgr_locs)
        new_values = new_values[argsort]
        new_mgr_locs = new_mgr_locs[argsort]
        return make_block(new_values,
                          fastpath=True, placement=new_mgr_locs)
    # no merge: hand the caller back the original list of blocks
    return blocks
def _block_shape(values, ndim=1, shape=None):
""" guarantee the shape of the values to be at least 1 d """
if values.ndim <= ndim:
if shape is None:
shape = values.shape
values = values.reshape(tuple((1,) + shape))
return values
def _vstack(to_stack, dtype):
    """Vertically stack arrays; datetime64/timedelta64 data is routed through
    an i8 view to work around a NumPy 1.6 bug, then viewed back."""
    if dtype in (_NS_DTYPE, _TD_DTYPE):
        stacked = np.vstack([arr.view('i8') for arr in to_stack])
        return stacked.view(dtype)
    return np.vstack(to_stack)
def _possibly_compare(a, b, op):
res = op(a, b)
is_a_array = isinstance(a, np.ndarray)
is_b_array = isinstance(b, np.ndarray)
if np.isscalar(res) and (is_a_array or is_b_array):
type_names = [type(a).__name__, type(b).__name__]
if is_a_array:
type_names[0] = 'ndarray(dtype=%s)' % a.dtype
if is_b_array:
type_names[1] = 'ndarray(dtype=%s)' % b.dtype
raise TypeError("Cannot compare types %r and %r" % tuple(type_names))
return res
def _concat_indexes(indexes):
return indexes[0].append(indexes[1:])
def _block2d_to_blocknd(values, placement, shape, labels, ref_items):
    """ pivot to the labels shape """
    from pandas.core.internals import make_block
    panel_shape = (len(placement),) + shape
    # TODO: lexsort depth needs to be 2!!
    # Create observation selection vector using major and minor
    # labels, for converting to panel format.
    selector = _factor_indexer(shape[1:], labels)
    mask = np.zeros(np.prod(shape), dtype=bool)
    mask.put(selector, True)
    if mask.all():
        # Every target position is observed; the original dtype fits as-is.
        pvalues = np.empty(panel_shape, dtype=values.dtype)
    else:
        # Some positions are missing: promote dtype so it can hold the fill.
        dtype, fill_value = _maybe_promote(values.dtype)
        pvalues = np.empty(panel_shape, dtype=dtype)
        pvalues.fill(fill_value)
    # NOTE: a dead no-op assignment (``values = values``) was removed here.
    for i in range(len(placement)):
        pvalues[i].flat[mask] = values[:, i]
    return make_block(pvalues, placement=placement)
def _factor_indexer(shape, labels):
    """
    given a tuple of shape and a list of Categorical labels, return the
    expanded label indexer
    """
    # Row-major multipliers: cumulative product of trailing dimensions.
    mult = np.array(shape)[::-1].cumprod()[::-1]
    # Dot each label tuple with (mult, 1) to obtain flat positions.
    return com._ensure_platform_int(
        np.sum(np.array(labels).T * np.append(mult, [1]), axis=1).T)
def _get_blkno_placements(blknos, blk_count, group=True):
    """
    Parameters
    ----------
    blknos : array of int64
    blk_count : int
    group : bool
    Returns
    -------
    iterator
    yield (blkno, BlockPlacement)
    """
    blknos = com._ensure_int64(blknos)
    # FIXME: blk_count is unused, but it may avoid the use of dicts in cython
    for blkno, indexer in lib.get_blkno_indexers(blknos, group):
        yield blkno, BlockPlacement(indexer)
def items_overlap_with_suffix(left, lsuffix, right, rsuffix):
    """
    If two indices overlap, add suffixes to overlapping entries.
    If corresponding suffix is empty, the entry is simply converted to string.
    """
    to_rename = left.intersection(right)
    # Guard clause: disjoint indexes need no renaming at all.
    if len(to_rename) == 0:
        return left, right
    if not lsuffix and not rsuffix:
        raise ValueError('columns overlap but no suffix specified: %s' %
                         to_rename)

    def make_renamer(suffix):
        # Builds a renamer that suffixes only the overlapping entries.
        def renamer(label):
            if label in to_rename:
                return '%s%s' % (label, suffix)
            return label
        return renamer

    return (_transform_index(left, make_renamer(lsuffix)),
            _transform_index(right, make_renamer(rsuffix)))
def _transform_index(index, func):
"""
Apply function to all values found in index.
This includes transforming multiindex entries separately.
"""
if isinstance(index, MultiIndex):
items = [tuple(func(y) for y in x) for x in index]
return MultiIndex.from_tuples(items, names=index.names)
else:
items = [func(x) for x in index]
return Index(items, name=index.name)
def _putmask_smart(v, m, n):
    """
    Return a new block, try to preserve dtype if possible.
    Parameters
    ----------
    v : `values`, updated in-place (array like)
    m : `mask`, applies to both sides (array like)
    n : `new values` either scalar or an array like aligned with `values`
    """
    # n should be the length of the mask or a scalar here
    if not is_list_like(n):
        n = np.array([n] * len(m))
    elif isinstance(n, np.ndarray) and n.ndim == 0:  # numpy scalar
        n = np.repeat(np.array(n, ndmin=1), len(m))
    # see if we are only masking values that if putted
    # will work in the current dtype
    try:
        nn = n[m]
        nn_at = nn.astype(v.dtype)
        # Keep the current dtype only if the round-trip cast is lossless.
        if (nn == nn_at).all():
            nv = v.copy()
            nv[m] = nn_at
            return nv
    except (ValueError, IndexError, TypeError):
        # Any failure here just means we fall through to dtype promotion.
        pass
    # change the dtype: promote to one that can hold the new values
    dtype, _ = com._maybe_promote(n.dtype)
    nv = v.astype(dtype)
    try:
        nv[m] = n[m]
    except ValueError:
        # Shape-mismatched assignment: fall back to element-wise placement.
        idx, = np.where(np.squeeze(m))
        for mask_index, new_val in zip(idx, n[m]):
            nv[mask_index] = new_val
    return nv
def concatenate_block_managers(mgrs_indexers, axes, concat_axis, copy):
    """
    Concatenate block managers into one.
    Parameters
    ----------
    mgrs_indexers : list of (BlockManager, {axis: indexer,...}) tuples
    axes : list of Index
    concat_axis : int
    copy : bool
    """
    # Combine the per-manager plans into a single plan, then realize each
    # planned placement by concatenating its join units into one block.
    concat_plan = combine_concat_plans([get_mgr_concatenation_plan(mgr, indexers)
                                        for mgr, indexers in mgrs_indexers],
                                       concat_axis)
    blocks = [make_block(concatenate_join_units(join_units, concat_axis,
                                                copy=copy),
                         placement=placement)
              for placement, join_units in concat_plan]
    return BlockManager(blocks, axes)
def get_empty_dtype_and_na(join_units):
    """
    Return dtype and N/A values to use when concatenating specified units.
    Returned N/A value may be None which means there was no casting involved.
    Returns
    -------
    dtype
    na
    """
    if len(join_units) == 1:
        blk = join_units[0].block
        if blk is None:
            # A single entirely-missing unit concatenates as all-NaN float.
            return np.float64, np.nan
    has_none_blocks = False
    dtypes = [None] * len(join_units)
    for i, unit in enumerate(join_units):
        if unit.block is None:
            has_none_blocks = True
        else:
            dtypes[i] = unit.dtype
    # dtypes = set()
    upcast_classes = set()
    null_upcast_classes = set()
    for dtype, unit in zip(dtypes, join_units):
        if dtype is None:
            continue
        # Bucket each dtype into a coarse upcast category.
        if com.is_categorical_dtype(dtype):
            upcast_cls = 'category'
        elif issubclass(dtype.type, np.bool_):
            upcast_cls = 'bool'
        elif issubclass(dtype.type, np.object_):
            upcast_cls = 'object'
        elif is_datetime64_dtype(dtype):
            upcast_cls = 'datetime'
        elif is_timedelta64_dtype(dtype):
            upcast_cls = 'timedelta'
        else:
            upcast_cls = 'float'
        # Null blocks should not influence upcast class selection, unless there
        # are only null blocks, when same upcasting rules must be applied to
        # null upcast classes.
        if unit.is_null:
            null_upcast_classes.add(upcast_cls)
        else:
            upcast_classes.add(upcast_cls)
    if not upcast_classes:
        upcast_classes = null_upcast_classes
    # create the result: check categories from widest to narrowest
    if 'object' in upcast_classes:
        return np.dtype(np.object_), np.nan
    elif 'bool' in upcast_classes:
        if has_none_blocks:
            # bool cannot hold NaN, so missing blocks force object dtype.
            return np.dtype(np.object_), np.nan
        else:
            return np.dtype(np.bool_), None
    elif 'category' in upcast_classes:
        return com.CategoricalDtype(), np.nan
    elif 'float' in upcast_classes:
        return np.dtype(np.float64), np.nan
    elif 'datetime' in upcast_classes:
        return np.dtype('M8[ns]'), tslib.iNaT
    elif 'timedelta' in upcast_classes:
        return np.dtype('m8[ns]'), tslib.iNaT
    else:  # pragma
        raise AssertionError("invalid dtype determination in get_concat_dtype")
def concatenate_join_units(join_units, concat_axis, copy):
    """
    Concatenate values from several join units along selected axis.
    """
    if concat_axis == 0 and len(join_units) > 1:
        # Concatenating join units along ax0 is handled in _merge_blocks.
        raise AssertionError("Concatenating join units along axis0")
    empty_dtype, upcasted_na = get_empty_dtype_and_na(join_units)
    to_concat = [ju.get_reindexed_values(empty_dtype=empty_dtype,
                                         upcasted_na=upcasted_na)
                 for ju in join_units]
    if len(to_concat) == 1:
        # Only one block, nothing to concatenate.
        concat_values = to_concat[0]
        # A non-None .base means the values are a view; honor the copy
        # request by materializing them.
        if copy and concat_values.base is not None:
            concat_values = concat_values.copy()
    else:
        concat_values = com._concat_compat(to_concat, axis=concat_axis)
    return concat_values
def get_mgr_concatenation_plan(mgr, indexers):
    """
    Construct concatenation plan for given block manager and indexers.
    Parameters
    ----------
    mgr : BlockManager
    indexers : dict of {axis: indexer}
    Returns
    -------
    plan : list of (BlockPlacement, JoinUnit) tuples
    """
    # Calculate post-reindex shape , save for item axis which will be separate
    # for each block anyway.
    mgr_shape = list(mgr.shape)
    for ax, indexer in indexers.items():
        mgr_shape[ax] = len(indexer)
    mgr_shape = tuple(mgr_shape)
    if 0 in indexers:
        # Item-axis reindexing: map each output row back to its source
        # block/location; -1 marks rows with no source (all-missing units).
        ax0_indexer = indexers.pop(0)
        blknos = com.take_1d(mgr._blknos, ax0_indexer, fill_value=-1)
        blklocs = com.take_1d(mgr._blklocs, ax0_indexer, fill_value=-1)
    else:
        if mgr._is_single_block:
            # Fast path: one block covers the whole manager.
            blk = mgr.blocks[0]
            return [(blk.mgr_locs, JoinUnit(blk, mgr_shape, indexers))]
        ax0_indexer = None
        blknos = mgr._blknos
        blklocs = mgr._blklocs
    plan = []
    for blkno, placements in _get_blkno_placements(blknos, len(mgr.blocks),
                                                   group=False):
        assert placements.is_slice_like
        join_unit_indexers = indexers.copy()
        shape = list(mgr_shape)
        shape[0] = len(placements)
        shape = tuple(shape)
        if blkno == -1:
            # Rows with no source block become an all-missing join unit.
            unit = JoinUnit(None, shape)
        else:
            blk = mgr.blocks[blkno]
            ax0_blk_indexer = blklocs[placements.indexer]
            unit_no_ax0_reindexing = (
                len(placements) == len(blk.mgr_locs) and
                # Fastpath detection of join unit not needing to reindex its
                # block: no ax0 reindexing took place and block placement was
                # sequential before.
                ((ax0_indexer is None
                  and blk.mgr_locs.is_slice_like
                  and blk.mgr_locs.as_slice.step == 1) or
                 # Slow-ish detection: all indexer locs are sequential (and
                 # length match is checked above).
                 (np.diff(ax0_blk_indexer) == 1).all()))
            # Omit indexer if no item reindexing is required.
            if unit_no_ax0_reindexing:
                join_unit_indexers.pop(0, None)
            else:
                join_unit_indexers[0] = ax0_blk_indexer
            unit = JoinUnit(blk, shape, join_unit_indexers)
        plan.append((placements, unit))
    return plan
def combine_concat_plans(plans, concat_axis):
    """
    Combine multiple concatenation plans into one.
    existing_plan is updated in-place.
    """
    if len(plans) == 1:
        # Single manager: pass each unit through, wrapped in a list.
        for p in plans[0]:
            yield p[0], [p[1]]
    elif concat_axis == 0:
        # Stacking along items: shift each plan's placements past the items
        # contributed by the previous plans.
        offset = 0
        for plan in plans:
            last_plc = None
            for plc, unit in plan:
                yield plc.add(offset), [unit]
                last_plc = plc
            if last_plc is not None:
                offset += last_plc.as_slice.stop
    else:
        # Non-item axis: walk all plans in lockstep, splitting units so the
        # yielded placements line up across plans.
        num_ended = [0]

        def _next_or_none(seq):
            # Advance one plan; count exhausted plans in num_ended.
            retval = next(seq, None)
            if retval is None:
                num_ended[0] += 1
            return retval

        plans = list(map(iter, plans))
        next_items = list(map(_next_or_none, plans))
        while num_ended[0] != len(next_items):
            if num_ended[0] > 0:
                # Some plans ran out while others still have items.
                raise ValueError("Plan shapes are not aligned")
            placements, units = zip(*next_items)
            lengths = list(map(len, placements))
            min_len, max_len = min(lengths), max(lengths)
            if min_len == max_len:
                # Current placements agree in length: emit them as-is.
                yield placements[0], units
                next_items[:] = map(_next_or_none, plans)
            else:
                # Emit only the common prefix; longer units are trimmed and
                # their remainders kept for the next round.
                yielded_placement = None
                yielded_units = [None] * len(next_items)
                for i, (plc, unit) in enumerate(next_items):
                    yielded_units[i] = unit
                    if len(plc) > min_len:
                        # trim_join_unit updates unit in place, so only
                        # placement needs to be sliced to skip min_len.
                        next_items[i] = (plc[min_len:],
                                         trim_join_unit(unit, min_len))
                    else:
                        yielded_placement = plc
                        next_items[i] = _next_or_none(plans[i])
                yield yielded_placement, yielded_units
def trim_join_unit(join_unit, length):
    """
    Reduce join_unit's shape along item axis to length.
    Extra items that didn't fit are returned as a separate block.
    """
    if 0 not in join_unit.indexers:
        extra_indexers = join_unit.indexers
        if join_unit.block is None:
            extra_block = None
        else:
            # No axis-0 indexer: split the block itself; the first `length`
            # rows stay, the remainder moves to the extra unit.
            extra_block = join_unit.block.getitem_block(slice(length, None))
            join_unit.block = join_unit.block.getitem_block(slice(length))
    else:
        # An axis-0 indexer exists: split the indexer instead of the block.
        extra_block = join_unit.block
        extra_indexers = copy.copy(join_unit.indexers)
        extra_indexers[0] = extra_indexers[0][length:]
        join_unit.indexers[0] = join_unit.indexers[0][:length]
    extra_shape = (join_unit.shape[0] - length,) + join_unit.shape[1:]
    join_unit.shape = (length,) + join_unit.shape[1:]
    return JoinUnit(block=extra_block, indexers=extra_indexers,
                    shape=extra_shape)
class JoinUnit(object):
    """A block (possibly absent) plus the reindexing needed to place its
    values into a concatenation result of a given shape."""

    def __init__(self, block, shape, indexers=None):
        # Passing shape explicitly is required for cases when block is None.
        # BUGFIX: the original signature used a mutable default
        # (``indexers={}``); that single dict is shared across instances and
        # trim_join_unit() mutates ``self.indexers`` in place, so the shared
        # default could be silently corrupted. Use a None sentinel instead.
        if indexers is None:
            indexers = {}
        self.block = block
        self.indexers = indexers
        self.shape = shape

    def __repr__(self):
        return '%s(%r, %s)' % (self.__class__.__name__,
                               self.block, self.indexers)

    @cache_readonly
    def needs_filling(self):
        """True when any axis indexer introduces missing (-1) locations."""
        for indexer in self.indexers.values():
            # FIXME: cache results of indexer == -1 checks.
            if (indexer == -1).any():
                return True
        return False

    @cache_readonly
    def dtype(self):
        """dtype the reindexed values will have (promoted when filling)."""
        if self.block is None:
            raise AssertionError("Block is None, no dtype")
        if not self.needs_filling:
            return self.block.dtype
        else:
            # Filling may require promoting the dtype to fit the fill value.
            return com._get_dtype(com._maybe_promote(self.block.dtype,
                                                     self.block.fill_value)[0])
        # NOTE: an unreachable ``return self._dtype`` that followed the
        # if/else (both branches already return) has been removed.

    @cache_readonly
    def is_null(self):
        """True when the unit has no block or holds only missing values."""
        if self.block is None:
            return True
        if not self.block._can_hold_na:
            return False
        # Usually it's enough to check but a small fraction of values to see if
        # a block is NOT null, chunks should help in such cases. 1000 value
        # was chosen rather arbitrarily.
        values_flat = self.block.values.ravel()
        total_len = values_flat.shape[0]
        chunk_len = max(total_len // 40, 1000)
        for i in range(0, total_len, chunk_len):
            if not isnull(values_flat[i: i + chunk_len]).all():
                return False
        return True

    @cache_readonly
    def needs_block_conversion(self):
        """ we might need to convert the joined values to a suitable block repr """
        block = self.block
        return block is not None and (block.is_sparse or block.is_categorical)

    def get_reindexed_values(self, empty_dtype, upcasted_na):
        """Return this unit's values reindexed/filled for concatenation."""
        if upcasted_na is None:
            # No upcasting is necessary
            fill_value = self.block.fill_value
            values = self.block.get_values()
        else:
            fill_value = upcasted_na
            if self.is_null and not getattr(self.block, 'is_categorical', None):
                # Entirely missing: synthesize a filled array of target shape.
                missing_arr = np.empty(self.shape, dtype=empty_dtype)
                if np.prod(self.shape):
                    # NumPy 1.6 workaround: this statement gets strange if all
                    # blocks are of same dtype and some of them are empty:
                    # empty one are considered "null" so they must be filled,
                    # but no dtype upcasting happens and the dtype may not
                    # allow NaNs.
                    #
                    # In general, no one should get hurt when one tries to put
                    # incorrect values into empty array, but numpy 1.6 is
                    # strict about that.
                    missing_arr.fill(fill_value)
                return missing_arr
            if not self.indexers:
                if self.block.is_categorical:
                    # preserve the categoricals for validation in _concat_compat
                    return self.block.values
                elif self.block.is_sparse:
                    # preserve the sparse array for validation in _concat_compat
                    return self.block.values
            if self.block.is_bool:
                # External code requested filling/upcasting, bool values must
                # be upcasted to object to avoid being upcasted to numeric.
                values = self.block.astype(np.object_).values
            else:
                # No dtype upcasting is done here, it will be performed during
                # concatenation itself.
                values = self.block.get_values()
        if not self.indexers:
            # If there's no indexing to be done, we want to signal outside
            # code that this array must be copied explicitly. This is done
            # by returning a view and checking `retval.base`.
            values = values.view()
        else:
            for ax, indexer in self.indexers.items():
                values = com.take_nd(values, indexer, axis=ax,
                                     fill_value=fill_value)
        return values
def _fast_count_smallints(arr):
"""Faster version of set(arr) for sequences of small numbers."""
if len(arr) == 0:
# Handle empty arr case separately: numpy 1.6 chokes on that.
return np.empty((0, 2), dtype=arr.dtype)
else:
counts = np.bincount(arr.astype(np.int_))
nz = counts.nonzero()[0]
return np.c_[nz, counts[nz]]
def _preprocess_slice_or_indexer(slice_or_indexer, length, allow_fill):
    # Normalize an indexer into a (kind, indexer, result-length) triple where
    # kind is one of 'slice', 'mask' (boolean array) or 'fancy' (int array).
    if isinstance(slice_or_indexer, slice):
        return 'slice', slice_or_indexer, lib.slice_len(slice_or_indexer,
                                                        length)
    elif (isinstance(slice_or_indexer, np.ndarray) and
          slice_or_indexer.dtype == np.bool_):
        return 'mask', slice_or_indexer, slice_or_indexer.sum()
    else:
        indexer = np.asanyarray(slice_or_indexer, dtype=np.int64)
        if not allow_fill:
            # Bounds-check and wrap negative indices when -1 fill markers are
            # not permitted.
            indexer = maybe_convert_indices(indexer, length)
        return 'fancy', indexer, len(indexer)
| webmasterraj/FogOrNot | flask/lib/python2.7/site-packages/pandas/core/internals.py | Python | gpl-2.0 | 150,471 |
# This file is part of the mantid workbench.
#
# Copyright (C) 2017 mantidproject
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, unicode_literals)
# system imports
import unittest
# local imports
from mantidqt.widgets.codeeditor.editor import CodeEditor
from mantidqt.utils.qt.testing import requires_qapp
TEST_LANG = "Python"
@requires_qapp
class CodeEditorTest(unittest.TestCase):
    """Behavioural tests for the CodeEditor widget (needs a QApplication)."""

    # ---------------------------------------------------------------
    # Success tests
    # ---------------------------------------------------------------
    def test_construction_accepts_Python_as_language(self):
        # Construction alone is the assertion: no exception expected.
        CodeEditor(TEST_LANG)

    def test_default_construction_yields_empty_filename(self):
        widget = CodeEditor(TEST_LANG)
        self.assertEqual("", widget.fileName())

    def test_set_filename_returns_expected_string(self):
        widget = CodeEditor(TEST_LANG)
        test_filename = "myscript.py"
        widget.setFileName(test_filename)
        self.assertEqual(test_filename, widget.fileName())

    def test_set_text_can_be_read_again(self):
        widget = CodeEditor(TEST_LANG)
        code_str = 'print "Hello World!"'
        widget.setText(code_str)
        self.assertEqual(code_str, widget.text())

    def test_default_construction_yields_editable_widget(self):
        widget = CodeEditor(TEST_LANG)
        self.assertFalse(widget.isReadOnly())

    def test_setReadOnly_to_true_sets_readonly_status(self):
        widget = CodeEditor(TEST_LANG)
        widget.setReadOnly(True)
        self.assertTrue(widget.isReadOnly())

    def test_get_selection_for_empty_selection(self):
        widget = CodeEditor(TEST_LANG)
        res = widget.getSelection()
        # No selection is reported as the (-1, -1, -1, -1) sentinel tuple.
        self.assertEqual((-1, -1, -1, -1), res)

    def test_get_selection_for_non_empty_selection(self):
        widget = CodeEditor(TEST_LANG)
        widget.setText("""first line
second line
third line
fourth line
""")
        # (start line, start col, end line, end col)
        selected = (0, 2, 3, 4)
        widget.setSelection(*selected)
        res = widget.getSelection()
        self.assertEqual(selected, res)

    # ---------------------------------------------------------------
    # Failure tests
    # ---------------------------------------------------------------
    def test_construction_raises_error_for_unknown_language(self):
        # self.assertRaises causes a segfault here for some reason...
        try:
            CodeEditor("MyCoolLanguage")
        except ValueError:
            # Expected: unknown languages are rejected with ValueError.
            pass
        except Exception as exc:
            self.fail("Expected a Value error to be raised but found a " + exc.__name__)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| ScreamingUdder/mantid | qt/python/mantidqt/widgets/codeeditor/test/test_codeeditor.py | Python | gpl-3.0 | 3,332 |
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import logging
import os
import pkgutil
import plistlib
import subprocess
from collections import namedtuple
from contextlib import contextmanager
from six import string_types
from pants.base.revision import Revision
from pants.java.util import execute_java, execute_java_async
from pants.subsystem.subsystem import Subsystem
from pants.util.contextutil import temporary_dir
from pants.util.memo import memoized_property
from pants.util.osutil import OS_ALIASES, normalize_os_name
logger = logging.getLogger(__name__)
class Distribution(object):
    """Represents a java distribution - either a JRE or a JDK installed on the local system.
    In particular provides access to the distribution's binaries; ie: java while ensuring basic
    constraints are met. For example a minimum version can be specified if you know need to compile
    source code or run bytecode that exercise features only available in that version forward.
    :API: public
    """

    class Error(Exception):
        """Indicates an invalid java distribution."""

    @staticmethod
    def _parse_java_version(name, version):
        # Java version strings have been well defined since release 1.3.1 as defined here:
        # http://www.oracle.com/technetwork/java/javase/versioning-naming-139433.html
        # These version strings comply with semver except that the traditional pre-release semver
        # slot (the 4th) can be delimited by an _ in the case of update releases of the jdk.
        # We accommodate that difference here using lenient parsing.
        # We also accommodate specification versions, which just have major and minor
        # components; eg: `1.8`. These are useful when specifying constraints a distribution must
        # satisfy; eg: to pick any 1.8 java distribution: '1.8' <= version <= '1.8.99'
        if isinstance(version, string_types):
            version = Revision.lenient(version)
        if version and not isinstance(version, Revision):
            raise ValueError('{} must be a string or a Revision object, given: {}'.format(name, version))
        return version

    @staticmethod
    def _is_executable(path):
        # A usable binary must be a regular file with the execute bit set.
        return os.path.isfile(path) and os.access(path, os.X_OK)

    def __init__(self, home_path=None, bin_path=None, minimum_version=None, maximum_version=None,
                 jdk=False):
        """Creates a distribution wrapping the given `home_path` or `bin_path`.
        Only one of `home_path` or `bin_path` should be supplied.
        :param string home_path: the path to the java distribution's home dir
        :param string bin_path: the path to the java distribution's bin dir
        :param minimum_version: a modified semantic version string or else a Revision object
        :param maximum_version: a modified semantic version string or else a Revision object
        :param bool jdk: ``True`` to require the distribution be a JDK vs a JRE
        """
        if home_path and not os.path.isdir(home_path):
            raise ValueError('The specified java home path is invalid: {}'.format(home_path))
        if bin_path and not os.path.isdir(bin_path):
            raise ValueError('The specified binary path is invalid: {}'.format(bin_path))
        # Exactly one of the two paths must be given (xor).
        if not bool(home_path) ^ bool(bin_path):
            raise ValueError('Exactly one of home path or bin path should be supplied, given: '
                             'home_path={} bin_path={}'.format(home_path, bin_path))
        self._home = home_path
        self._bin_path = bin_path or (os.path.join(home_path, 'bin') if home_path else '/usr/bin')
        self._minimum_version = self._parse_java_version("minimum_version", minimum_version)
        self._maximum_version = self._parse_java_version("maximum_version", maximum_version)
        self._jdk = jdk
        # Lazily-populated caches; see validate() and the properties below.
        self._is_jdk = False
        self._system_properties = None
        self._version = None
        self._validated_binaries = {}

    @property
    def jdk(self):
        # validate() populates self._is_jdk as a side effect.
        self.validate()
        return self._is_jdk

    @property
    def system_properties(self):
        """Returns a dict containing the system properties of this java distribution."""
        return dict(self._get_system_properties(self.java))

    @property
    def version(self):
        """Returns the distribution version.
        Raises Distribution.Error if this distribution is not valid according to the configured
        constraints.
        """
        return self._get_version(self.java)

    def find_libs(self, names):
        """Looks for jars in the distribution lib folder(s).
        If the distribution is a JDK, both the `lib` and `jre/lib` dirs will be scanned.
        The endorsed and extension dirs are not checked.
        :param list names: jar file names
        :return: list of paths to requested libraries
        :raises: `Distribution.Error` if any of the jars could not be found.
        """
        def collect_existing_libs():
            def lib_paths():
                yield os.path.join(self.home, 'lib')
                if self.jdk:
                    yield os.path.join(self.home, 'jre', 'lib')
            for name in names:
                for path in lib_paths():
                    lib_path = os.path.join(path, name)
                    if os.path.exists(lib_path):
                        yield lib_path
                        break
                else:
                    # for/else: no candidate directory contained this jar.
                    raise Distribution.Error('Failed to locate {} library'.format(name))
        return list(collect_existing_libs())

    @property
    def home(self):
        """Returns the distribution JAVA_HOME."""
        if not self._home:
            home = self._get_system_properties(self.java)['java.home']
            # The `jre/bin/java` executable in a JDK distribution will report `java.home` as the jre dir,
            # so we check for this and re-locate to the containing jdk dir when present.
            if os.path.basename(home) == 'jre':
                jdk_dir = os.path.dirname(home)
                if self._is_executable(os.path.join(jdk_dir, 'bin', 'javac')):
                    home = jdk_dir
            self._home = home
        return self._home

    @property
    def real_home(self):
        """Real path to the distribution java.home (resolving links)."""
        return os.path.realpath(self.home)

    @property
    def java(self):
        """Returns the path to this distribution's java command.
        If this distribution has no valid java command raises Distribution.Error.
        """
        return self.binary('java')

    def binary(self, name):
        """Returns the path to the command of the given name for this distribution.
        For example: ::
            >>> d = Distribution()
            >>> jar = d.binary('jar')
            >>> jar
            '/usr/bin/jar'
            >>>
        If this distribution has no valid command of the given name raises Distribution.Error.
        If this distribution is a JDK checks both `bin` and `jre/bin` for the binary.
        """
        if not isinstance(name, string_types):
            raise ValueError('name must be a binary name, given {} of type {}'.format(name, type(name)))
        self.validate()
        return self._validated_executable(name)

    def validate(self):
        """Validates this distribution against its configured constraints.
        Raises Distribution.Error if this distribution is not valid according to the configured
        constraints.
        """
        # A non-empty binary cache means a prior validation fully succeeded.
        if self._validated_binaries:
            return
        with self._valid_executable('java') as java:
            if self._minimum_version:
                version = self._get_version(java)
                if version < self._minimum_version:
                    raise self.Error('The java distribution at {} is too old; expecting at least {} and'
                                     ' got {}'.format(java, self._minimum_version, version))
            if self._maximum_version:
                version = self._get_version(java)
                if version > self._maximum_version:
                    raise self.Error('The java distribution at {} is too new; expecting no older than'
                                     ' {} and got {}'.format(java, self._maximum_version, version))
            # We might be a JDK discovered by the embedded jre `java` executable.
            # If so reset the bin path to the true JDK home dir for full access to all binaries.
            # NOTE(review): block placement reconstructed from a whitespace-mangled
            # dump; confirm against upstream that this runs inside the `with`.
            self._bin_path = os.path.join(self.home, 'bin')
            try:
                self._validated_executable('javac')  # Calling purely for the check and cache side effects
                self._is_jdk = True
            except self.Error as e:
                # Missing javac is only fatal when a JDK was required.
                if self._jdk:
                    logger.debug('Failed to validate javac executable. Please check you have a JDK '
                                 'installed. Original error: {}'.format(e))
                    raise

    def execute_java(self, *args, **kwargs):
        # Convenience pass-through that pins this distribution.
        return execute_java(*args, distribution=self, **kwargs)

    def execute_java_async(self, *args, **kwargs):
        return execute_java_async(*args, distribution=self, **kwargs)

    def _get_version(self, java):
        # Cached parse of the `java.version` system property.
        if not self._version:
            self._version = self._parse_java_version('java.version',
                                                     self._get_system_properties(java)['java.version'])
        return self._version

    def _get_system_properties(self, java):
        # Runs the bundled SystemProperties helper class with the given java
        # and parses its key=value output; the result is cached.
        if not self._system_properties:
            with temporary_dir() as classpath:
                with open(os.path.join(classpath, 'SystemProperties.class'), 'w+') as fp:
                    fp.write(pkgutil.get_data(__name__, 'SystemProperties.class'))
                cmd = [java, '-cp', classpath, 'SystemProperties']
                process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
                stdout, stderr = process.communicate()
                if process.returncode != 0:
                    raise self.Error('Failed to determine java system properties for {} with {} - exit code'
                                     ' {}: {}'.format(java, ' '.join(cmd), process.returncode, stderr))
                props = {}
                for line in stdout.split(os.linesep):
                    key, _, val = line.partition('=')
                    props[key] = val
                self._system_properties = props
        return self._system_properties

    def _validate_executable(self, name):
        # Search bin dir(s) for the named executable; JDKs also expose jre/bin.
        def bin_paths():
            yield self._bin_path
            if self._is_jdk:
                yield os.path.join(self.home, 'jre', 'bin')
        for bin_path in bin_paths():
            exe = os.path.join(bin_path, name)
            if self._is_executable(exe):
                return exe
        raise self.Error('Failed to locate the {} executable, {} does not appear to be a'
                         ' valid {} distribution'.format(name, self, 'JDK' if self._jdk else 'JRE'))

    def _validated_executable(self, name):
        # Memoized variant of _validate_executable.
        exe = self._validated_binaries.get(name)
        if not exe:
            exe = self._validate_executable(name)
            self._validated_binaries[name] = exe
        return exe

    @contextmanager
    def _valid_executable(self, name):
        # Caches the executable only after the with-body completes without
        # raising, so failed validations are retried.
        exe = self._validate_executable(name)
        yield exe
        self._validated_binaries[name] = exe

    def __repr__(self):
        return ('Distribution({!r}, minimum_version={!r}, maximum_version={!r} jdk={!r})'.format(
            self._bin_path, self._minimum_version, self._maximum_version, self._jdk))
class DistributionLocator(Subsystem):
"""Subsystem that knows how to look up a java Distribution.
:API: public
"""
class Error(Distribution.Error):
"""Error locating a java distribution."""
class _Location(namedtuple('Location', ['home_path', 'bin_path'])):
"""Represents the location of a java distribution."""
@classmethod
def from_home(cls, home):
"""Creates a location given the JAVA_HOME directory.
:param string home: The path of the JAVA_HOME directory.
:returns: The java distribution location.
"""
return cls(home_path=home, bin_path=None)
@classmethod
def from_bin(cls, bin_path):
"""Creates a location given the `java` executable parent directory.
:param string bin_path: The parent path of the `java` executable.
:returns: The java distribution location.
"""
return cls(home_path=None, bin_path=bin_path)
options_scope = 'jvm-distributions'
_CACHE = {}
# The `/usr/lib/jvm` dir is a common target of packages built for redhat and debian as well as
# other more exotic distributions. SUSE uses lib64
_JAVA_DIST_DIRS = ['/usr/lib/jvm', '/usr/lib64/jvm']
_OSX_JAVA_HOME_EXE = '/usr/libexec/java_home'
@classmethod
def register_options(cls, register):
super(DistributionLocator, cls).register_options(register)
human_readable_os_aliases = ', '.join('{}: [{}]'.format(str(key), ', '.join(sorted(val)))
for key, val in OS_ALIASES.items())
register('--paths', advanced=True, type=dict,
help='Map of os names to lists of paths to jdks. These paths will be searched before '
'everything else (before the JDK_HOME, JAVA_HOME, PATH environment variables) '
'when locating a jvm to use. The same OS can be specified via several different '
'aliases, according to this map: {}'.format(human_readable_os_aliases))
register('--minimum-version', advanced=True, help='Minimum version of the JVM pants will use')
register('--maximum-version', advanced=True, help='Maximum version of the JVM pants will use')
@memoized_property
def _normalized_jdk_paths(self):
jdk_paths = self.get_options().paths or {}
normalized = {}
for name, paths in sorted(jdk_paths.items()):
rename = normalize_os_name(name)
if rename in normalized:
logger.warning('Multiple OS names alias to "{}"; combining results.'.format(rename))
normalized[rename].extend(paths)
else:
normalized[rename] = paths
return normalized
def all_jdk_paths(self):
"""Get all explicitly configured JDK paths.
:return: mapping of os name -> list of jdk_paths
:rtype: dict of string -> list of string
"""
return self._normalized_jdk_paths
def get_jdk_paths(self, os_name=None):
"""Get the list of explicitly configured JDK paths for this os.
:param os_name: Os name to lookup. If None, use the currently detected os name.
:return: Paths of explicitly configured JDK's from the --jvm-distribution-paths option
:rtype: list of strings
"""
jdk_paths = self._normalized_jdk_paths
if not jdk_paths:
return ()
if os_name is None:
os_name = os.uname()[0].lower()
os_name = normalize_os_name(os_name)
if os_name not in jdk_paths:
logger.warning('--jvm-distributions-paths was specified, but has no entry for "{}".'
.format(os_name))
return jdk_paths.get(os_name, ())
@classmethod
def java_path_locations(cls):
for location in cls.global_instance().get_jdk_paths():
yield cls._Location.from_home(location)
@classmethod
def _scan_constraint_match(cls, minimum_version, maximum_version, jdk):
"""Finds a cached version matching the specified constraints
:param Revision minimum_version: minimum jvm version to look for (eg, 1.7).
:param Revision maximum_version: maximum jvm version to look for (eg, 1.7.9999).
:param bool jdk: whether the found java distribution is required to have a jdk.
:return: the Distribution, or None if no matching distribution is in the cache.
:rtype: :class:`pants.java.distribution.Distribution`
"""
for dist in cls._CACHE.values():
if minimum_version and dist.version < minimum_version:
continue
if maximum_version and dist.version > maximum_version:
continue
if jdk and not dist.jdk:
continue
return dist
  @classmethod
  def cached(cls, minimum_version=None, maximum_version=None, jdk=False):
    """Finds a java distribution that meets the given constraints and returns it.

    First looks for a cached version that was previously located, otherwise calls locate().

    :param minimum_version: minimum jvm version to look for (eg, 1.7).
      The stricter of this and get_options().minimum_version is used.
    :param maximum_version: maximum jvm version to look for (eg, 1.7.9999).
      The stricter of this and get_options().maximum_version is used.
    :param bool jdk: whether the found java distribution is required to have a jdk.
    :return: the Distribution.
    :rtype: :class:`pants.java.distribution.Distribution`
    """
    def _get_stricter_version(a, b, name, stricter):
      # A parse result of None means "unconstrained on this side": fall back
      # to whichever side is actually set.
      version_a = Distribution._parse_java_version(name, a)
      version_b = Distribution._parse_java_version(name, b)
      if version_a is None:
        return version_b
      if version_b is None:
        return version_a
      return stricter(version_a, version_b)

    # take the tighter constraint of method args and subsystem options
    minimum_version = _get_stricter_version(minimum_version,
                                            cls.global_instance().get_options().minimum_version,
                                            "minimum_version",
                                            max)
    maximum_version = _get_stricter_version(maximum_version,
                                            cls.global_instance().get_options().maximum_version,
                                            "maximum_version",
                                            min)

    key = (minimum_version, maximum_version, jdk)
    dist = cls._CACHE.get(key)
    if not dist:
      # A distribution located under a different key may already satisfy
      # these constraints; scan the cache before doing a full locate().
      dist = cls._scan_constraint_match(minimum_version, maximum_version, jdk)
      if not dist:
        dist = cls.locate(minimum_version=minimum_version,
                          maximum_version=maximum_version,
                          jdk=jdk)
      cls._CACHE[key] = dist
    return dist
  @classmethod
  def locate(cls, minimum_version=None, maximum_version=None, jdk=False):
    """Finds a java distribution that meets any given constraints and returns it.

    Distributions are searched for in the following order:
    * Paths listed for this operating system in the
      --jvm-distributions-paths map
    * JDK_HOME/JAVA_HOME
    * PATH
    * Likely locations on the file system such as /usr/lib/jvm

    :raises: Distribution.Error if no suitable java distribution could
      be found.
    :param minimum_version: minimum jvm version to look for (eg, 1.7).
    :param maximum_version: maximum jvm version to look for (eg, 1.7.9999).
    :param bool jdk: whether the found java distribution is required to have a jdk.
    :return: the located Distribution.
    :rtype: :class:`pants.java.distribution.Distribution`
    """
    def search_path():
      for location in cls.global_instance().java_path_locations():
        yield location
      for location in cls.environment_jvm_locations():
        yield location

    # environment_jvm_locations() may yield None entries (unset env vars);
    # filter(None, ...) drops them.
    for location in filter(None, search_path()):
      try:
        dist = Distribution(home_path=location.home_path,
                            bin_path=location.bin_path,
                            minimum_version=minimum_version,
                            maximum_version=maximum_version,
                            jdk=jdk)
        dist.validate()
        logger.debug('Located {} for constraints: minimum_version {}, maximum_version {}, jdk {}'
                     .format(dist, minimum_version, maximum_version, jdk))
        return dist
      except (ValueError, Distribution.Error):
        # Deliberate best-effort scan: an unusable candidate simply moves the
        # search on to the next location.
        pass

    if minimum_version is not None and maximum_version is not None and maximum_version < minimum_version:
      error_format = 'Pants configuration/options led to impossible constraints for {} distribution: minimum_version {}, maximum_version {}'
    else:
      error_format = 'Failed to locate a {} distribution with minimum_version {}, maximum_version {}'
    raise cls.Error(error_format.format('JDK' if jdk else 'JRE', minimum_version, maximum_version))
@classmethod
def _linux_java_homes(cls):
for java_dist_dir in cls._JAVA_DIST_DIRS:
if os.path.isdir(java_dist_dir):
for path in os.listdir(java_dist_dir):
home = os.path.join(java_dist_dir, path)
if os.path.isdir(home):
yield cls._Location.from_home(home)
  @classmethod
  def _osx_java_homes(cls):
    """Yield locations reported by the OSX `java_home` helper, when present."""
    # OSX will have a java_home tool that can be used to locate a unix-compatible java home dir.
    #
    # See:
    #   https://developer.apple.com/library/mac/documentation/Darwin/Reference/ManPages/man1/java_home.1.html
    #
    # The `--xml` output looks like so:
    # <?xml version="1.0" encoding="UTF-8"?>
    # <!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN"
    #           "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
    # <plist version="1.0">
    #   <array>
    #     <dict>
    #       ...
    #       <key>JVMHomePath</key>
    #       <string>/Library/Java/JavaVirtualMachines/jdk1.7.0_45.jdk/Contents/Home</string>
    #       ...
    #     </dict>
    #     ...
    #   </array>
    # </plist>
    if os.path.exists(cls._OSX_JAVA_HOME_EXE):
      try:
        plist = subprocess.check_output([cls._OSX_JAVA_HOME_EXE, '--failfast', '--xml'])
        # NOTE(review): plistlib.readPlistFromString is deprecated and removed
        # in Python 3.9; if this module moves to Python 3, switch to
        # plistlib.loads — confirm against the supported interpreter range.
        for distribution in plistlib.readPlistFromString(plist):
          home = distribution['JVMHomePath']
          yield cls._Location.from_home(home)
      except subprocess.CalledProcessError:
        # Presumably java_home exits non-zero when no JVM is installed;
        # treated here as "no OSX-managed JVMs" rather than an error.
        pass
@classmethod
def environment_jvm_locations(cls):
def env_home(home_env_var):
home = os.environ.get(home_env_var)
return cls._Location.from_home(home) if home else None
yield env_home('JDK_HOME')
yield env_home('JAVA_HOME')
search_path = os.environ.get('PATH')
if search_path:
for bin_path in search_path.strip().split(os.pathsep):
yield cls._Location.from_bin(bin_path)
for location in cls._linux_java_homes():
yield location
for location in cls._osx_java_homes():
yield location
| dbentley/pants | src/python/pants/java/distribution/distribution.py | Python | apache-2.0 | 21,570 |
from __future__ import print_function
# Time: O(n)
# Space: O(h), h is height of binary tree
#
# Given two binary trees, write a function to check if they are equal or not.
#
# Two binary trees are considered equal if they are structurally identical and the nodes have the same value.
#
# Definition for a binary tree node
class TreeNode:
    """A binary tree node holding a value and optional left/right children."""

    def __init__(self, x):
        self.val = x
        self.left = self.right = None
class Solution:
    """Recursive structural comparison of two binary trees."""

    # @param p, a tree node
    # @param q, a tree node
    # @return a boolean
    def isSameTree(self, p, q):
        """Return True iff both trees have identical shape and node values."""
        if p is None or q is None:
            # Trees are equal here only when both are absent.
            return p is q
        return (p.val == q.val and
                self.isSameTree(p.left, q.left) and
                self.isSameTree(p.right, q.right))
if __name__ == "__main__":
    # Smoke test: two structurally identical trees should compare equal.
    first = TreeNode(1)
    first.left, first.right = TreeNode(2), TreeNode(3)
    second = TreeNode(1)
    second.left, second.right = TreeNode(2), TreeNode(3)
    print(Solution().isSameTree(first, second))
| kamyu104/LeetCode | Python/same-tree.py | Python | mit | 1,030 |
from main import BaseHandler
from models.blog_post import blog_key
from google.appengine.ext import ndb
import time
class EditPostHandler(BaseHandler):
    """Edit post if authored by user."""

    def get(self):
        """Render the edit form for a post, if it belongs to the logged-in user."""
        if not self.user:
            # render login page with message that you have been redirected
            error_message = "You can't edit a post without logging in."
            self.redirect('/login?error=' + error_message)
            return
        # retrieve post
        post_id = self.request.get("post")
        key = ndb.Key('BlogPost', int(post_id), parent=blog_key())
        post = key.get()
        if not post:
            self.error(404)
            return
        if post.author.username == self.user.username:
            self.render("editpost.html", post=post)
        else:
            # redirect to unauthorized page
            error_message = "You are not permitted to edit a post that"\
                " you have not created."
            self.redirect('/unauthorized?error=' + error_message)

    def post(self):
        """Apply the submitted edits; only the post's author may modify it."""
        # Bug fix: previously self.user was dereferenced without a check, so an
        # unauthenticated POST raised AttributeError (HTTP 500). Redirect to the
        # login page instead, mirroring get().
        if not self.user:
            error_message = "You can't edit a post without logging in."
            self.redirect('/login?error=' + error_message)
            return
        post_id = self.request.get("post")
        key = ndb.Key('BlogPost', int(post_id), parent=blog_key())
        post = key.get()
        # check if user is authorised to edit it
        if post and post.author.username == self.user.username:
            subject = self.request.get("subject")
            content = self.request.get("content")
            if subject and content:
                post.subject = subject
                post.content = content
                post.put()
                # Give the eventually-consistent datastore a moment so the
                # updated post is visible on the page we redirect to.
                time.sleep(0.1)
                self.redirect("/blog")
            else:
                error = "Subject or Content of a blog can't be empty"
                self.render("editpost.html", post=post, error=error)
        else:
            self.redirect("/blog")
| ashutoshpurushottam/wishper-blog | handlers/edit_post.py | Python | apache-2.0 | 1,904 |
""" It is used to test Plotting utilities used to create different plots.
"""
# pylint: disable=invalid-name,wrong-import-position
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import math
import operator
from functools import reduce
# sut
from DIRAC.Core.Utilities.Plotting.Plots import (
generateHistogram,
generateStackedLinePlot,
generatePiePlot,
generateCumulativePlot,
generateQualityPlot,
generateTimedStackedBarPlot,
generateNoDataPlot,
generateErrorMessagePlot,
)
from DIRAC.tests.Utilities.plots import compare
# Directory holding the committed reference images the tests compare against.
plots_directory = os.path.join(os.path.dirname(__file__), "plots")
# Scratch file every test renders its plot into before comparing.
filename = "plot.png"
def test_histogram():
    """
    test histogram
    """

    def check(data, metadata, reference):
        # Render into the shared scratch file, then diff against the committed
        # reference image; compare() returns 0.0 for identical images.
        outcome = generateHistogram(filename, data, metadata)
        assert outcome["OK"] is True
        assert compare(filename, os.path.join(plots_directory, reference)) == 0.0

    check([2, 2, 3, 4, 5, 5], {}, "histogram1.png")
    check([{"a": [1, 2, 3, 1, 2, 2, 4, 2]}, {"b": [2, 2, 2, 4, 4, 1, 1]}],
          {"plot_grid": "2:1"}, "histogram2.png")
    check([{"a": [1]}, {"b": [2, 3, 3, 5, 5]}], {}, "histogram3.png")
def test_stackedlineplots():
    """
    test stacked line plot
    """
    # Two sites' time series (epoch seconds -> value), empty plot metadata.
    res = generateStackedLinePlot(
        filename,
        {
            "LCG.Zoltan.hu": {
                1584460800: 1.0,
                1584489600: 2.0,
                1584511200: 1.0,
                1584464400: 1.0,
                1584540000: 0.022222222222222223,
                1584500400: 2.2,
                1584529200: 1.2,
                1584468000: 0.0,
                1584486000: 1.1,
                1584518400: 0.2,
                1584471600: 1.0,
                1584532800: 0.0022222222222222222,
                1584507600: 0.3,
                1584475200: 1.2,
                1584482400: 0.4,
                1584496800: 5.0,
                1584525600: 0.5,
                1584536400: 0.012777777777777779,
                1584514800: 2.0,
                1584453600: 3.0,
                1584478800: 0.09,
                1584504000: 1.0,
                1584457200: 3.0,
                1584493200: 1.0,
                1584522000: 1.8,
            },
            "LCG.CERN.cern": {
                1584460800: 1.6,
                1584489600: 2.8,
                1584511200: 3.0,
                1584464400: 4.0,
                1584540000: 1.022222222222222223,
                1584500400: 3.2,
                1584529200: 0.2,
                1584468000: 1.0,
                1584486000: 1.1,
            },
        },
        {},
    )
    assert res["OK"] is True
    # compare() returns 0.0 when the rendered file matches the reference image.
    res = compare(filename, os.path.join(plots_directory, "stackedline.png"))
    assert res == 0.0
def test_piechartplot():
    """
    test pie chart plots
    """
    slices = {"a": 16.0, "b": 56.0, "c": 15, "d": 20}
    outcome = generatePiePlot(filename, slices, {})
    assert outcome["OK"] is True
    # 0.0 means the rendered image matches the committed reference exactly.
    difference = compare(filename, os.path.join(plots_directory, "piechart.png"))
    assert difference == 0.0
def test_cumulativeplot():
    """
    test cumulative stracked line plot
    """
    # Single "User" series (epoch seconds -> value) plus plot metadata that
    # fixes the time span, labels and label sorting.
    res = generateCumulativePlot(
        filename,
        {
            "User": {
                1584460800: 0.0,
                1584489600: 0.0,
                1584511200: 0.0,
                1584464400: 0.0,
                1584540000: 16.0,
                1584500400: 0.0,
                1584529200: 0.0,
                1584468000: 0.0,
                1584457200: 0.0,
                1584518400: 0.0,
                1584471600: 0.0,
                1584507600: 0.0,
                1584475200: 0.0,
                1584496800: 0.0,
                1584525600: 0.0,
                1584536400: 6.0,
                1584486000: 0.0,
                1584514800: 0.0,
                1584482400: 0.0,
                1584478800: 0.0,
                1584504000: 0.0,
                1584532800: 1.0,
                1584543600: 21.0,
                1584493200: 0.0,
                1584522000: 0.0,
            }
        },
        {
            "span": 3600,
            "title": "Cumulative Jobs by JobType",
            "starttime": 1584457326,
            "ylabel": "jobs",
            "sort_labels": "max_value",
            "endtime": 1584543726,
        },
    )
    assert res["OK"] is True
    res = compare(filename, os.path.join(plots_directory, "cumulativeplot.png"))
    assert res == 0.0
def test_qualityplot():
    """
    Test quality plot
    """
    # Same data rendered twice: once with empty metadata ...
    res = generateQualityPlot(
        filename,
        {"User": {1584543600: 37.5, 1584547200: 37.5, 1584619200: 33.33333333333333, 1584601200: 36.53846153846153}},
        {},
    )
    assert res["OK"] is True
    res = compare(filename, os.path.join(plots_directory, "qualityplot1.png"))
    assert res == 0.0
    # ... and once with explicit span/time-window/title metadata; each variant
    # has its own reference image.
    res = generateQualityPlot(
        filename,
        {"User": {1584543600: 37.5, 1584547200: 37.5, 1584619200: 33.33333333333333, 1584601200: 36.53846153846153}},
        {"endtime": 1584627764, "span": 3600, "starttime": 1584541364, "title": "Job CPU efficiency by JobType"},
    )
    assert res["OK"] is True
    res = compare(filename, os.path.join(plots_directory, "qualityplot2.png"))
    assert res == 0.0
def test_timestackedbarplot():
    """
    test timed stacked bar plot
    """
    # Two sites' hourly series (epoch seconds -> jobs/hour) stacked per bar.
    res = generateTimedStackedBarPlot(
        filename,
        {
            "LCG.Cern.cern": {
                1584662400: 0.0,
                1584691200: 0.0,
                1584637200: 15.9593220339,
                1584666000: 0.0,
                1584694800: 0.0,
                1584626400: 31.867945823900005,
                1584669600: 0.0,
                1584644400: 0.0,
                1584615600: 0.0,
                1584673200: 0.0,
                1584633600: 14.0406779661,
                1584676800: 0.0,
                1584684000: 0.0,
                1584651600: 0.0,
                1584680400: 0.0,
                1584612000: 0.0,
                1584640800: 0.0,
                1584655200: 0.0,
                1584622800: 23.2293933044,
                1584698400: 0.0,
                1584630000: 9.5970654628,
                1584658800: 0.0,
                1584648000: 0.0,
                1584687600: 12.0,
                1584619200: 10.3055954089,
            },
            "LCG.NCBJ.pl": {
                1584691200: 0.1,
                1584662400: 2.0,
                1584651600: 0.0,
                1584637200: 0.0,
                1584694800: 4.0,
                1584626400: 0.0,
                1584669600: 6.0,
                1584655200: 0.0,
                1584644400: 0.0,
                1584615600: 0.0,
                1584673200: 0.0,
                1584633600: 9.0,
                1584676800: 0.0,
                1584698400: 0.0,
                1584622800: 0.0,
                1584680400: 0.0,
                1584612000: 0.0,
                1584640800: 0.0,
                1584684000: 0.0,
                1584666000: 0.0,
                1584630000: 0.0,
                1584658800: 0.0,
                1584648000: 0.0,
                1584687600: 0.0,
                1584619200: 0.0,
            },
        },
        {
            "ylabel": "jobs / hour",
            "endtime": 1584700844,
            "span": 3600,
            "starttime": 1584614444,
            "title": "Jobs by Site",
        },
    )
    assert res["OK"] is True
    res = compare(filename, os.path.join(plots_directory, "timedstackedbarplot.png"))
    assert res == 0.0
def test_nodataplot():
    """
    Test no data plot
    """
    # Empty data set: the generator renders a placeholder carrying the title.
    outcome = generateNoDataPlot(filename, {}, {"title": "Test plot"})
    assert outcome["OK"] is True
    assert compare(filename, os.path.join(plots_directory, "nodata.png")) == 0.0
def test_error():
    """
    Test error message plot
    """
    outcome = generateErrorMessagePlot("testing error message")
    assert outcome["OK"] is True
    # This generator returns the image bytes in "Value"; persist them so
    # compare() can read the rendered plot from disk.
    with open(filename, "wb") as out:
        out.write(outcome["Value"])
    assert compare(filename, os.path.join(plots_directory, "error.png")) == 0.0
| ic-hep/DIRAC | tests/Integration/AccountingSystem/Test_Plots.py | Python | gpl-3.0 | 8,387 |
# coding: utf-8
from __future__ import absolute_import
import os
import sys
import unittest
import flockos
from flockos.rest import ApiException
from flockos.models.error import Error
class TestError(unittest.TestCase):
    """ Error unit test stubs """

    def setUp(self):
        # No fixtures required for this stub.
        pass

    def tearDown(self):
        pass

    def testError(self):
        """
        Test Error
        """
        # Only construction is exercised; the instance is intentionally unused.
        # NOTE(review): this looks like an auto-generated client-test stub —
        # flesh out with real assertions when Error gains behavior.
        model = flockos.models.error.Error()
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| flockchat/pyflock | test/test_error.py | Python | apache-2.0 | 497 |
"""Tests for AVM Fritz!Box sensor component."""
from datetime import timedelta
from unittest.mock import Mock
from requests.exceptions import HTTPError
from homeassistant.components.fritzbox.const import (
ATTR_STATE_DEVICE_LOCKED,
ATTR_STATE_LOCKED,
DOMAIN as FB_DOMAIN,
)
from homeassistant.components.sensor import (
ATTR_STATE_CLASS,
DOMAIN,
STATE_CLASS_MEASUREMENT,
)
from homeassistant.const import (
ATTR_FRIENDLY_NAME,
ATTR_UNIT_OF_MEASUREMENT,
CONF_DEVICES,
PERCENTAGE,
TEMP_CELSIUS,
)
from homeassistant.core import HomeAssistant
import homeassistant.util.dt as dt_util
from . import FritzDeviceSensorMock, setup_config_entry
from .const import CONF_FAKE_NAME, MOCK_CONFIG
from tests.common import async_fire_time_changed
ENTITY_ID = f"{DOMAIN}.{CONF_FAKE_NAME}"
async def test_setup(hass: HomeAssistant, fritz: Mock):
    """Test setup of platform."""
    device = FritzDeviceSensorMock()
    assert await setup_config_entry(
        hass, MOCK_CONFIG[FB_DOMAIN][CONF_DEVICES][0], ENTITY_ID, device, fritz
    )
    await hass.async_block_till_done()

    # Temperature entity: measurement state class with a °C unit.
    state = hass.states.get(f"{ENTITY_ID}_temperature")
    assert state
    assert state.state == "1.23"
    assert state.attributes[ATTR_FRIENDLY_NAME] == f"{CONF_FAKE_NAME} Temperature"
    assert state.attributes[ATTR_STATE_DEVICE_LOCKED] == "fake_locked_device"
    assert state.attributes[ATTR_STATE_LOCKED] == "fake_locked"
    assert state.attributes[ATTR_UNIT_OF_MEASUREMENT] == TEMP_CELSIUS
    assert state.attributes[ATTR_STATE_CLASS] == STATE_CLASS_MEASUREMENT

    # Battery entity: percentage unit and no state class attribute.
    state = hass.states.get(f"{ENTITY_ID}_battery")
    assert state
    assert state.state == "23"
    assert state.attributes[ATTR_FRIENDLY_NAME] == f"{CONF_FAKE_NAME} Battery"
    assert state.attributes[ATTR_UNIT_OF_MEASUREMENT] == PERCENTAGE
    assert ATTR_STATE_CLASS not in state.attributes
async def test_update(hass: HomeAssistant, fritz: Mock):
    """Test update without error."""
    device = FritzDeviceSensorMock()
    assert await setup_config_entry(
        hass, MOCK_CONFIG[FB_DOMAIN][CONF_DEVICES][0], ENTITY_ID, device, fritz
    )
    # Setup performs exactly one device poll and one login.
    assert device.update.call_count == 1
    assert fritz().login.call_count == 1

    # Advancing time past the poll interval triggers another device poll,
    # but no additional login while the session stays healthy.
    next_update = dt_util.utcnow() + timedelta(seconds=200)
    async_fire_time_changed(hass, next_update)
    await hass.async_block_till_done()
    assert device.update.call_count == 2
    assert fritz().login.call_count == 1
async def test_update_error(hass: HomeAssistant, fritz: Mock):
    """Test update with error."""
    device = FritzDeviceSensorMock()
    device.update.side_effect = HTTPError("Boom")
    # Setup fails because the first device poll raises.
    assert not await setup_config_entry(
        hass, MOCK_CONFIG[FB_DOMAIN][CONF_DEVICES][0], ENTITY_ID, device, fritz
    )
    assert device.update.call_count == 1
    assert fritz().login.call_count == 1

    # After the failed poll, the next cycle polls again and logs in again.
    next_update = dt_util.utcnow() + timedelta(seconds=200)
    async_fire_time_changed(hass, next_update)
    await hass.async_block_till_done()
    assert device.update.call_count == 2
    assert fritz().login.call_count == 2
| Danielhiversen/home-assistant | tests/components/fritzbox/test_sensor.py | Python | apache-2.0 | 3,081 |
from rest_framework import serializers
from cosmopolitan.models import Continent
from cosmopolitan.models import Currency
from cosmopolitan.models import Country
from cosmopolitan.models import City
from cosmopolitan.models import Region
from cosmopolitan.models import Postcode
from cosmopolitan.models import Polygon
from cosmopolitan.serializers.common import CurrencySerializer
from cosmopolitan.serializers.common import CountrySerializer
from cosmopolitan.serializers.common import ContinentSerializer
from cosmopolitan.serializers.common import CitySerializer
from cosmopolitan.serializers.common import RegionSerializer
from cosmopolitan.serializers.common import PostcodeSerializer
from cosmopolitan.serializers.common import PolygonSerializer
from cosmopolitan.serializers.internal import CountryShortSerializer
from cosmopolitan.serializers.internal import ContinentShortSerializer
from cosmopolitan.serializers.internal import CurrencyShortSerializer
from cosmopolitan.serializers.internal import RegionShortSerializer
from cosmopolitan.serializers.internal import ContinentWithRelatedSerializer
# --- Polygon serializers -----------------------------------------------------
# One list/detail pair per polygon endpoint (country, city, region, generic).
# List variants omit the 'polygon' field; detail variants include it. Each
# pair differs only in the hyperlink view name it targets.


class CountryPolygonListSerializer(PolygonSerializer):
    url = serializers.HyperlinkedIdentityField(view_name='countrypolygon-detail')

    class Meta:
        model = Polygon
        fields = ('id', 'url', 'type', 'type_id')


class CountryPolygonDetailSerializer(PolygonSerializer):
    url = serializers.HyperlinkedIdentityField(view_name='countrypolygon-detail')

    class Meta:
        model = Polygon
        fields = ('id', 'url', 'type', 'type_id', 'polygon')


class CityPolygonListSerializer(PolygonSerializer):
    url = serializers.HyperlinkedIdentityField(view_name='citypolygon-detail')

    class Meta:
        model = Polygon
        fields = ('id', 'url', 'type', 'type_id')


class CityPolygonDetailSerializer(PolygonSerializer):
    url = serializers.HyperlinkedIdentityField(view_name='citypolygon-detail')

    class Meta:
        model = Polygon
        fields = ('id', 'url', 'type', 'type_id', 'polygon')


class RegionPolygonListSerializer(PolygonSerializer):
    url = serializers.HyperlinkedIdentityField(view_name='regionpolygon-detail')

    class Meta:
        model = Polygon
        fields = ('id', 'url', 'type', 'type_id')


class RegionPolygonDetailSerializer(PolygonSerializer):
    url = serializers.HyperlinkedIdentityField(view_name='regionpolygon-detail')

    class Meta:
        model = Polygon
        fields = ('id', 'url', 'type', 'type_id', 'polygon')


class PolygonListSerializer(PolygonSerializer):
    url = serializers.HyperlinkedIdentityField(view_name='polygon-detail')

    class Meta:
        model = Polygon
        fields = ('id', 'url', 'type', 'type_id')


class PolygonDetailSerializer(PolygonSerializer):
    url = serializers.HyperlinkedIdentityField(view_name='polygon-detail')

    class Meta:
        model = Polygon
        fields = ('id', 'url', 'type', 'type_id', 'polygon')
# --- Currency / Continent serializers ----------------------------------------
# List variants nest the short (compact) related serializers; detail variants
# nest the full related serializers and expose extra relations.


class CurrencyListSerializer(CurrencySerializer):
    countries = CountryShortSerializer(many=True, read_only=True)

    class Meta:
        model = Currency
        fields = ('id', 'url', 'name', 'countries')


class CurrencyDetailSerializer(CurrencySerializer):
    countries = CountrySerializer(many=True, read_only=True)
    continents = ContinentSerializer(many=True, read_only=True)

    class Meta:
        model = Currency
        fields = ('id', 'url', 'name', 'countries', 'continents')


class ContinentListSerializer(ContinentSerializer):
    countries = CountryShortSerializer(many=True, read_only=True)

    class Meta:
        model = Continent
        fields = ('id', 'url', 'name', 'countries')


class ContinentDetailSerializer(ContinentSerializer):
    countries = CountrySerializer(many=True, read_only=True)
    currencies = CurrencySerializer(many=True, read_only=True)

    class Meta:
        model = Continent
        fields = ('id', 'url', 'name', 'countries', 'currencies')
# --- Country / City serializers ----------------------------------------------


class CountryListSerializer(CountrySerializer):
    currency = CurrencyShortSerializer()
    continent = ContinentShortSerializer()

    class Meta:
        model = Country
        fields = ('id', 'url', 'name', 'continent', 'currency')


class CountryDetailSerializer(CountrySerializer):
    currency = CurrencySerializer()
    # Detail view embeds the continent together with its related objects.
    continent = ContinentWithRelatedSerializer()

    class Meta:
        model = Country
        fields = ('id', 'url', 'name', 'continent', 'currency')


class CityListSerializer(CitySerializer):
    country = CountryShortSerializer()
    # Not every city has a region, hence allow_null.
    region = RegionShortSerializer(allow_null=True)

    class Meta:
        model = City
        fields = ('id', 'url', 'name', 'name_std', 'kind', 'country', 'region', 'slug')


class CityDetailSerializer(CitySerializer):
    country = CountryShortSerializer()
    region = RegionShortSerializer(allow_null=True)

    class Meta:
        model = City
        # Detail adds location/population/elevation/timezone on top of the
        # list fields.
        fields = ('id', 'slug', 'url', 'name', 'name_std', 'kind', 'country',
                  'region', 'location', 'population', 'elevation', 'timezone')
# --- Region / Postcode serializers -------------------------------------------


class RegionListSerializer(RegionSerializer):
    country = CountryShortSerializer()

    class Meta:
        model = Region
        fields = ('id', 'url', 'name', 'country')


class RegionDetailSerializer(RegionSerializer):
    country = CountrySerializer()
    # Continent is reached through the region's country relation.
    continent = ContinentSerializer(source='country.continent')

    class Meta:
        model = Region
        fields = ('id', 'url', 'name', 'country', 'continent')


class PostcodeListSerializer(PostcodeSerializer):
    country = CountryShortSerializer()
    region = RegionShortSerializer()

    class Meta:
        model = Postcode
        fields = ('id', 'url', 'country', 'region')


class PostcodeDetailSerializer(PostcodeSerializer):
    country = CountryListSerializer()
    region = RegionSerializer()

    class Meta:
        model = Postcode
        fields = ('id', 'url', 'country', 'region')
| openspending/cosmopolitan | cosmopolitan/serializers/specific.py | Python | mit | 5,905 |
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from oslo_utils import excutils
import six
from heat.common import exception
from heat.common import grouputils
from heat.common.i18n import _
from heat.common.i18n import _LE
from heat.common.i18n import _LI
from heat.engine import attributes
from heat.engine import constraints
from heat.engine import function
from heat.engine.notification import autoscaling as notification
from heat.engine import properties
from heat.engine import resource
from heat.engine.resources.openstack.heat import instance_group as instgrp
from heat.engine import rsrc_defn
from heat.engine import support
from heat.scaling import cooldown
from heat.scaling import scalingutil as sc_util
LOG = logging.getLogger(__name__)
class AutoScalingGroup(instgrp.InstanceGroup, cooldown.CooldownMixin):
support_status = support.SupportStatus(version='2014.1')
PROPERTIES = (
AVAILABILITY_ZONES, LAUNCH_CONFIGURATION_NAME, MAX_SIZE, MIN_SIZE,
COOLDOWN, DESIRED_CAPACITY, HEALTH_CHECK_GRACE_PERIOD,
HEALTH_CHECK_TYPE, LOAD_BALANCER_NAMES, VPCZONE_IDENTIFIER, TAGS,
INSTANCE_ID,
) = (
'AvailabilityZones', 'LaunchConfigurationName', 'MaxSize', 'MinSize',
'Cooldown', 'DesiredCapacity', 'HealthCheckGracePeriod',
'HealthCheckType', 'LoadBalancerNames', 'VPCZoneIdentifier', 'Tags',
'InstanceId',
)
_TAG_KEYS = (
TAG_KEY, TAG_VALUE,
) = (
'Key', 'Value',
)
_UPDATE_POLICY_SCHEMA_KEYS = (
ROLLING_UPDATE
) = (
'AutoScalingRollingUpdate'
)
_ROLLING_UPDATE_SCHEMA_KEYS = (
MIN_INSTANCES_IN_SERVICE, MAX_BATCH_SIZE, PAUSE_TIME
) = (
'MinInstancesInService', 'MaxBatchSize', 'PauseTime'
)
ATTRIBUTES = (
INSTANCE_LIST,
) = (
'InstanceList',
)
properties_schema = {
AVAILABILITY_ZONES: properties.Schema(
properties.Schema.LIST,
_('Not Implemented.'),
required=True
),
LAUNCH_CONFIGURATION_NAME: properties.Schema(
properties.Schema.STRING,
_('The reference to a LaunchConfiguration resource.'),
update_allowed=True
),
INSTANCE_ID: properties.Schema(
properties.Schema.STRING,
_('The ID of an existing instance to use to '
'create the Auto Scaling group. If specify this property, '
'will create the group use an existing instance instead of '
'a launch configuration.'),
constraints=[
constraints.CustomConstraint("nova.server")
]
),
MAX_SIZE: properties.Schema(
properties.Schema.INTEGER,
_('Maximum number of instances in the group.'),
required=True,
update_allowed=True
),
MIN_SIZE: properties.Schema(
properties.Schema.INTEGER,
_('Minimum number of instances in the group.'),
required=True,
update_allowed=True
),
COOLDOWN: properties.Schema(
properties.Schema.INTEGER,
_('Cooldown period, in seconds.'),
update_allowed=True
),
DESIRED_CAPACITY: properties.Schema(
properties.Schema.INTEGER,
_('Desired initial number of instances.'),
update_allowed=True
),
HEALTH_CHECK_GRACE_PERIOD: properties.Schema(
properties.Schema.INTEGER,
_('Not Implemented.'),
implemented=False
),
HEALTH_CHECK_TYPE: properties.Schema(
properties.Schema.STRING,
_('Not Implemented.'),
constraints=[
constraints.AllowedValues(['EC2', 'ELB']),
],
implemented=False
),
LOAD_BALANCER_NAMES: properties.Schema(
properties.Schema.LIST,
_('List of LoadBalancer resources.')
),
VPCZONE_IDENTIFIER: properties.Schema(
properties.Schema.LIST,
_('Use only with Neutron, to list the internal subnet to '
'which the instance will be attached; '
'needed only if multiple exist; '
'list length must be exactly 1.'),
schema=properties.Schema(
properties.Schema.STRING,
_('UUID of the internal subnet to which the instance '
'will be attached.')
)
),
TAGS: properties.Schema(
properties.Schema.LIST,
_('Tags to attach to this group.'),
schema=properties.Schema(
properties.Schema.MAP,
schema={
TAG_KEY: properties.Schema(
properties.Schema.STRING,
required=True
),
TAG_VALUE: properties.Schema(
properties.Schema.STRING,
required=True
),
},
)
),
}
attributes_schema = {
INSTANCE_LIST: attributes.Schema(
_("A comma-delimited list of server ip addresses. "
"(Heat extension)."),
type=attributes.Schema.STRING
),
}
rolling_update_schema = {
MIN_INSTANCES_IN_SERVICE: properties.Schema(properties.Schema.INTEGER,
default=0),
MAX_BATCH_SIZE: properties.Schema(properties.Schema.INTEGER,
default=1),
PAUSE_TIME: properties.Schema(properties.Schema.STRING,
default='PT0S')
}
update_policy_schema = {
ROLLING_UPDATE: properties.Schema(properties.Schema.MAP,
schema=rolling_update_schema)
}
    def handle_create(self):
        """Validate the launch configuration and create the nested stack."""
        self.validate_launchconfig()
        return self.create_with_template(self.child_template())
def _make_launch_config_resource(self, name, props):
lc_res_type = 'AWS::AutoScaling::LaunchConfiguration'
lc_res_def = rsrc_defn.ResourceDefinition(name,
lc_res_type,
props)
lc_res = resource.Resource(name, lc_res_def, self.stack)
return lc_res
    def _get_conf_properties(self):
        """Return the (launch config resource, resolved properties) pair.

        When InstanceId is set, a launch configuration is synthesized from the
        existing server's image/flavor/keypair/security groups; otherwise the
        parent class resolves the referenced LaunchConfiguration resource.
        """
        instance_id = self.properties.get(self.INSTANCE_ID)
        if instance_id:
            server = self.client_plugin('nova').get_server(instance_id)
            instance_props = {
                'ImageId': server.image['id'],
                'InstanceType': server.flavor['id'],
                'KeyName': server.key_name,
                'SecurityGroups': [sg['name']
                                   for sg in server.security_groups]
            }
            conf = self._make_launch_config_resource(self.name,
                                                     instance_props)
            props = function.resolve(conf.properties.data)
        else:
            conf, props = super(AutoScalingGroup, self)._get_conf_properties()

        # With Neutron, pin the instances to the single configured subnet.
        vpc_zone_ids = self.properties.get(self.VPCZONE_IDENTIFIER)
        if vpc_zone_ids:
            props['SubnetId'] = vpc_zone_ids[0]

        return conf, props
    def check_create_complete(self, task):
        """Invoke the cooldown after creation succeeds."""
        done = super(AutoScalingGroup, self).check_create_complete(task)
        if done:
            # Record the initial size so subsequent adjustments respect the
            # cooldown window from creation time.
            self._cooldown_timestamp(
                "%s : %s" % (sc_util.CFN_EXACT_CAPACITY,
                             grouputils.get_size(self)))
        return done
    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """
        If Properties has changed, update self.properties, so we get the new
        values during any subsequent adjustment.
        """
        if tmpl_diff:
            # parse update policy
            if 'UpdatePolicy' in tmpl_diff:
                up = json_snippet.update_policy(self.update_policy_schema,
                                                self.context)
                self.update_policy = up
            self.properties = json_snippet.properties(self.properties_schema,
                                                      self.context)

        if prop_diff:
            # Replace instances first if launch configuration has changed
            self._try_rolling_update(prop_diff)

        # Re-assert the target size: the explicit DesiredCapacity when given,
        # otherwise the group's current size (clamped by min/max in adjust()).
        if self.properties[self.DESIRED_CAPACITY] is not None:
            self.adjust(self.properties[self.DESIRED_CAPACITY],
                        adjustment_type=sc_util.CFN_EXACT_CAPACITY)
        else:
            current_capacity = grouputils.get_size(self)
            self.adjust(current_capacity,
                        adjustment_type=sc_util.CFN_EXACT_CAPACITY)
    def adjust(self, adjustment,
               adjustment_type=sc_util.CFN_CHANGE_IN_CAPACITY,
               min_adjustment_step=None, signal=False):
        """
        Adjust the size of the scaling group if the cooldown permits.

        :param adjustment: the adjustment value, interpreted per
            adjustment_type.
        :param adjustment_type: one of the sc_util CFN adjustment types.
        :param min_adjustment_step: minimum step for percentage adjustments.
        :param signal: True when triggered by a scaling policy signal; in that
            case an in-progress cooldown raises NoActionRequired instead of
            returning silently.
        """
        if self._cooldown_inprogress():
            LOG.info(_LI("%(name)s NOT performing scaling adjustment, "
                         "cooldown %(cooldown)s"),
                     {'name': self.name,
                      'cooldown': self.properties[self.COOLDOWN]})
            if signal:
                raise exception.NoActionRequired()
            else:
                return

        capacity = grouputils.get_size(self)
        lower = self.properties[self.MIN_SIZE]
        upper = self.properties[self.MAX_SIZE]
        new_capacity = sc_util.calculate_new_capacity(capacity, adjustment,
                                                      adjustment_type,
                                                      min_adjustment_step,
                                                      lower, upper)

        # send a notification before, on-error and on-success.
        notif = {
            'stack': self.stack,
            'adjustment': adjustment,
            'adjustment_type': adjustment_type,
            'capacity': capacity,
            'groupname': self.FnGetRefId(),
            'message': _("Start resizing the group %(group)s") % {
                'group': self.FnGetRefId()},
            'suffix': 'start',
        }
        notification.send(**notif)
        try:
            self.resize(new_capacity)
        except Exception as resize_ex:
            with excutils.save_and_reraise_exception():
                try:
                    notif.update({'suffix': 'error',
                                  'message': six.text_type(resize_ex),
                                  'capacity': grouputils.get_size(self),
                                  })
                    notification.send(**notif)
                except Exception:
                    # Never let a notification failure mask the resize error.
                    LOG.exception(_LE('Failed sending error notification'))
        else:
            notif.update({
                'suffix': 'end',
                'capacity': new_capacity,
                'message': _("End resizing the group %(group)s") % {
                    'group': notif['groupname']},
            })
            notification.send(**notif)
        finally:
            # Stamp the cooldown regardless of outcome.
            self._cooldown_timestamp("%s : %s" % (adjustment_type,
                                                  adjustment))
    def _tags(self):
        """Add Identifying Tags to all servers in the group.

        This is so the Dimensions received from cfn-push-stats all include
        the groupname and stack id.

        Note: the group name must match what is returned from FnGetRefId
        """
        autoscaling_tag = [{self.TAG_KEY: 'metering.AutoScalingGroupName',
                            self.TAG_VALUE: self.FnGetRefId()}]
        return super(AutoScalingGroup, self)._tags() + autoscaling_tag
def validate(self):
    """Validate group size bounds and AWS-specific property constraints."""
    # check validity of group size
    min_size = self.properties[self.MIN_SIZE]
    max_size = self.properties[self.MAX_SIZE]

    if max_size < min_size:
        msg = _("MinSize can not be greater than MaxSize")
        raise exception.StackValidationFailed(message=msg)

    if min_size < 0:
        msg = _("The size of AutoScalingGroup can not be less than zero")
        raise exception.StackValidationFailed(message=msg)

    if self.properties[self.DESIRED_CAPACITY] is not None:
        desired_capacity = self.properties[self.DESIRED_CAPACITY]
        if desired_capacity < min_size or desired_capacity > max_size:
            msg = _("DesiredCapacity must be between MinSize and MaxSize")
            raise exception.StackValidationFailed(message=msg)

    # TODO(pasquier-s): once Neutron is able to assign subnets to
    # availability zones, it will be possible to specify multiple subnets.
    # For now, only one subnet can be specified. The bug #1096017 tracks
    # this issue.
    if (self.properties.get(self.VPCZONE_IDENTIFIER) and
            len(self.properties[self.VPCZONE_IDENTIFIER]) != 1):
        raise exception.NotSupported(feature=_("Anything other than one "
                                               "VPCZoneIdentifier"))

    # validate properties InstanceId and LaunchConfigurationName
    # for aws auto scaling group.
    # should provide just only one of
    if self.type() == 'AWS::AutoScaling::AutoScalingGroup':
        instanceId = self.properties.get(self.INSTANCE_ID)
        launch_config = self.properties.get(
            self.LAUNCH_CONFIGURATION_NAME)
        # XOR check: exactly one of the two may be set.
        if bool(instanceId) == bool(launch_config):
            msg = _("Either 'InstanceId' or 'LaunchConfigurationName' "
                    "must be provided.")
            raise exception.StackValidationFailed(message=msg)

    super(AutoScalingGroup, self).validate()
def _resolve_attribute(self, name):
    '''
    heat extension: "InstanceList" returns comma delimited list of server
    ip addresses.
    '''
    if name == self.INSTANCE_LIST:
        # An empty join result is falsy, so a group with no members
        # resolves to None rather than an empty string.
        return u','.join(inst.FnGetAtt('PublicIp')
                         for inst in grouputils.get_members(self)) or None
def child_template(self):
    """Return the nested stack template sized for the group's start-up.

    The initial instance count is DesiredCapacity when it is set,
    otherwise MinSize.
    """
    # Compare against None explicitly (matching validate()) so that a
    # DesiredCapacity of 0 is honoured instead of silently falling back
    # to MIN_SIZE via truthiness.
    desired_capacity = self.properties[self.DESIRED_CAPACITY]
    if desired_capacity is not None:
        num_instances = desired_capacity
    else:
        num_instances = self.properties[self.MIN_SIZE]
    return self._create_template(num_instances)
def resource_mapping():
    """Map the AWS resource type name to its implementing class."""
    return {
        'AWS::AutoScaling::AutoScalingGroup': AutoScalingGroup,
    }
| takeshineshiro/heat | heat/engine/resources/aws/autoscaling/autoscaling_group.py | Python | apache-2.0 | 15,254 |
"""Holds numexpr, or a numexpr mock in case we are not using numexpr."""
from ddm.conf import DDMConfig, NUMEXPR_INSTALLED

if not DDMConfig.numexpr or not NUMEXPR_INSTALLED:
    if DDMConfig.numexpr:
        # numexpr was requested in the configuration but is not importable.
        import warnings
        warnings.warn("Numexpr is not installed! Numexpr acceleration disabled.")

    # fake numexpr...
    class numexpr:
        """Minimal stand-in mirroring the numexpr module's API."""

        @staticmethod
        def evaluate(expr, **kw):
            # Bug fix: the real numexpr.evaluate() returns the computed
            # result; the previous mock discarded the value of eval().
            # NOTE(review): keyword args (local_dict, out, ...) are
            # accepted but ignored by this fallback — confirm callers
            # never rely on them when numexpr is disabled.
            return eval(expr)

    USE_NUMEXPR = False
else:
    import numexpr
    USE_NUMEXPR = True
# -*- coding: utf-8 -
#
# This file is part of gunicorn released under the MIT license.
# See the NOTICE for more information.
import errno
import os
import sys
from datetime import datetime
from functools import partial
import time
_socket = __import__("socket")
# workaround on osx, disable kqueue
if sys.platform == "darwin":
os.environ['EVENT_NOKQUEUE'] = "1"
try:
import gevent
except ImportError:
raise RuntimeError("You need gevent installed to use this worker.")
from gevent.pool import Pool
from gevent.server import StreamServer
from gevent.socket import wait_write, socket
from gevent import pywsgi
import gunicorn
from gunicorn.http.wsgi import base_environ
from gunicorn.workers.async import AsyncWorker
from gunicorn.http.wsgi import sendfile as o_sendfile
VERSION = "gevent/%s gunicorn/%s" % (gevent.__version__, gunicorn.__version__)
def _gevent_sendfile(fdout, fdin, offset, nbytes):
    # Retry sendfile() until it completes, cooperatively yielding to the
    # gevent hub (wait_write) whenever the output fd is not yet writable
    # (EAGAIN); any other OSError propagates.
    while True:
        try:
            return o_sendfile(fdout, fdin, offset, nbytes)
        except OSError as e:
            if e.args[0] == errno.EAGAIN:
                wait_write(fdout)
            else:
                raise
def patch_sendfile():
    """Swap gunicorn's blocking sendfile for the gevent-aware version."""
    from gunicorn.http import wsgi

    # Nothing to patch when the platform has no sendfile at all.
    if o_sendfile is None:
        return
    setattr(wsgi, "sendfile", _gevent_sendfile)
class GeventWorker(AsyncWorker):
    """Async gunicorn worker that serves each connection in a gevent greenlet."""

    # Subclasses may supply a full WSGI server/handler pair (see
    # GeventPyWSGIWorker); when None, a raw StreamServer is used.
    server_class = None
    wsgi_handler = None

    def patch(self):
        """Monkey-patch the stdlib and wrap listener sockets for gevent."""
        from gevent import monkey
        monkey.noisy = False

        # if the new version is used make sure to patch subprocess
        if gevent.version_info[0] == 0:
            monkey.patch_all()
        else:
            monkey.patch_all(subprocess=True)

        # monkey patch sendfile to make it none blocking
        patch_sendfile()

        # patch sockets
        sockets = []
        for s in self.sockets:
            sockets.append(socket(s.FAMILY, _socket.SOCK_STREAM,
                                  _sock=s))
        self.sockets = sockets

    def notify(self):
        super(GeventWorker, self).notify()
        # Exit if the arbiter (parent process) died and we got re-parented.
        if self.ppid != os.getppid():
            self.log.info("Parent changed, shutting down: %s", self)
            sys.exit(0)

    def timeout_ctx(self):
        # Non-raising timeout (False): the keepalive wait is simply
        # cancelled after cfg.keepalive seconds.
        return gevent.Timeout(self.cfg.keepalive, False)

    def run(self):
        """Accept loop: start one gevent server per listener, then idle
        until shutdown, finishing in-flight requests up to graceful_timeout."""
        servers = []
        ssl_args = {}

        if self.cfg.is_ssl:
            ssl_args = dict(server_side=True, **self.cfg.ssl_options)

        for s in self.sockets:
            s.setblocking(1)
            # One greenlet pool per listener bounds concurrent connections.
            pool = Pool(self.worker_connections)
            if self.server_class is not None:
                environ = base_environ(self.cfg)
                environ.update({
                    "wsgi.multithread": True,
                    "SERVER_SOFTWARE": VERSION,
                })
                server = self.server_class(
                    s, application=self.wsgi, spawn=pool, log=self.log,
                    handler_class=self.wsgi_handler, environ=environ,
                    **ssl_args)
            else:
                hfun = partial(self.handle, s)
                server = StreamServer(s, handle=hfun, spawn=pool, **ssl_args)

            server.start()
            servers.append(server)

        try:
            while self.alive:
                self.notify()
                gevent.sleep(1.0)

        except KeyboardInterrupt:
            pass
        except:
            # Unexpected failure: stop every server (best effort) and
            # let the exception propagate to the arbiter.
            for server in servers:
                try:
                    server.stop()
                except:
                    pass
            raise

        try:
            # Stop accepting requests
            for server in servers:
                if hasattr(server, 'close'): # gevent 1.0
                    server.close()
                if hasattr(server, 'kill'): # gevent < 1.0
                    server.kill()

            # Handle current requests until graceful_timeout
            ts = time.time()
            while time.time() - ts <= self.cfg.graceful_timeout:
                accepting = 0
                for server in servers:
                    if server.pool.free_count() != server.pool.size:
                        accepting += 1

                # if no server is accepting a connection, we can exit
                if not accepting:
                    return

                self.notify()
                gevent.sleep(1.0)

            # Force kill all active the handlers
            self.log.warning("Worker graceful timeout (pid:%s)" % self.pid)
            [server.stop(timeout=1) for server in servers]
        except:
            pass

    def handle_request(self, *args):
        # GreenletExit / SystemExit are normal shutdown signals here,
        # not request errors.
        try:
            super(GeventWorker, self).handle_request(*args)
        except gevent.GreenletExit:
            pass
        except SystemExit:
            pass

    # init_process differs between gevent 0.x and >= 1.0: the old hub
    # needs an explicit DNS re-init after fork.
    if gevent.version_info[0] == 0:

        def init_process(self):
            # monkey patch here
            self.patch()

            # reinit the hub
            import gevent.core
            gevent.core.reinit()

            #gevent 0.13 and older doesn't reinitialize dns for us after forking
            #here's the workaround
            gevent.core.dns_shutdown(fail_requests=1)
            gevent.core.dns_init()
            super(GeventWorker, self).init_process()

    else:

        def init_process(self):
            # monkey patch here
            self.patch()

            # reinit the hub
            from gevent import hub
            hub.reinit()

            # then initialize the process
            super(GeventWorker, self).init_process()
super(GeventWorker, self).init_process()
class GeventResponse(object):
    """Lightweight response record handed to gunicorn's access logger."""

    # Class-level defaults so the attributes exist even on the class itself.
    status = None
    headers = None
    response_length = None

    def __init__(self, status, headers, clength):
        self.response_length = clength
        self.headers = headers
        self.status = status
class PyWSGIHandler(pywsgi.WSGIHandler):
    """gevent pywsgi handler wired into gunicorn's access logging."""

    def log_request(self):
        # Translate pywsgi's timing/attributes into the shape gunicorn's
        # access logger expects.
        start = datetime.fromtimestamp(self.time_start)
        finish = datetime.fromtimestamp(self.time_finish)
        response_time = finish - start
        resp_headers = getattr(self, 'response_headers', {})
        resp = GeventResponse(self.status, resp_headers, self.response_length)
        # 'headers' is absent when the request line could not be parsed.
        if hasattr(self, 'headers'):
            req_headers = [h.split(":", 1) for h in self.headers.headers]
        else:
            req_headers = []
        self.server.log.access(resp, req_headers, self.environ, response_time)

    def get_environ(self):
        env = super(PyWSGIHandler, self).get_environ()
        # Expose the raw socket and the undecoded request path to the app.
        env['gunicorn.sock'] = self.socket
        env['RAW_URI'] = self.path
        return env
class PyWSGIServer(pywsgi.WSGIServer):
    # Thin alias of gevent's pywsgi server so GeventPyWSGIWorker can
    # reference a gunicorn-local class.
    pass
class GeventPyWSGIWorker(GeventWorker):
    "The Gevent StreamServer based workers."
    # Uses gevent's full pywsgi stack instead of gunicorn's raw parser.
    server_class = PyWSGIServer
    wsgi_handler = PyWSGIHandler
| 1stvamp/gunicorn | gunicorn/workers/ggevent.py | Python | mit | 6,721 |
import os
import glob
"""Some utility function are here.
"""
def is_number(s):
    """Tell whether *s* can be interpreted as a number.

    Args:
        s: a string (or a single unicode numeral character).

    Returns:
        bool: True if *s* parses as a number, otherwise False.
    """
    # Plain float() covers ints, floats and scientific notation.
    try:
        float(s)
    except ValueError:
        pass
    else:
        return True

    # Fall back to single Unicode numeral characters such as u'½'.
    try:
        import unicodedata
        unicodedata.numeric(s)
    except (TypeError, ValueError):
        return False
    return True
def argv_resolver(argv):
    """Resolve command line args.

    Args:
        argv: list of command line args as given by sys.argv.

    Returns:
        (fromFile, toFile): input *.txt path and output *.mid path,
        OR
        (False, message): when the arguments are invalid; *message* is the
        error description (or the usage help when no input was found).
    """
    if len(argv) > 1:
        fromFile = argv[1]
        if not fromFile.endswith('.txt'):
            return False, 'Error: input file must be *.txt'
        if not os.path.isfile(fromFile):
            return False, ' '.join(['Error: ', fromFile, 'file not found.'])
        if len(argv) == 3:
            if not argv[2].endswith('.mid'):
                return False, 'Error: output file must be *.mid'
            toFile = argv[2]
        else:
            # Derive the output name from the input name.
            toFile = '.'.join([fromFile[0:-4], 'mid'])
    else:
        # No arguments: pick the first *.txt in the current directory.
        # (glob once instead of twice, as the original did)
        candidates = glob.glob("*.txt")
        if candidates:
            fromFile = candidates[0]
            toFile = '.'.join([fromFile[0:-4], 'mid'])
        else:
            return False, usingHelp
    return fromFile, toFile
def compute_params(imgSize, paddingMin, fontSize, count, antialiasing):
    """Calculate params for neat symbol positioning.

    Args:
        imgSize: tuple (width, height) of output image.
        paddingMin: minimal padding from image border.
        fontSize: font size in pt.
        count: tuple (columns, rows) of symbols.
        antialiasing: oversampling size.

    Returns:
        padding: tuple (x, y)
        interval: tuple (x, y) — step between symbol origins.
    """
    cols = count[0]
    rows = count[1]
    # Work in oversampled pixel coordinates throughout.
    width = imgSize[0] * antialiasing
    height = imgSize[1] * antialiasing
    glyph_w = fontSize * 0.5 * antialiasing
    glyph_h = fontSize * 0.6 * antialiasing
    pad2 = paddingMin * 2 * antialiasing

    # Padding absorbs the remainder after dividing the drawable area
    # evenly among the grid cells.
    padding_x = (width - ((width - pad2) / cols) * cols) / 2
    padding_y = (height - ((height - pad2) / rows) * rows) / 2

    # Free space between glyphs, slightly shrunk (0.99 / 0.96 fudge).
    interval_x = (width - padding_x * 2 - glyph_w * cols) / float(cols - 1) * 0.99
    interval_y = (height - padding_y * 2 - glyph_h * rows) / float(rows - 1) * 0.96

    return (padding_x, padding_y), (interval_x + glyph_w, interval_y + glyph_h)
"""Strings
"""
# Usage/help text returned by argv_resolver when no input *.txt can be
# resolved from the command line or the current directory.
usingHelp = 'PiToMidi - convert txt to midi. Text file must contain numbers. Dots, commas, etc is allowed.\nUsing:\nPiToMidi input.txt\nor\nPiToMidi input.txt output.mid\nor\nJust place *.txt in current dir.'
def bar(x):
    """Return the text-spinner glyph for phase *x* (0..3).

    Any other value raises KeyError, matching the original lookup.
    """
    glyphs = {
        0: '|',
        1: '/',
        2: '-',
        3: '\\',
    }
    return glyphs[x]
| TheLongRunSmoke/PiToMidi | libs/utils.py | Python | gpl-2.0 | 2,947 |
#!/usr/bin/env python
########################################################################
# File : dirac-version
# Author : Ricardo Graciani
########################################################################
"""
Print version of current DIRAC installation
Usage:
dirac-version [option]
Example:
$ dirac-version
"""
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
__RCSID__ = "$Id$"
import argparse
import DIRAC
from DIRAC.Core.Utilities.DIRACScript import DIRACScript as Script
@Script()
def main():
    # argparse is used only so that -h/--help prints the module docstring;
    # any other options are tolerated via parse_known_args().
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    parser.parse_known_args()
    print(DIRAC.version)
if __name__ == "__main__":
main()
| ic-hep/DIRAC | src/DIRAC/Core/scripts/dirac_version.py | Python | gpl-3.0 | 817 |
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from collections import defaultdict
from collections import MutableMapping
from jinja2.exceptions import UndefinedError
try:
from hashlib import sha1
except ImportError:
from sha import sha as sha1
from ansible import constants as C
from ansible.cli import CLI
from ansible.errors import *
from ansible.parsing import DataLoader
from ansible.plugins.cache import FactCache
from ansible.template import Templar
from ansible.utils.debug import debug
from ansible.vars.hostvars import HostVars
CACHED_VARS = dict()
class VariableManager:
    """Builds the merged variable dictionaries used during a playbook run.

    Variables come from many layers (role defaults, group/host vars files,
    facts, play vars/vars_files, task vars, extra vars); see get_vars()
    for the exact precedence order.  NOTE: this is Python 2 code
    (iteritems, old-style except clauses).
    """

    def __init__(self):

        self._fact_cache = FactCache()
        self._vars_cache = defaultdict(dict)
        self._extra_vars = defaultdict(dict)
        self._host_vars_files = defaultdict(dict)
        self._group_vars_files = defaultdict(dict)
        self._inventory = None

        # Random token substituted for parameters whose source variable is
        # undefined; see the 'omit' entry added in get_vars().
        self._omit_token = '__omit_place_holder__%s' % sha1(os.urandom(64)).hexdigest()

    def _get_cache_entry(self, play=None, host=None, task=None):
        # Build a composite cache key from whichever contexts are present.
        play_id = "NONE"
        if play:
            play_id = play._uuid

        host_id = "NONE"
        if host:
            host_id = host.get_name()

        task_id = "NONE"
        if task:
            task_id = task._uuid

        return "PLAY:%s;HOST:%s;TASK:%s" % (play_id, host_id, task_id)

    @property
    def extra_vars(self):
        ''' ensures a clean copy of the extra_vars are made '''
        return self._extra_vars.copy()

    @extra_vars.setter
    def extra_vars(self, value):
        ''' ensures a clean copy of the extra_vars are used to set the value '''
        assert isinstance(value, MutableMapping)
        self._extra_vars = value.copy()

    def set_inventory(self, inventory):
        self._inventory = inventory

    def _validate_both_dicts(self, a, b):
        '''
        Validates that both arguments are dictionaries, or an error is raised.
        '''
        if not (isinstance(a, MutableMapping) and isinstance(b, MutableMapping)):
            raise AnsibleError("failed to combine variables, expected dicts but got a '%s' and a '%s'" % (type(a).__name__, type(b).__name__))

    def _combine_vars(self, a, b):
        '''
        Combines dictionaries of variables, based on the hash behavior
        '''

        self._validate_both_dicts(a, b)

        if C.DEFAULT_HASH_BEHAVIOUR == "merge":
            return self._merge_dicts(a, b)
        else:
            # replace behaviour: keys from b simply override keys from a
            return dict(a.items() + b.items())

    def _merge_dicts(self, a, b):
        '''
        Recursively merges dict b into a, so that keys
        from b take precedence over keys from a.
        '''

        result = dict()

        self._validate_both_dicts(a, b)

        for dicts in a, b:
            # next, iterate over b keys and values
            for k, v in dicts.iteritems():
                # if there's already such key in a
                # and that key contains dict
                if k in result and isinstance(result[k], dict):
                    # merge those dicts recursively
                    result[k] = self._merge_dicts(a[k], v)
                else:
                    # otherwise, just copy a value from b to a
                    result[k] = v

        return result

    def get_vars(self, loader, play=None, host=None, task=None, use_cache=True):
        '''
        Returns the variables, with optional "context" given via the parameters
        for the play, host, and task (which could possibly result in different
        sets of variables being returned due to the additional context).

        The order of precedence is:
        - play->roles->get_default_vars (if there is a play context)
        - group_vars_files[host] (if there is a host context)
        - host_vars_files[host] (if there is a host context)
        - host->get_vars (if there is a host context)
        - fact_cache[host] (if there is a host context)
        - vars_cache[host] (if there is a host context)
        - play vars (if there is a play context)
        - play vars_files (if there's no host context, ignore
          file names that cannot be templated)
        - task->get_vars (if there is a task context)
        - extra vars
        '''

        debug("in VariableManager get_vars()")
        cache_entry = self._get_cache_entry(play=play, host=host, task=task)
        if cache_entry in CACHED_VARS and use_cache:
            debug("vars are cached, returning them now")
            return CACHED_VARS[cache_entry]

        all_vars = defaultdict(dict)

        if play:
            # first we compile any vars specified in defaults/main.yml
            # for all roles within the specified play
            for role in play.get_roles():
                all_vars = self._combine_vars(all_vars, role.get_default_vars())

        if host:
            # next, if a host is specified, we load any vars from group_vars
            # files and then any vars from host_vars files which may apply to
            # this host or the groups it belongs to

            # we merge in the special 'all' group_vars first, if they exist
            if 'all' in self._group_vars_files:
                all_vars = self._combine_vars(all_vars, self._group_vars_files['all'])

            for group in host.get_groups():
                all_vars = self._combine_vars(all_vars, group.get_vars())
                if group.name in self._group_vars_files and group.name != 'all':
                    all_vars = self._combine_vars(all_vars, self._group_vars_files[group.name])

            host_name = host.get_name()
            if host_name in self._host_vars_files:
                all_vars = self._combine_vars(all_vars, self._host_vars_files[host_name])

            # then we merge in vars specified for this host
            all_vars = self._combine_vars(all_vars, host.get_vars())

            # next comes the facts cache and the vars cache, respectively
            try:
                all_vars = self._combine_vars(all_vars, self._fact_cache.get(host.name, dict()))
            except KeyError:
                pass

        if play:
            all_vars = self._combine_vars(all_vars, play.get_vars())
            for vars_file_item in play.get_vars_files():
                try:
                    # create a set of temporary vars here, which incorporate the
                    # extra vars so we can properly template the vars_files entries
                    temp_vars = self._combine_vars(all_vars, self._extra_vars)
                    templar = Templar(loader=loader, variables=temp_vars)

                    # we assume each item in the list is itself a list, as we
                    # support "conditional includes" for vars_files, which mimics
                    # the with_first_found mechanism.
                    vars_file_list = templar.template(vars_file_item)
                    if not isinstance(vars_file_list, list):
                        vars_file_list = [ vars_file_list ]

                    # now we iterate through the (potential) files, and break out
                    # as soon as we read one from the list. If none are found, we
                    # raise an error, which is silently ignored at this point.
                    for vars_file in vars_file_list:
                        data = loader.load_from_file(vars_file)
                        if data is not None:
                            all_vars = self._combine_vars(all_vars, data)
                            break
                    else:
                        raise AnsibleError("vars file %s was not found" % vars_file_item)
                except AnsibleError, e:
                    # FIXME: get_vars should probably be taking a flag to determine
                    #        whether or not vars files errors should be fatal at this
                    #        stage, or just base it on whether a host was specified?
                    pass
                except UndefinedError, e:
                    continue

            if not C.DEFAULT_PRIVATE_ROLE_VARS:
                for role in play.get_roles():
                    all_vars = self._combine_vars(all_vars, role.get_vars())

        if host:
            all_vars = self._combine_vars(all_vars, self._vars_cache.get(host.get_name(), dict()))

        if task:
            if task._role:
                all_vars = self._combine_vars(all_vars, task._role.get_vars())
            all_vars = self._combine_vars(all_vars, task.get_vars())

        all_vars = self._combine_vars(all_vars, self._extra_vars)

        # FIXME: make sure all special vars are here
        # Finally, we create special vars
        all_vars['playbook_dir'] = loader.get_basedir()

        if host:
            all_vars['groups'] = [group.name for group in host.get_groups()]

            if self._inventory is not None:
                hostvars = HostVars(vars_manager=self, play=play, inventory=self._inventory, loader=loader)
                all_vars['hostvars'] = hostvars
                all_vars['groups'] = self._inventory.groups_list()

        if task:
            if task._role:
                all_vars['role_path'] = task._role._role_path

        if self._inventory is not None:
            all_vars['inventory_dir'] = self._inventory.basedir()
            if play:
                # add the list of hosts in the play, as adjusted for limit/filters
                # FIXME: play_hosts should be deprecated in favor of ansible_play_hosts,
                #        however this would take work in the templating engine, so for now
                #        we'll add both so we can give users something transitional to use
                host_list = [x.name for x in self._inventory.get_hosts()]
                all_vars['play_hosts'] = host_list
                all_vars['ansible_play_hosts'] = host_list

        # the 'omit' value alows params to be left out if the variable they are based on is undefined
        all_vars['omit'] = self._omit_token

        all_vars['ansible_version'] = CLI.version_info(gitinfo=False)

        # make vars self referential, so people can do things like 'vars[var_name]'
        # NOTE(review): copied_vars (minus 'hostvars') is built but the line
        # below stores a plain copy instead — looks unintentional; confirm.
        copied_vars = all_vars.copy()
        if 'hostvars' in copied_vars:
            del copied_vars['hostvars']
        all_vars['vars'] = all_vars.copy()

        #CACHED_VARS[cache_entry] = all_vars

        debug("done with get_vars()")
        return all_vars

    def _get_inventory_basename(self, path):
        '''
        Returns the bsaename minus the extension of the given path, so the
        bare filename can be matched against host/group names later
        '''

        (name, ext) = os.path.splitext(os.path.basename(path))
        if ext not in ('.yml', '.yaml'):
            return os.path.basename(path)
        else:
            return name

    def _load_inventory_file(self, path, loader):
        '''
        helper function, which loads the file and gets the
        basename of the file without the extension
        '''

        if loader.is_directory(path):
            data = dict()

            try:
                names = loader.list_directory(path)
            except os.error as err:
                raise AnsibleError("This folder cannot be listed: %s: %s." % (path, err.strerror))

            # evaluate files in a stable order rather than whatever
            # order the filesystem lists them.
            names.sort()

            # do not parse hidden files or dirs, e.g. .svn/
            paths = [os.path.join(path, name) for name in names if not name.startswith('.')]
            for p in paths:
                _found, results = self._load_inventory_file(path=p, loader=loader)
                if results is not None:
                    data = self._combine_vars(data, results)

        else:
            file_name, ext = os.path.splitext(path)
            data = None
            if not ext:
                # extensionless path: probe each supported YAML extension
                for ext in C.YAML_FILENAME_EXTENSIONS:
                    new_path = path + ext
                    if loader.path_exists(new_path):
                        data = loader.load_from_file(new_path)
                        break
            else:
                if loader.path_exists(path):
                    data = loader.load_from_file(path)

        name = self._get_inventory_basename(path)
        return (name, data)

    def add_host_vars_file(self, path, loader):
        '''
        Loads and caches a host_vars file in the _host_vars_files dict,
        where the key to that dictionary is the basename of the file, minus
        the extension, for matching against a given inventory host name
        '''

        (name, data) = self._load_inventory_file(path, loader)
        if data:
            self._host_vars_files[name] = data
            return data
        else:
            return dict()

    def add_group_vars_file(self, path, loader):
        '''
        Loads and caches a host_vars file in the _host_vars_files dict,
        where the key to that dictionary is the basename of the file, minus
        the extension, for matching against a given inventory host name
        '''

        (name, data) = self._load_inventory_file(path, loader)
        if data:
            self._group_vars_files[name] = data
            return data
        else:
            return dict()

    def set_host_facts(self, host, facts):
        '''
        Sets or updates the given facts for a host in the fact cache.
        '''

        assert isinstance(facts, dict)

        if host.name not in self._fact_cache:
            self._fact_cache[host.name] = facts
        else:
            try:
                self._fact_cache[host.name].update(facts)
            except KeyError:
                self._fact_cache[host.name] = facts

    def set_host_variable(self, host, varname, value):
        '''
        Sets a value in the vars_cache for a host.
        '''

        host_name = host.get_name()
        if host_name not in self._vars_cache:
            self._vars_cache[host_name] = dict()
        self._vars_cache[host_name][varname] = value
| scottcunningham/ansible | lib/ansible/vars/__init__.py | Python | gpl-3.0 | 14,895 |
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 25 18:19:18 2015
@author: xiaoxiaol
[ trv_00 trv_01 trv_02 trv_09
trv_03 trv_04 trv_05 trv_10
trv_06 trv_07 trv_08 trv_11 ]
"""
# table column names
# Index([u'specimen_id', u'specimen_name', u'id', u'tvr_00', u'tvr_01', u'tvr_02', u'tvr_03', u'tvr_04', u'tvr_05', u'tvr_06',
# u'tvr_07', u'tvr_08', u'tvr_09', u'tvr_10', u'tvr_11', u'trv_00', u'trv_01', u'trv_02', u'trv_03', u'trv_04', u'trv_05',
#u'trv_06', u'trv_07', u'trv_08', u'trv_09', u'trv_10', u'trv_11', u'metric', u'scale_x', u'scale_y', u'scale_z', u'rotation_x',
# u'rotation_y', u'rotation_z', u'skew_x', u'skew_y', u'skew_z', u'created_at', u'updated_at', u'orca_path'], dtype='object')
import pandas as pd
import os

###########################
data_DIR = "/home/xiaoxiaol/work/data/lims2/nr_june_25_filter_aligned/transforms"  ## where to store the transform.txt
csv_file = "/home/xiaoxiaol/work/data/lims2/nr_june_25_filter_aligned/june_25_alignement_transform.csv"  ## where the transform parameters are, obtained from lims
##########################

if not os.path.exists(data_DIR):
    os.makedirs(data_DIR)

df = pd.read_csv(csv_file)

data_table = df.values
num_samples, num_cols = data_table.shape

# transform = np.array([ [ float(a3d.find('tvr-00').text), float(a3d.find('tvr-01').text), float(a3d.find('tvr-02').text), float(a3d.find('tvr-09').text) ],
# [ float(a3d.find('tvr-03').text), float(a3d.find('tvr-04').text), float(a3d.find('tvr-05').text), float(a3d.find('tvr-10').text) ],
# [ float(a3d.find('tvr-06').text), float(a3d.find('tvr-07').text), float(a3d.find('tvr-08').text), float(a3d.find('tvr-11').text) ],

SCALE = 1000  # from mm to microns (constant, hoisted out of the loop)

for i in range(num_samples):
    # orca_path is the last CSV column; its basename (minus '.swc')
    # names the per-sample transform file.
    orca_path = data_table[i][num_cols - 1]
    fn = orca_path.split('/')[-1]
    # 'with' guarantees the file is closed even if a write raises
    # (the original leaked the handle on error).
    with open(data_DIR + "/%s.txt" % fn.split('.swc')[0], "w") as text_file:
        text_file.write("%f %f %f %f \n" % (SCALE * df['tvr_00'][i], SCALE * df['tvr_01'][i], SCALE * df['tvr_02'][i], SCALE * df['tvr_09'][i]))
        text_file.write("%f %f %f %f \n" % (SCALE * df['tvr_03'][i], SCALE * df['tvr_04'][i], SCALE * df['tvr_05'][i], SCALE * df['tvr_10'][i]))
        text_file.write("%f %f %f %f \n" % (SCALE * df['tvr_06'][i], SCALE * df['tvr_07'][i], SCALE * df['tvr_08'][i], SCALE * df['tvr_11'][i]))
| XiaoxiaoLiu/morphology_analysis | utilities/writeAffineTransformsFromCSV.py | Python | gpl-3.0 | 2,408 |
""" Cisco_IOS_XE_acl
Cisco XE Native Access Control List (ACL) Yang model.
Copyright (c) 2016 by Cisco Systems, Inc.
All rights reserved.
"""
import re
import collections
from enum import Enum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk.errors import YPYError, YPYModelError
# NOTE: auto-generated from the Cisco-IOS-XE-acl YANG model by ydk-gen;
# regenerate rather than hand-editing.
class AclPortTypeEnum(Enum):
    """
    AclPortTypeEnum

    .. data:: biff = 0
    .. data:: bootpc = 1
    .. data:: bootps = 2
    .. data:: discard = 3
    .. data:: dnsix = 4
    .. data:: domain = 5
    .. data:: echo = 6
    .. data:: isakmp = 7
    .. data:: mobile_ip = 8
    .. data:: nameserver = 9
    .. data:: netbios_dgm = 10
    .. data:: netbios_ns = 11
    .. data:: netbios_ss = 12
    .. data:: non500_isakmp = 13
    .. data:: ntp = 14
    .. data:: pim_auto_rp = 15
    .. data:: rip = 16
    .. data:: ripv6 = 17
    .. data:: snmp = 18
    .. data:: snmptrap = 19
    .. data:: sunrpc = 20
    .. data:: syslog = 21
    .. data:: tacacs = 22
    .. data:: talk = 23
    .. data:: tftp = 24
    .. data:: time = 25
    .. data:: who = 26
    .. data:: xdmcp = 27
    .. data:: bgp = 28
    .. data:: chargen = 29
    .. data:: cmd = 30
    .. data:: connectedapps_plain = 31
    .. data:: connectedapps_tls = 32
    .. data:: daytime = 33
    .. data:: exec_ = 34
    .. data:: finger = 35
    .. data:: ftp = 36
    .. data:: ftp_data = 37
    .. data:: gopher = 38
    .. data:: hostname = 39
    .. data:: ident = 40
    .. data:: irc = 41
    .. data:: klogin = 42
    .. data:: kshell = 43
    .. data:: login = 44
    .. data:: lpd = 45
    .. data:: msrpc = 46
    .. data:: nntp = 47
    .. data:: pop2 = 48
    .. data:: pop3 = 49
    .. data:: smtp = 50
    .. data:: telnet = 51
    .. data:: uucp = 52
    .. data:: whois = 53
    .. data:: www = 54
    """

    biff = 0
    bootpc = 1
    bootps = 2
    discard = 3
    dnsix = 4
    domain = 5
    echo = 6
    isakmp = 7
    mobile_ip = 8
    nameserver = 9
    netbios_dgm = 10
    netbios_ns = 11
    netbios_ss = 12
    non500_isakmp = 13
    ntp = 14
    pim_auto_rp = 15
    rip = 16
    ripv6 = 17
    snmp = 18
    snmptrap = 19
    sunrpc = 20
    syslog = 21
    tacacs = 22
    talk = 23
    tftp = 24
    time = 25
    who = 26
    xdmcp = 27
    bgp = 28
    chargen = 29
    cmd = 30
    connectedapps_plain = 31
    connectedapps_tls = 32
    daytime = 33
    exec_ = 34
    finger = 35
    ftp = 36
    ftp_data = 37
    gopher = 38
    hostname = 39
    ident = 40
    irc = 41
    klogin = 42
    kshell = 43
    login = 44
    lpd = 45
    msrpc = 46
    nntp = 47
    pop2 = 48
    pop3 = 49
    smtp = 50
    telnet = 51
    uucp = 52
    whois = 53
    www = 54

    @staticmethod
    def _meta_info():
        # Metadata lookup is deferred to avoid a circular import with the
        # generated _meta package.
        from ydk.models.cisco_ios_xe._meta import _Cisco_IOS_XE_acl as meta
        return meta._meta_table['AclPortTypeEnum']
| 111pontes/ydk-py | cisco-ios-xe/ydk/models/cisco_ios_xe/Cisco_IOS_XE_acl.py | Python | apache-2.0 | 2,985 |
from bcpp_subject_form_validators import SexualPartnerFormValidator as BaseFormValidator
from ..models import RecentPartner
from .form_mixins import SubjectModelFormMixin
class SexualPartnerFormValidator(BaseFormValidator):
    # Concrete validator: points the shared bcpp-subject validator at this
    # app's sexual-behaviour and partner-residency models.
    sexual_behaviour_model = 'bcpp_subject.sexualbehaviour'
    partner_residency_model = 'bcpp_subject.partnerresidency'
class RecentPartnerForm(SubjectModelFormMixin):
    # ModelForm for RecentPartner; field validation is delegated to
    # SexualPartnerFormValidator via the mixin.
    form_validator_cls = SexualPartnerFormValidator

    class Meta:
        model = RecentPartner
        fields = '__all__'
| botswana-harvard/bcpp-subject | bcpp_subject/forms/recent_partner_form.py | Python | gpl-3.0 | 526 |
# -*- coding: utf-8 -*-
import cherrypy
from glams.checkpassword.checkpassword import checkPassword
from glams.databaseInterface.connect import db, db2
from glams.glamsTemplate import glamsTemplate
from glams.website.database.classes import Mouse, Cage, date2str, getAge
from glams.website.database.forms import getMouseForm, getCageForm, getInnerCageForm, getGeneticFilterForm, getStrainList, getLabMemberList, getResidentsFilterForm
import urllib, datetime
from lxml import etree
from lxml.builder import E
from copy import deepcopy
import time
import ast
#import os
def unquote_htmlform(text):
    '''This takes a form which has been serialized in jQuery such as
            $('#cage1 form').serialize()
    and converts it to a list of lists, where the first entry in each list is a key and the second is a value.
    example:
        text="cagename=ck9&active=Yes&date_activated=2014-02-05&date_inactivated=&location=Conventional+Rack+2&expectingpl=Yes&caretaker=Kyle&cagegroup=C57BL%2F6+WT&notes=3+F+were+imported.%0D%0Aa%3Db%2Bc%0D%0AThis+might+be+difficult."
        output=[['cagename', 'ck9'], ['active', 'Yes'], ['date_activated', '2014-02-05'], ['date_inactivated', ''], ['location', 'Conventional Rack 2'], ['expectingpl', 'Yes'], ['caretaker', 'Kyle'], ['cagegroup', 'C57BL/6 WT'], ['notes', '3 F were imported.\r\na=b+c\r\nThis might be difficult.']]
    '''
    # NOTE(review): Python 2 API — urllib.unquote_plus moved to
    # urllib.parse in Python 3.
    #output=[[urllib.unquote_plus(tmp2).replace(r'\xa0',' ') for tmp2 in tmp.split('=')] for tmp in text.split('&')]
    # Split on '&' into key=value pairs, URL-decode each side, and drop
    # every non-ASCII character (ord >= 127) from the decoded text.
    output=[[''.join([i for i in urllib.unquote_plus(tmp2) if ord(i) < 127]) for tmp2 in tmp.split('=')] for tmp in text.split('&')] #this strips all non ascii characters out, but might slow down everything too much
    return output
def initialize():
    """One-time schema migration: ensure lab_members has a 'sortby' column."""
    sortby_exists=db.execute("SELECT COUNT(*) FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME='lab_members' AND COLUMN_NAME='sortby' ")[0][0]
    if sortby_exists==0:
        db.execute("ALTER TABLE lab_members ADD COLUMN sortby VARCHAR(255) CHARACTER SET utf8 collate utf8_bin")
        print("Added column 'sortby' to table 'lab_members'")
def getBreedingStatusList():
    """Options for a mouse's breeding status (first entry means unset)."""
    return [
        '',
        'breeding',
        'retired breeder',
        'virgin',
        'unknown',
    ]
def getLifeStatusList():
    """Options for a mouse's life status (first entry means unset)."""
    return [
        '',
        'alive',
        'euthanized',
        'missing',
        'transferred',
    ]
def getSexList():
    """Options for a mouse's sex; 'unknown' is the default first entry."""
    return [
        'unknown',
        'male',
        'female',
    ]
def getZygosityList():
    """Genotype zygosity options ('?' marks an undetermined allele)."""
    return [
        '',
        '++',
        '+-',
        '--',
        '+?',
        '-?',
        '??',
        '+/y',
        '+/x',
    ]
def getAllMouseColumns():
    """All mouse column fields, matching classes.Mouse.getFromDB."""
    return [
        'mousename', 'strain', 'sex', 'life_status', 'breeding_status',
        'DOB', 'DOD', 'age', 'cause_of_death', 'tag', 'mouse_notes',
        'genotyped', 'cagename', 'mother', 'father',
        'reserve_lab_member', 'reserve_date', 'reserve_description',
        'reserve_filenames', 'reserve_notes', 'reserve_status',
        'genetics', 'ageatexperiment',
    ]
def getAllCageColumns():
    """All cage column fields, matching classes.Cage.getFromDB."""
    return [
        'cagename2', 'cagenotes', 'date_activated', 'date_inactivated',
        'location', 'active', 'caretaker', 'residents', 'expectingpl',
        'cagegroup',
    ]
def getPrettyText(column):
    """Map an internal column field name (as used in Ajax.refresh) to the
    human-readable label displayed in the browser.

    Raises KeyError for unknown column names (callers rely on passing valid keys).
    """
    # NOTE: the original dict literal listed 'cagename' twice (same value);
    # the duplicate has been removed.
    convert = {
        '': '',
        'mousename': 'Mouse', 'strain': 'Strain', 'sex': 'Sex',
        'life_status': 'Life Status', 'breeding_status': 'Breeding Status',
        'DOB': 'DOB', 'DOD': 'DOD', 'cause_of_death': 'Cause of Death',
        'tag': 'Tag', 'mouse_notes': 'Notes', 'genotyped': 'Genotyped',
        'cagename': 'Cage', 'cagename2': 'Cage', 'mother': 'Mother',
        'father': 'Father', 'reserve_lab_member': 'Experimenter',
        'reserve_filenames': 'File Names', 'reserve_date': 'Experiment Date',
        'reserve_description': 'Experiment Description',
        'reserve_notes': 'Experiment Notes', 'reserve_status': 'Experiment Status',
        'cagenotes': 'Cage Notes', 'date_activated': 'Date Activated',
        'date_inactivated': 'Date Inactivated', 'location': 'Location',
        'active': 'Active', 'caretaker': 'Caretaker', 'genetics': 'Genetics',
        'residents': 'Residents', 'expectingpl': 'Breeding',
        'cagegroup': 'Cage Group', 'age': 'Age',
        'ageatexperiment': 'Age at Experiment',
    }
    return convert[column]
# Characters that must be converted to HTML entities before being embedded in
# attribute values or element text.
# BUG FIX: as written, the table mapped each character to itself, making
# html_escape() a no-op; the proper entity strings are restored here.
html_escape_table = {
    "&": "&amp;",
    '"': "&quot;",
    "'": "&apos;",
    ">": "&gt;",
    "<": "&lt;",
}
def html_escape(text):
    """Produce entities within text (escape &, quotes, and angle brackets)."""
    return "".join(html_escape_table.get(c, c) for c in text)
def getcagehistory(cagename):
    """Build a chronological timeline of arrivals and departures for one cage.

    Returns a list of [date, lxml <td> element] pairs, sorted by date; each
    entry describes a mouse being born/introduced into the cage or
    dying/moving out of it.
    """
    # Derived columns: bornhere (housing span starts on the mouse's DOB),
    # diedhere (span ends on its DOD), cagebefore/cageafter (the cage whose
    # housing span abuts this one, i.e. where the mouse came from / went to).
    answer=db2.execute("""SELECT m.name AS mousename, m.id AS mouseID, m.sex, h.start_date, h.end_date, h.start_date=(SELECT DOB FROM mice WHERE mice.id=m.id) AS bornhere,
    h.end_date=(SELECT DOD FROM mice WHERE mice.id=m.id) AS diedhere,
    (SELECT MIN(c.name) FROM cages AS c LEFT JOIN housing AS h2 ON h2.cage_id=c.id WHERE h2.end_date=h.start_date AND h2.mouse_id=m.id) AS cagebefore,
    (SELECT MIN(c.name) FROM cages AS c LEFT JOIN housing AS h2 ON h2.cage_id=c.id WHERE h2.start_date=h.end_date AND h2.mouse_id=m.id) AS cageafter
    FROM cages AS c
    LEFT JOIN housing AS h ON c.id=h.cage_id
    LEFT JOIN mice AS m ON m.id=h.mouse_id
    WHERE c.name=%s""",(cagename,))
    # we are getting mice who were introduced, mice who were moved out, mice who were born in this cage, mice who were killed in this cage
    timeline=list()
    for m in answer:
        if m['mousename'] is not None:
            # Clickable mouse name, CSS-styled by sex.
            name=E.span({'class':m['sex'],'onclick':"clickedOnMouseName('{}');".format(m['mouseID'])},m['mousename'])
            ### movement into a cage
            if m['bornhere']==1:
                timeline.append([m['start_date'], E.td(deepcopy(name)," born")])
            elif m['cagebefore'] is not None:
                cagebefore=E.span({'onclick':"displaycage('{}');".format(m['cagebefore'])},m['cagebefore'])
                timeline.append([m['start_date'], E.td(deepcopy(name),' introduced from ',deepcopy(cagebefore))])
            else:
                timeline.append([m['start_date'],E.td(deepcopy(name), ' introduced')])
            ### movement out of a cage
            if m['end_date'] is not None:
                if m['diedhere']==1:
                    timeline.append([m['end_date'],E.td(deepcopy(name)," died")])
                elif m['cageafter'] is not None:
                    cageafter=E.span({'onclick':"displaycage('{}');".format(m['cageafter'])},m['cageafter'])
                    timeline.append([m['end_date'],E.td(deepcopy(name)," moved to ",deepcopy(cageafter))])
                else:
                    timeline.append([m['end_date'],E.td(deepcopy(name)," moved out")])
    # Sort chronologically (lists compare element-wise, so the date leads).
    timeline.sort()
    return timeline
def makequery(viewtype,c,sortby):
    '''
    Build and run a MySQL query from a dict of columns paired with their filters.
    sample query: "SELECT c.id, c.name AS cagename2, c.notes AS cagenotes, c.date_activated, c.date_inactivated, c.location, c.active, lab_members.name AS caretaker FROM cages as c LEFT JOIN care_taker AS ct ON ct.cage_id=c.id LEFT JOIN lab_members ON ct.lab_member_id=lab_members.id ORDER BY cagename2"
    viewtype - either 'mouse' or 'cage' (the code compares against these values)
    c - a dictionary of filters; keys are column names. eg {'cagename':None,'active':[['active',1]]}
    sortby - the ORDER BY fragment for the active view, eg 'mousename DESC'
    Returns the result of db2.execute() on the assembled query.
    '''
    # SELECT-clause fragment for each mouse-view column.
    MouseAlias={'mousename':'m.name as mousename',
        'strain':'m.strain',
        'sex':'m.sex',
        'life_status': 'm.life_status',
        'breeding_status':'m.breeding_status',
        'DOB':'m.DOB','DOD':'m.DOD',
        'age':"IF(m.life_status='euthanized',DATEDIFF(m.DOD,m.DOB), DATEDIFF(CURDATE(),m.DOB)) as age",
        'cause_of_death':'m.cause_of_death',
        'tag':'m.tag',
        'mouse_notes':'m.notes AS mouse_notes',
        'genotyped':"IF(m.genotyped,'Yes','No') AS genotyped",'cagename':"IF(housing.currentcage=1,cages.name,NULL) AS cagename",
        'mother':'mom.name AS mother','father':'dad.name AS father',
        'reserve_lab_member':'lab_members.name AS reserve_lab_member',
        'reserve_date':'experiments.date AS reserve_date',
        'reserve_filenames':'experiments.filenames AS reserve_filenames',
        'reserve_description':'experiments.description AS reserve_description',
        'reserve_notes':'experiments.notes AS reserve_notes',
        'ageatexperiment':"IF(m.life_status='euthanized' AND experiments.date>m.DOD,DATEDIFF(m.DOD,m.DOB),DATEDIFF(experiments.date,m.DOB)) AS ageatexperiment",
        'reserve_status':'experiments.status AS reserve_status',
        'genetics':"""(SELECT GROUP_CONCAT(CONCAT(genes.name,genetics.zygosity) SEPARATOR ', ') FROM genes LEFT JOIN genetics ON genes.id=genetics.gene_id WHERE genetics.mouse_id=m.id) as genetics"""}
    # SELECT-clause fragment for each cage-view column.
    CageAlias={'cagename2':'c.name AS cagename2',
        'cagenotes':'c.notes AS cagenotes',
        'date_activated':'c.date_activated',
        'date_inactivated':'c.date_inactivated',
        'location':'c.location',
        'active':"IF(c.active,'Yes','No') AS active",
        'caretaker':'lab_members.name AS caretaker',
        'cagegroup':'c.cagegroup',
        'residents':"""CONCAT_WS(' ',
        IFNULL(
        (
        SELECT
        GROUP_CONCAT(
        CONCAT("<span class='resident ",m.sex," ",IF(EXISTS(SELECT * from experiments WHERE mouse_id=m.id), 'reserved','notreserved'),"'><span class='mousename' mouseID='",m.id,"'>",m.name,"</span> (P",DATEDIFF(CURDATE(),m.DOB),")</span>")
        ORDER BY m.DOB
        SEPARATOR ' ')
        FROM mice as m
        LEFT JOIN housing as h ON h.mouse_id=m.id
        WHERE h.cage_id=c.id
        AND h.currentcage=1
        ),
        ''),
        IFNULL(
        (
        SELECT GROUP_CONCAT(CONCAT("<span class='resident'><span class='pl'>PL</span> (P",DATEDIFF(CURDATE(),litters.DOB),")</span>") ORDER BY litters.DOB SEPARATOR '')
        FROM litters
        WHERE c.id=litters.cage_id
        ),
        '')
        ) AS residents""",
        'expectingpl':"IF(c.expectingpl,'Yes','No') AS expectingpl"}
    # WHERE-clause fragment for each filterable column (pyformat placeholders).
    WhereAlias={ 'mousename':'m.name COLLATE UTF8_GENERAL_CI LIKE %(mousename)s', # the 'COLLATE UTF8_GENERAL_CI' makes it case insensitive
        'strain':'m.strain=%(strain)s',
        'sex':'m.sex=%(sex)s',
        'location':'c.location=%(location)s',
        'life_status':'m.life_status=%(life_status)s',
        'breeding_status':'m.breeding_status=%(breeding_status)s',
        'genotyped':'m.genotyped=%(genotyped)s',
        'cagename':'cages.name COLLATE UTF8_GENERAL_CI LIKE %(cagename)s',
        'mother':'mom.name=%(mother)s',
        'father':'dad.name=%(father)s',
        'reserve_lab_member':'lab_members.name=%(reserve_lab_member)s',
        'reserve_description':'LOWER(experiments.description) LIKE LOWER(%(reserve_description)s)',
        'active':'c.active=%(active)s',
        'expectingpl':'c.expectingpl=%(expectingpl)s',
        'cagename2':'c.name LIKE %(cagename2)s',
        'caretaker':'lab_members.name=%(caretaker)s',
        'cagenotes':'c.notes LIKE %(cagenotes)s',
        'cagegroup':'c.cagegroup LIKE %(cagegroup)s',
        'mouse_notes':'m.notes LIKE %(mouse_notes)s',
        'DOB':'m.DOB>=%(DOB0)s AND m.DOB <= %(DOB1)s',
        'DOD':'m.DOD>=%(DOD0)s AND m.DOD <= %(DOD1)s',
        'age':"IF(m.life_status='euthanized',DATEDIFF(m.DOD,m.DOB), DATEDIFF(CURDATE(),m.DOB))>=%(age0)s and IF(m.life_status='euthanized',DATEDIFF(m.DOD,m.DOB), DATEDIFF(CURDATE(),m.DOB)) <= %(age1)s",
        'reserve_date':'experiments.date >= %(reserve_date0)s AND experiments.date <= %(reserve_date1)s',
        'reserve_status':'experiments.status LIKE %(reserve_status)s',
        'date_activated':'c.date_activated >= %(date_activated0)s AND c.date_activated <= %(date_activated1)s',
        # BUG FIX: the lower bound previously referenced the non-existent column 'c.indate_activated'.
        'date_inactivated':'c.date_inactivated >= %(date_inactivated0)s AND c.date_inactivated <= %(date_inactivated1)s',
        'genetics':'',
        'residents':"""True = ANY(SELECT (DATEDIFF(CURDATE(),m.DOB)>%(residents0)s AND DATEDIFF(CURDATE(),m.DOB)<%(residents1)s) FROM mice AS m LEFT JOIN housing AS h ON m.id=h.mouse_id WHERE h.currentcage=True AND h.cage_id=c.id)""",
        'ageatexperiment':"IF(m.life_status='euthanized' AND experiments.date>m.DOD,DATEDIFF(m.DOD,m.DOB),DATEDIFF(experiments.date,m.DOB)) >= %(ageatexperiment0)s AND IF(m.life_status='euthanized' AND experiments.date>m.DOD,DATEDIFF(m.DOD,m.DOB),DATEDIFF(experiments.date,m.DOB)) <= %(ageatexperiment1)s"
        }
    d=dict() #d is a dictionary of arguments that will be the second argument in the mysql query, in order to prevent a mysql injection
    for k in WhereAlias.keys():
        if k in c.keys() and c[k] is not None:
            if k in set(['mousename','cagename2','cagename','cagenotes','mouse_notes', 'cagegroup','reserve_description']):
                c[k][0][1]='%'+c[k][0][1]+'%' #this makes the filter in mysql have wildcards before and after
            if (viewtype=='mouse' and k in MouseAlias.keys()) or (viewtype=='cage' and k in CageAlias.keys()):
                if k=='genetics':
                    # The genetics filter arrives as repeating (logiccomb, gene, zygosity) triples.
                    genetics=[ [c['genetics'][i][1], c['genetics'][i+1][1], c['genetics'][i+2][1]] for i in range(0,len(c['genetics']),3) ]
                    genetics[0][0]='' # the first triple needs no leading AND/OR combiner
                    # SECURITY NOTE: gene names and zygosities are interpolated directly
                    # into the SQL here (not parameterized) -- potential injection vector;
                    # review before exposing beyond trusted users.
                    genetics_joiner="('{0}' IN (SELECT genes.name FROM genes LEFT JOIN genetics ON genes.id=genetics.gene_id WHERE genetics.mouse_id=m.id AND genetics.zygosity='{1}'))"
                    genetics_where=''
                    for g in genetics:
                        genetics_where=genetics_where+' '+g[0]+' '+genetics_joiner.format(g[1],g[2])
                    c['genetics']=genetics_where
                else:
                    if len(c[k])==1:
                        d[k]=c[k][0][1]
                    elif len(c[k])==2: # for a date range
                        d[k+'0']=c[k][0][1]
                        d[k+'1']=c[k][1][1]
                    c[k]=WhereAlias[k]
    q=[] #the SELECT part of the query
    w=[] #the WHERE part of the query
    WHERE=''
    if viewtype=='mouse':
        # BUG FIX: 'dad' was previously joined on lineage.mother_id, so the
        # father column always showed the mother; it now joins on father_id.
        FROM=" FROM mice AS m LEFT JOIN housing ON housing.mouse_id=m.id LEFT JOIN cages ON cages.id=housing.cage_id LEFT JOIN lineage ON lineage.child_id=m.id LEFT JOIN mice AS mom ON lineage.mother_id=mom.id LEFT JOIN mice AS dad ON lineage.father_id=dad.id"
        if set(c.keys()).intersection(set(['reserve_lab_member','reserve_date','reserve_filenames','reserve_description','reserve_notes','ageatexperiment','reserve_status'])): #if we want to list any of the experiment information
            FROM=FROM+" LEFT JOIN experiments ON experiments.mouse_id=m.id LEFT JOIN lab_members ON experiments.lab_member_id=lab_members.id"
        q.append('housing.currentcage')
        mousekeys=set(c.keys()).intersection(set(MouseAlias.keys()))
        for key in mousekeys:
            q.append(MouseAlias[key])
            if key in WhereAlias.keys():
                w.append(c[key])
        w=[item for item in w if item is not None]
        # Restrict to the current housing record (or the most recent one for homeless mice).
        w.append("(housing.currentcage=1 OR (housing.currentcage=0 AND housing.start_date= (SELECT max(h.start_date) FROM housing AS h WHERE h.mouse_id=m.id) AND housing.end_date>=housing.start_date) OR housing.currentcage IS NULL)")
        if len(w)>0:
            WHERE=' WHERE '+' AND '.join(w)
        query= 'SELECT m.id, '+', '.join(q)
        ORDERBY=" ORDER BY {}".format(sortby)
    elif viewtype=='cage':
        FROM=" FROM cages as c LEFT JOIN care_taker AS ct ON ct.cage_id=c.id LEFT JOIN lab_members ON ct.lab_member_id=lab_members.id "
        cagekeys=set(c.keys()).intersection(set(CageAlias.keys()))
        for key in cagekeys:
            q.append(CageAlias[key])
            w.append(c[key])
        w=[item for item in w if item is not None]
        if len(w)>0:
            WHERE=' WHERE '+' AND '.join(w)
        query= 'SELECT c.id, ' + ', '.join(q)
        ORDERBY=" ORDER BY {}".format(sortby)
    query+=FROM+WHERE+ORDERBY
    return db2.execute(query,d)
class Ajax:
@cherrypy.expose
def refresh(self):
    """Render the main table (header + body) for the user's current view.

    Reads the user's saved column/filter/sort settings from lab_members,
    builds a query via makequery(), and returns the assembled
    <thead>/<tbody> HTML fragment.
    """
    username=checkPassword()
    if not username:
        return """<meta http-equiv="refresh" content="0;url=/home/login" />""" #This takes an unknown user to the login screen
    article="<thead><tr>"
    #########################
    ######### CREATE HEADER
    cols=db.execute("SELECT columns FROM lab_members WHERE name=%s",(username,))[0][0]
    viewtype=db.execute("SELECT viewtype FROM lab_members WHERE name=%s",(username,))[0][0]
    if viewtype is None:
        # First visit: default to the mouse view and remember the choice.
        db.execute("UPDATE lab_members SET viewtype='mouse' WHERE name=%s",(username,))
        viewtype='mouse'
    if cols is None:
        if viewtype=='mouse':
            cols="mousename,,"
        elif viewtype=='cage':
            cols="cagename,,"
    # 'cols' alternates column name and serialized filter: name,filter,name,filter,...
    cols=cols.split(',')
    mousecols=getAllMouseColumns()
    cagecols=getAllCageColumns()
    for i in range(0,len(cols)-1,2):
        if (viewtype=='mouse' and cols[i] in mousecols) or (viewtype=='cage' and cols[i] in cagecols):
            if cols[i+1]=='': classs=''
            else: classs="filtered"
            article+="""<th data-header='{0}' class='{2}' filter='{1}'>{3}
            <img src='/support/images/drag-handle.png' class='col-handle'>
            <img class="filterclose_button {2}" onclick="removefilter($(this).parent().attr('data-header'));" src="/support/images/x2.gif">
            </th>""".format(cols[i],cols[i+1],classs,getPrettyText(cols[i]))
    article+="</tr></thead><tbody>"
    #########################
    ######### CREATE BODY
    if cols==['','','']:
        return ''
    columns=[]
    coldict={}
    for i in range(0,len(cols)-1,2):
        columns.append(cols[i])
        # (a stray no-op expression 'unquote_htmlform' was removed here)
        c=unquote_htmlform(cols[i+1])
        if c==[['']]:
            c=None
        coldict[cols[i]]= c
    # Convert Yes/No filter values into the 1/0 the database stores.
    for key in coldict:
        if coldict[key] is not None:
            for i in range(len(coldict[key])):
                if coldict[key][i][1]=='Yes':
                    coldict[key][i][1]=1
                elif coldict[key][i][1]=='No':
                    coldict[key][i][1]=0
    sortby=db.execute("SELECT sortby FROM lab_members WHERE name=%s",(username,))[0][0]
    if sortby is None:
        sortby = {'mouse':'mousename DESC','cage':'cagename2 DESC'}
    else:
        sortby=ast.literal_eval(sortby)
    answer=makequery(viewtype,coldict,sortby[viewtype])
    if viewtype=='cage':
        for entry in answer:
            article+="<tr>"
            for col in columns:
                if col in getAllCageColumns():
                    if entry[col] is None:
                        entry[col]='-'
                    elif type(entry[col]) is datetime.datetime:
                        entry[col]=date2str(entry[col])
                    if col=='cagenotes':
                        text=html_escape(entry[col])
                        if len(text)>20:
                            article+="<td class='{0} tooltip' title='{1}'>{2}</td>".format(col,text,text[:20]) #only display the first 20 characters, tooltip the rest
                        else:
                            article+="<td class='{0}'>{1}</td>".format(col,text)
                    else:
                        article+="<td class='{0}'>{1}</td>".format(col,entry[col])
            article+="</tr>"
    else: #if viewtype=='mouse' or if user has no viewtype
        for entry in answer:
            article+="<tr>"
            for col in columns:
                if col in getAllMouseColumns():
                    if entry[col] is None:
                        entry[col]='-'
                    elif type(entry[col]) is datetime.datetime:
                        entry[col]=date2str(entry[col])
                    if col=='mousename':
                        article+="<td class='{0}' mouseID={1}>{2}</td>".format(col,entry['id'],entry[col])
                    elif col=='mouse_notes' or col=='reserve_notes':
                        text=html_escape(entry[col])
                        if len(text)>20:
                            article+="<td class='{0} tooltip' title='{1}'>{2}</td>".format(col,text,text[:20]) #only display the first 20 characters, tooltip the rest
                        else:
                            article+="<td class='{0}'>{1}</td>".format(col,text)
                    else:
                        article+="<td class='{0}'>{1}</td>".format(col,entry[col])
            article+="</tr>"
    article+="</tbody>"
    return article
@cherrypy.expose
def sortby(self, col):
    """Set or toggle the sort column for the user's current view and persist it."""
    username = checkPassword()
    if not username:
        return """<meta http-equiv="refresh" content="0;url=/home/login" />"""  # redirect unknown users to login
    stored = db.execute("SELECT sortby FROM lab_members WHERE name=%s", (username,))[0][0]
    # The sortby column holds a repr of {'mouse': '<col> <dir>', 'cage': '<col> <dir>'}.
    if stored is None:
        sb = {'mouse': 'mousename DESC', 'cage': 'cagename2 DESC'}
    else:
        sb = ast.literal_eval(stored)
    viewtype = db.execute("SELECT viewtype FROM lab_members WHERE name=%s", (username,))[0][0]
    current_col, current_order = sb[viewtype].split(' ')[0], sb[viewtype].split(' ')[1] if ' ' in sb[viewtype] else ''
    if current_col == col:
        # Same column clicked again: flip the direction.
        if current_order == 'ASC':
            new_order = 'DESC'
        elif current_order == 'DESC':
            new_order = 'ASC'
        sb[viewtype] = '{} {}'.format(col, new_order)
    else:
        # New column: start with a descending sort.
        sb[viewtype] = '{} DESC'.format(col)
    db.execute("UPDATE lab_members SET sortby=%s WHERE name=%s", (str(sb), username))
    return "Sorting by {}".format(str(sb))
@cherrypy.expose
def getFilterForm(self, col):  # col is the name of the column. eg: DOB
    """Build the popup form used to set a filter on column *col*."""
    # Columns filtered via a dropdown, mapped to a callable producing the choices
    # (callables keep the list lookups lazy, exactly as the original elif chain did).
    select_choices = {
        'strain': getStrainList,
        'sex': getSexList,
        'life_status': getLifeStatusList,
        'breeding_status': getBreedingStatusList,
        'reserve_lab_member': getLabMemberList,
        'caretaker': getLabMemberList,
        'genotyped': lambda: ['Yes', 'No'],
        'reserve_status': lambda: ['completed', 'planned'],
        'active': lambda: ['Yes', 'No'],
        'expectingpl': lambda: ['Yes', 'No'],
    }
    # Columns filtered via some other widget type.
    widget_types = {
        'DOB': 'dates', 'DOD': 'dates', 'reserve_date': 'dates',
        'date_activated': 'dates', 'date_inactivated': 'dates',
        'age': 'range', 'ageatexperiment': 'range',
        'mousename': 'textfield', 'cagename': 'textfield',
        'mother': 'textfield', 'father': 'textfield',
        'cagename2': 'textfield', 'reserve_description': 'textfield',
        'cagenotes': 'textfield', 'cagegroup': 'textfield',
        'mouse_notes': 'textfield', 'location': 'textfield',
        'genetics': 'genetics', 'residents': 'residents',
    }
    header = "<h1>Select {}</h1>".format(getPrettyText(col))
    b = None
    if col in select_choices:
        choicetype = 'select'
        b = select_choices[col]()
    elif col in widget_types:
        choicetype = widget_types[col]
    else:
        choicetype = None
        header = "'{}' doesn't have a filter enabled".format(getPrettyText(col))
    options = ''
    if choicetype == 'select':
        options = "<select name='{}'>".format(col)
        for choice in b:
            options += """<option value="{0}">{0}</option>""".format(choice)
        options += "</select><input class='button-link' type='submit'>"
    elif choicetype == 'textfield':
        options = "<input type='text' name='{}'><input class='button-link' type='submit'>".format(col)
    elif choicetype == 'range':
        options = "<input type='number' step='1' name = '{}'> To <input type='number' step='1' name = '{}'><input class='button-link' type='submit'>".format(col + '1', col + '2')
    elif choicetype == 'dates':
        options = "<input type= 'date' name='{}'> To <input name='{}' type= 'date'><input class='button-link' type='submit'>".format(col + '1', col + '2')
    elif choicetype == 'genetics':
        options = getGeneticFilterForm() + "<input class='button-link' style='float: right;bottom: 10px;right: 10px;white-space: nowrap;' type='submit'>"
    elif choicetype == 'residents':
        options = getResidentsFilterForm() + "<input class='button-link' style='float: right;bottom: 10px;right: 10px;white-space: nowrap;' type='submit'>"
    article = "<form id='filter' col={}>".format(col) + header + options + """</form><img class='close_button' src='/support/images/x.gif' onclick="closePopup('.bubble')">"""
    return article
@cherrypy.expose
def removefilter(self, col):
    """Clear the saved filter for column *col* in the user's column settings."""
    username = checkPassword()
    if not username:
        return """<meta http-equiv="refresh" content="0;url=/home/login" />"""  # redirect unknown users to login
    stored = db.execute("SELECT columns FROM lab_members WHERE name=%s", (username,))[0][0]
    if stored is None:
        stored = 'mousename,,'
    # Settings are stored interleaved: name,filter,name,filter,...
    cols = stored.split(',')
    cols[cols.index(col) + 1] = ''
    db.execute("UPDATE lab_members SET columns=%s WHERE name=%s", (','.join(cols), username))
    return ''
@cherrypy.expose
def setfilter(self,col,filt):
    '''
    Takes a column name and a desired filter, and replaces the column-filter pair in the 'columns' entry in the 'lab_members' table in the database.
    The 'columns' entry is later consumed by the 'makequery()' function.
    col - the name of the column we are changing the filter for. eg 'genetics'
    filt - the serialized form the user submits when editing a filter. eg 'logiccomb0=AND&gene0=i-tdTomato&zygosity0=%2B%2B&logiccomb1=AND&gene1=VGAT-Cre&zygosity1=%2B-&logiccomb2=AND&gene2=&zygosity2='
    Returns the filter string that was actually stored ('' when the filter was cleared).
    '''
    username=checkPassword()
    if not username:
        return """<meta http-equiv="refresh" content="0;url=/home/login" />""" #This takes the unknown user to login
    #check if any selection is blank. If it is, remove filter
    filt2=unquote_htmlform(filt)
    hasblank=False
    if col=='genetics':
        # Genetics forms always end with one blank (logiccomb, gene, zygosity) triple.
        if len(filt2)==1:
            # Nothing but the trailing blank triple was submitted: clear the filter.
            filt=''
        else:
            if filt2[1][1]=='' or filt2[2][1]=='':
                # The first gene/zygosity pair is blank: treat the whole filter as blank.
                hasblank=True
            else:
                filt='&logiccomb'.join(filt.split('&logiccomb')[:-1]) #gets rid of the last set of blanks
    else:
        for i in range(len(filt2)):
            if filt2[i][1]=='': #if there is a blank
                hasblank=True
    if hasblank:
        filt=''
    #take all the users column headers from the database
    cols=db.execute("SELECT columns FROM lab_members WHERE name=%s",(username,))[0][0]
    if cols is None:
        cols='mousename,,'
    cols=cols.split(',')
    # now find the column of interest and replace its filter with the new filter
    cols[cols.index(col)+1]=filt
    #now convert it back into a string and save it to the database
    cols=','.join(cols)
    db.execute("UPDATE lab_members SET columns=%s WHERE name=%s",(cols,username))
    return(filt)
@cherrypy.expose
def trash(self, name, typ):
    '''
    This function permanently deletes a mouse, cage, or PL when called.
    typ is either 'cage' 'mouse' or 'pl'.
    Deletion is refused when the record is reserved/managed by a different
    lab member; a human-readable status message is always returned.
    '''
    username=checkPassword()
    if not username:
        return """<meta http-equiv="refresh" content="0;url=/home/login" />""" #This takes the unknown user to login
    if typ=='mouse': #If we are deleting a mouse
        mouseid=db.execute("SELECT id FROM mice WHERE name=%s",(name,))
        if mouseid==[]:
            return 'You cannot delete this mouse because it does not exist.'
        else:
            mouseid=mouseid[0][0]
            owner=db.execute("SELECT name FROM lab_members LEFT JOIN experiments ON lab_members.id=experiments.lab_member_id WHERE mouse_id=%s",(mouseid,))
            if owner==[]:
                owner=None
            else:
                owner=owner[0][0]
            if owner is not None and owner!=username:
                return "{} cannot be deleted because it is reserved by {}".format(name, owner)
            else:
                db.execute('DELETE FROM mice WHERE id=%s',(mouseid,))
                return "{} has been deleted permanently".format(name)
    elif typ=='cage': #If we are deleting a cage
        cageid=db.execute("SELECT id FROM cages WHERE name=%s",(name,))
        if cageid==[]:
            # BUG FIX: this message was previously a bare expression (no 'return'),
            # so the caller silently received None.
            return 'You cannot delete this cage because it does not exist.'
        else:
            cageid=cageid[0][0]
            owner=db.execute("SELECT name FROM lab_members LEFT JOIN care_taker ON lab_members.id=care_taker.lab_member_id WHERE cage_id=%s",(cageid,))
            if owner==[]:
                owner=None
            else:
                owner=owner[0][0]
            if owner is not None and owner!=username:
                return "{} cannot be deleted because it is managed by {}".format(name, owner)
            else:
                # Remove dependent rows first, then the cage itself.
                db.execute("DELETE FROM housing WHERE cage_id=%s",(cageid,))
                db.execute("DELETE FROM care_taker WHERE cage_id=%s",(cageid,))
                db.execute('DELETE FROM cages WHERE id=%s',(cageid,))
                return "{} has been deleted permanently".format(name)
    elif typ=='pl':
        # Litters have no primary key; they are identified by (cage, mother, DOB).
        fields=unquote_htmlform(name)
        d={i[0]: i[1] for i in fields}
        db.execute("DELETE FROM litters WHERE cage_id=%s AND mother_id=%s AND DOB=%s",(d['cage_id'],d['mother_id'],d['DOB']))
        return "That pup litter has been deleted permanently"
@cherrypy.expose
def selectColumns(self, columns):
    """Save the user's chosen display columns (with default filters) and reset sorting."""
    username = checkPassword()
    if not username:
        return """<meta http-equiv="refresh" content="0;url=/home/login" />"""  # redirect unknown users to login
    chosen = [pair[0] for pair in unquote_htmlform(columns)]
    # These columns are mandatory in their respective views, or errors will occur.
    for required in ('cagename2', 'mousename'):
        if required not in chosen:
            chosen.insert(0, required)
    cols = ",,".join(chosen) + ",,"
    # Seed sensible default filters for a few columns.
    for empty_pair, with_default in (
        ('active,,', 'active,active=Yes,'),
        ('life_status,,', 'life_status,life_status=alive,'),
        ('caretaker,,', 'caretaker,caretaker=' + username + ','),
    ):
        cols = cols.replace(empty_pair, with_default)
    sortby = str({'mouse': 'mousename DESC', 'cage': 'cagename2 DESC'})
    db.execute("UPDATE lab_members SET columns=%s, sortby=%s WHERE name=%s", (cols, sortby, username))
    return cols
@cherrypy.expose
def pickView(self, viewtype):
    """Persist whether the user is browsing by mouse or by cage."""
    username = checkPassword()
    if not username:
        return """<meta http-equiv="refresh" content="0;url=/home/login" />"""  # redirect unknown users to login
    # Only the two known view types are ever written; anything else is ignored.
    queries = {
        'mouse': "UPDATE lab_members SET viewtype='mouse' WHERE name=%s",
        'cage': "UPDATE lab_members SET viewtype='cage' WHERE name=%s",
    }
    if viewtype in queries:
        db.execute(queries[viewtype], (username,))
    return 'success'
@cherrypy.expose
def pickColumnsForm(self):
    """Build the column-selection popup: two checkbox fieldsets (mouse view and
    cage view) with the user's currently displayed columns pre-checked."""
    username=checkPassword()
    if not username:
        return """<meta http-equiv="refresh" content="0;url=/home/login" />""" #This takes the unknown user to login
    else:
        oldcols=db.execute("SELECT columns FROM lab_members WHERE name=%s",(username,))[0][0]
        if oldcols is None:
            oldcols=''
        oldcols=oldcols.split(',')
        # 'columns' stores name,filter pairs; keep only the names (even indices).
        oldcols=[oldcols[i] for i in range(0,len(oldcols),2)]
        article="<div style='display: none; background-color:yellow; '>f</div>"
        article+="<form id='selectColumns'>"
        article+="<div style='float:left; margin-right: 50px; margin-bottom:20px; width:100%'>"
        article+="<table><tbody><tr>"
        article+= "<th><label>Mouse view</label></th>"
        article+= "<th><label>Cage view</label></th>"
        article+="<tr><td><fieldset id='mouseview'>"
        cols=getAllMouseColumns()
        for c in cols:
            # Pre-check columns the user already displays.
            if c in oldcols:
                checked='checked'
            else:
                checked=''
            article+="<label>{0}</label><input name='{1}' type='checkbox' {2}><br>".format(getPrettyText(c),c,checked)
        article+= "</fieldset></td>"
        article+= "<td><fieldset id='cageview'>"
        cols=getAllCageColumns()
        for c in cols:
            if c in oldcols:
                checked='checked'
            else:
                checked=''
            article+="<label>{0}</label><input name='{1}' type='checkbox' {2}><br>".format(getPrettyText(c),c,checked)
        article+= "</fieldset>"
        article+="</td></tr></tbody></table>"
        article+="</div><input style='position: absolute; bottom: 10px; right: 10px;' class='button-link' type='submit'></form>"
        article+="""<img class='close_button' src='/support/images/x.gif' onclick="closePopup('.bubble')">"""
        return article
###############################################################################
######### CAGE STUFF #########################################
###############################################################################
@cherrypy.expose
def addcageForm(self, cageN, cagename):
    """Return the cage popup form: blank for a new cage, pre-filled when editing."""
    username = checkPassword()
    if not username:
        return """<meta http-equiv="refresh" content="0;url=/home/login" />"""  # redirect unknown users to login
    if cagename == '':
        # No name supplied: build an empty form for creating a new cage.
        return getCageForm(username, cageN=cageN)
    # Editing an existing cage: load it and pre-fill the form.
    existing = Cage(cagename)
    return getCageForm(username, existing.d, cageN, existing.mice, existing.litters, getcagehistory(cagename))
@cherrypy.expose
def addcage(self, data, cagename):
    """Create a new cage (cagename == '') or apply edits to an existing one."""
    username = checkPassword()
    if not username:
        return """<meta http-equiv="refresh" content="0;url=/home/login" />"""  # redirect unknown users to login
    c = Cage(cagename)
    d = {}
    for key, value in unquote_htmlform(data):
        # Normalize submitted form values: blanks become NULL, yes/no become 1/0.
        if value == '':
            d[key] = None
        elif value in ('True', 'Yes'):
            d[key] = 1
        elif value in ('False', 'No'):
            d[key] = 0
        else:
            d[key] = value
    if cagename == '':  # creating a brand-new cage
        return c.addToDB(d)
    if c.d == []:  # the supplied name did not match an existing cage
        return "'{}' isn't a real cage name".format(cagename)
    return c.editOldCage(d)
@cherrypy.expose
def refreshcage(self, cageid, cagename):
    """Re-render the inner cage form for *cagename* and return it as HTML.

    *cageid* is accepted for interface compatibility, but the lookup is by name.
    """
    # (a leftover debug print was removed here)
    username = checkPassword()
    if not username:
        return """<meta http-equiv="refresh" content="0;url=/home/login" />"""  # redirect unknown users to login
    c = Cage(cagename)
    return etree.tostring(getInnerCageForm(username, c.d, c.mice, c.litters, getcagehistory(cagename)), pretty_print=True)
###############################################################################
######### MOUSE STUFF #########################################
###############################################################################
@cherrypy.expose
def editmouse(self, data, mouseID):
    """Create a mouse (mouseID == '') or update an existing one from form data."""
    username = checkPassword()
    if not username:
        return """<meta http-equiv="refresh" content="0;url=/home/login" />"""  # redirect unknown users to login
    m = Mouse(username, mouseID)
    d = {}
    for key, value in unquote_htmlform(data):
        # Normalize submitted form values: blanks become NULL, yes/no become 1/0.
        if value == '':
            d[key] = None
        elif value in ('True', 'Yes'):
            d[key] = 1
        elif value in ('False', 'No'):
            d[key] = 0
        else:
            d[key] = value
    if mouseID == '':  # creating a brand-new mouse
        return m.addToDB(d)
    answer = m.editOldMouse(d)
    if username == 'Mel':
        # Long-standing easter egg for this particular user; preserved as-is.
        return answer + ' mouse mouse'
    return answer
@cherrypy.expose
def mouseform(self, mouseID):
    """Return the mouse popup form: blank for a new mouse, pre-filled when editing."""
    username = checkPassword()
    if not username:
        return """<meta http-equiv="refresh" content="0;url=/home/login" />"""  # redirect unknown users to login
    if mouseID == '':
        # No id supplied: build an empty form for adding a mouse.
        return getMouseForm()
    # Editing an existing mouse: pre-fill the form from its record.
    return getMouseForm(Mouse(username, mouseID).d)
@cherrypy.expose
def moveMouse(self, mouseID, newcage):
    """Transfer a mouse into *newcage* and return the transfer result message."""
    username = checkPassword()
    if not username:
        return """<meta http-equiv="refresh" content="0;url=/home/login" />"""  # redirect unknown users to login
    mouse = Mouse(username, mouseID)
    # Mouse.transfer expects the name under the 'name' key; copy it over.
    mouse.d['name'] = mouse.d['mousename']
    return mouse.transfer(newcage)
@cherrypy.expose
def removeMouseFromCage(self,mouseID):
    """Detach the given mouse from its current cage."""
    time.sleep(.2) # without this I get the error: "ReferenceError: weakly-referenced object no longer exists"
    username=checkPassword()
    if not username:
        return """<meta http-equiv="refresh" content="0;url=/home/login" />""" #This takes the unknown user to login
    time.sleep(.2) # without this I get the error: "ReferenceError: weakly-referenced object no longer exists"
    # NOTE(review): the sleep workaround appears twice (before and after the
    # password check); presumably only one is needed -- confirm before removing either.
    m=Mouse(username,mouseID)
    return m.removeFromCage()
###############################################################################
######### PL STUFF ############################################
###############################################################################
@cherrypy.expose
def addPLForm(self,cagename):
    """Return the HTML popup form for adding a pup litter to *cagename*."""
    username=checkPassword()
    if not username:
        return """<meta http-equiv="refresh" content="0;url=/home/login" />""" #This takes the unknown user to login
    article=""
    article+="""<h1>Add Pup Litter</h1><form id='addPL'><div style="float:left; margin-right: 50px;">"""
    article+="""<div class="bubbleAlert" style="display: none; background-color:yellow; ">f</div>"""
    article+="<label>Cage name: </label><input type='text' name='cagename' value='{}'><br>".format(cagename)
    # The date-of-birth field defaults to today.
    article+="<label>Date of Birth:</label> <input type= 'date' name='DOB' value='{}'><br>".format(date2str(datetime.datetime.now()))
    article+="<label>Mother's name:</label> <input type='text' name='mother'><br>"
    article+="<label>Father's name:</label> <input type='text' name='father'><br>"
    article+="<label>Notes: </label> <textarea rows='10' cols='30' name='notes'></textarea>"
    # Submit posts the serialized form to Ajax.addPL via the addPL() JS helper.
    article+="""<a class='button-link' onclick="addPL($('#addPL').serialize(),'{}');">Submit</a>""".format(cagename)
    article+="</form>"
    article+="""<img class='close_button' src='/support/images/x.gif' onclick="closePopup('.bubble')"> """
    article+="</div>"
    return article
@cherrypy.expose
def editplForm(self,plinfo):
    """Return the HTML popup form for editing an existing pup litter.

    *plinfo* is a serialized form identifying the litter by cage_id,
    mother_id and DOB (litters have no primary key of their own).
    """
    username=checkPassword()
    if not username:
        return """<meta http-equiv="refresh" content="0;url=/home/login" />""" #This takes the unknown user to login
    fields=unquote_htmlform(plinfo)
    d={i[0]: i[1] for i in fields}
    # Remember the original identifying triple so editPL can delete the old row.
    oldplinfo='&oldcage_id={}&oldDOB={}&oldmother_id={}'.format(d['cage_id'],d['DOB'],d['mother_id'])
    # 'd' is rebound here: from this point on it holds the litter row fetched from the DB.
    d=db2.execute("SELECT l.DOB, l.notes, mom.name AS mother, dad.name AS father, cages.name AS cagename FROM litters AS l LEFT JOIN mice AS mom ON mom.id=mother_id LEFT JOIN mice AS dad ON dad.id=father_id LEFT JOIN cages ON cages.id=l.cage_id WHERE l.cage_id=%s AND l.mother_id=%s AND l.DOB=%s",(d['cage_id'],d['mother_id'],d['DOB']))[0]
    article=""
    article+="""<h1>Edit Pup Litter</h1><form id='editPL'><div style="float:left; margin-right: 50px;">"""
    article+="""<div class="bubbleAlert" style="display: none; background-color:yellow; ">f</div>"""
    article+="<label>Cage name: </label><input type='text' name='cagename' value='{}'><br>".format(d['cagename'])
    article+="<label>Date of Birth:</label> <input type= 'date' name='DOB' value='{}'><br>".format(date2str(d['DOB']))
    article+="<label>Mother's name:</label> <input type='text' name='mother' value='{}'><br>".format(d['mother'])
    article+="<label>Father's name:</label> <input type='text' name='father' value='{}'><br>".format(d['father'])
    article+="<label>Notes: </label> <textarea rows='10' cols='30' name='notes'>"+d['notes']+"</textarea>"
    # Submit JS: posts the edited form plus the old identifying triple to Ajax.editPL.
    editpl="editPL($('#editPL').serialize()+'{0}','{1}');".format(oldplinfo,d['cagename'])
    article+="""<a class='button-link' onclick="{0}">Submit</a>""".format(editpl)
    article+="</form>"
    article+="""<img class='close_button' src='/support/images/x.gif' onclick="closePopup('.bubble')"> """
    # 'Separate' first saves the edit, then opens the separate-litter form.
    article+="""<a class='button-link' onclick="{0} separatePLform($('#editPL').serialize());">Separate</a>""".format(editpl)
    article+="</div>"
    return article
@cherrypy.expose
def editPL(self,fields):
    """Apply an edit to a pup litter.

    Litters are keyed by (cage_id, mother_id, DOB), so the edit is performed
    as delete-old-row + insert-new-row. Returns a status message string.
    """
    username=checkPassword()
    if not username:
        return """<meta http-equiv="refresh" content="0;url=/home/login" />""" #This takes the unknown user to login
    fields=unquote_htmlform(fields)
    d={i[0]: i[1] for i in fields}
    # Resolve the submitted names to database ids, bailing out if any is unknown.
    cage_id=db.execute("SELECT id FROM cages WHERE name=%s",(d['cagename'],),commit=False)
    if cage_id==[]:
        return 'The cage you selected does not exist'
    del d['cagename']
    d['cage_id']=cage_id[0][0]
    father_id=db.execute("SELECT id FROM mice WHERE name=%s",(d['father'],),commit=False)
    if father_id==[]:
        return 'The father you selected does not exist'
    del d['father']
    d['father_id']=father_id[0][0]
    mother_id=db.execute("SELECT id FROM mice WHERE name=%s",(d['mother'],),commit=False)
    if mother_id==[]:
        return 'The mother you selected does not exist'
    del d['mother']
    d['mother_id']=mother_id[0][0]
    # Remove the old row (identified by the old* fields), then insert the edited one.
    db.execute("DELETE FROM litters WHERE cage_id=%s AND mother_id=%s AND DOB=%s",(d['oldcage_id'],d['oldmother_id'],d['oldDOB']))
    del d['oldcage_id']
    del d['oldmother_id']
    del d['oldDOB']
    # Build a pyformat INSERT from whatever fields remain in d.
    columns=', '.join(d.keys())
    parameters = ', '.join(['%({0})s'.format(k) for k in d.keys()])
    query = 'INSERT INTO litters ({0}) VALUES ({1})'.format(columns, parameters)
    db.execute(query,d)
    return 'Successfully edited pup litter'
@cherrypy.expose
def addPL(self,fields):
username=checkPassword()
if not username:
return """<meta http-equiv="refresh" content="0;url=/home/login" />""" #This takes the unknown user to login
fields=unquote_htmlform(fields)
d={i[0]: i[1] for i in fields}
cagename=d['cagename']
cage_id=db.execute("SELECT id FROM cages WHERE name=%s",(d['cagename'],),commit=False)
if cage_id==[]:
return 'The cage you selected does not exist'
del d['cagename']
d['cage_id']=cage_id[0][0]
father_id=db.execute("SELECT id FROM mice WHERE name=%s",(d['father'],),commit=False)
if father_id==[]:
return 'The father you selected does not exist'
del d['father']
d['father_id']=father_id[0][0]
mother_id=db.execute("SELECT id FROM mice WHERE name=%s",(d['mother'],),commit=False)
if mother_id==[]:
return 'The mother you selected does not exist'
del d['mother']
d['mother_id']=mother_id[0][0]
columns=', '.join(d.keys())
parameters = ', '.join(['%({0})s'.format(k) for k in d.keys()])
query = 'INSERT INTO litters ({0}) VALUES ({1})'.format(columns, parameters)
db.execute(query,d)
return 'Successfully added pup litter to {}'.format(cagename)
    @cherrypy.expose
    def separatePLform(self,plinfo):
        """Render the 'Separate Pup Litter' popup form.

        Carries the litter's current fields through as hidden inputs and
        asks only for a base name plus male/female/unknown counts.
        NOTE(review): unlike the sibling handlers this one does not call
        checkPassword() — confirm whether it should be login-protected too.
        """
        fields=unquote_htmlform(plinfo)
        d={i[0]: i[1] for i in fields}
        article=''
        article+="""<h1>Separate Pup Litter</h1><form id='separatePL'><div style="float:left; margin-right: 50px;">"""
        article+="""<div class="bubbleAlert" style="display: none; background-color:yellow; ">f</div>"""
        article+="<input type='hidden' name='cagename' value='{}'>".format(d['cagename'])
        article+="<input type='hidden' name='DOB' value='{}'>".format(d['DOB'])
        article+="<input type='hidden' name='mother' value='{}'>".format(d['mother'])
        article+="<input type='hidden' name='father' value='{}'>".format(d['father'])
        article+="<input type='hidden' name='notes' value='{}'>".format(d['notes'])
        # Default base name is "<mother's initial>.<DOB as YYMMDD>."
        article+="<label>Base name: </label><input type='text' name='basename' value='{}'>".format(d['mother'][0]+'.'+d['DOB'].replace('-','')[2:]+'.')
        article+="<label>Number of males: </label><input type='text' name='nmales'>"
        article+="<label>Number of females: </label><input type='text' name='nfemales'>"
        article+="<label>Number of unknowns: </label><input type='text' name='nunknowns'>"
        article+="""<a class='button-link' onclick="{0}">Submit</a>""".format("separatePL($('#separatePL').serialize());")
        article+="</form>"
        article+="""<img class='close_button' src='/support/images/x.gif' onclick="closePopup('.bubble')"> """
        article+="</div>"
        return article
@cherrypy.expose
def separatePL(self,plinfo):
username=checkPassword()
if not username:
return """<meta http-equiv="refresh" content="0;url=/home/login" />""" #This takes the unknown user to login
fields=unquote_htmlform(plinfo)
d={i[0]: i[1] for i in fields}
d['life_status']='alive'
d['genotyped']=0
d['breeding_status']='virgin'
d['startDate']=d['DOB']
cage_id=db.execute("SELECT id FROM cages WHERE name=%s",(d['cagename'],),commit=False)[0][0]
mother_id=db.execute("SELECT id FROM mice WHERE name=%s",(d['mother'],),commit=False)[0][0]
if db.execute("SELECT * from litters WHERE cage_id=%s AND mother_id=%s AND DOB=%s",(cage_id,mother_id,d['DOB']))==[]:
return 'The PL you are trying to separate no longer exists.'
m=Mouse(username)
try:
d['nmales']=int(d['nmales'])
d['nfemales']=int(d['nfemales'])
d['nunknowns']=int(d['nunknowns'])
except ValueError:
return 'The number of males and females must be an integer value.'
i=0
d['sex']='male'
for n in range(d['nmales']):
i+=1
d['mousename']=d['basename']+str(i)
while db.execute("SELECT id FROM mice WHERE name=%s",(d['mousename'],),commit=False) != []:
i+=1
d['mousename']=d['basename']+str(i)
m.addToDB(d)
d['sex']='female'
for n in range(d['nfemales']):
i+=1
d['mousename']=d['basename']+str(i)
while db.execute("SELECT id FROM mice WHERE name=%s",(d['mousename'],),commit=False) != []:
i+=1
d['mousename']=d['basename']+str(i)
m.addToDB(d)
d['sex']='unknown'
for n in range(d['nunknowns']):
i+=1
d['mousename']=d['basename']+str(i)
while db.execute("SELECT id FROM mice WHERE name=%s",(d['mousename'],),commit=False) != []:
i+=1
d['mousename']=d['basename']+str(i)
m.addToDB(d)
db.execute("DELETE FROM litters WHERE cage_id=%s AND mother_id=%s AND DOB=%s",(cage_id,mother_id,d['DOB']))
return 'Successfully separated pup litter!'
class Database:
    """CherryPy handler for the main database page (mouse/cage table UI)."""
    ajax=Ajax()
    @cherrypy.expose
    def index(self):
        """Render the database page shell: a mouse/cage view toggle, an
        empty table filled in client-side, and a sidebar with action
        buttons plus live mouse/cage counts."""
        username=checkPassword()
        if not username:
            return """<meta http-equiv="refresh" content="0;url=/home/login" />""" #This takes the unknown user to login
        ###################################################################################################################
        # STYLE AND JAVASCRIPT
        ###################################################################################################################
        style="""
        .filterclose_button:not(.filtered){display:none;}
        .filterclose_button.filtered{
            position: absolute;
            top: 2px;
            right: 2px;
            z-index: 1;
            display:none;}
        #db th:hover .filterclose_button.filtered{display:block;}
        """
        javascript="""
        $( document ).ready(function(){
        });
        """
        resources= "<style type='text/css'>"+style+"</style>"
        resources+="<link rel='stylesheet' href='/support/css/database.css' type='text/css' />"
        resources+="<link rel='stylesheet' href='/support/css/dragtable.css' type='text/css' />"
        resources+="<script type='text/javascript'>"+javascript+"</script>"
        resources+="<script type='text/javascript' src='/support/javascript/jquery.dragtable.js'></script>"
        resources+="<script type='text/javascript' src='/support/javascript/databaseInterface.js'></script>"
        resources+="<link rel='stylesheet' type='text/css' href='/support/thirdparty/tooltipster/css/tooltipster.css' />"
        resources+="<script type='text/javascript' src='/support/thirdparty/tooltipster/js/jquery.tooltipster.js'></script>"
        # Per-user view preferences; fall back to the mouse view with only
        # the mousename column when nothing is stored yet.
        viewtype,cols=db.execute("SELECT viewtype, columns FROM lab_members WHERE name=%s",(username,))[0]
        if viewtype is None:
            viewtype='mouse'
            cols="mousename,,"
        if cols is None:
            cols="mousename,,"
        cols=cols.split(',')
        # The on/off switch renders checked when the mouse view is active.
        if viewtype=='mouse':
            c='checked'
        else:
            c=''
        article= ''
        article+="""<div class="onoffswitch" id='viewtype' style='display:inline-block;'>
                        <input type="checkbox" name="view" class="onoffswitch-checkbox" id="myonoffswitch" {0}>
                        <label class="onoffswitch-label" for="myonoffswitch">
                            <div class="onoffswitch-inner"></div>
                            <div class="onoffswitch-switch"></div>
                        </label>
                    </div>""".format(c)
        article+="""<span style='float:right;color:#999;font-size:13px;' id='nrows'></span>"""
        article+="""<table id='db' class="tablesorter"></table>"""
        # Aggregate counts for the sidebar, fetched in one round trip.
        stats=db.execute("""SELECT
                            (SELECT COUNT(*) from mice),
                            (SELECT COUNT(*) from mice where mice.life_status='alive'),
                            (SELECT COUNT(*) from cages),
                            (SELECT COUNT(*) from cages where cages.active=True);""")[0]
        rightbar= """<p><a id="editmice" class="button-link getBubbleButton" style='top:70px;padding:3px 5px;display: block;width: 120px;'">Add Mice</a></p>
                     <p><a id="addcage" class="button-link" style='top:106px; padding:3px 5px;display: block;width: 120px;'">Add Cage</a></p>
                     <p><a id="pickColumns" class="button-link getBubbleButton" style='top:142px;padding:3px 5px;display: block;width: 120px;'">Pick Columns</a></p>
                     <p><img id='trashcan' src='/support/images/trash.svg' style='top:200px; height: 60px;'"></p>"""
        rightbar+="""<p style='color:white;'>
                     Total mice: {}<br>
                     Active mice: {}<br>
                     Total cages: {}<br>
                     Active cages: {}
                     </p>""".format(stats[0],stats[1],stats[2],stats[3])
        username=checkPassword()
        return glamsTemplate(article, username, resources=resources, rightbar=rightbar)
initialize()
| kyleellefsen/Glams | Glams/glams/website/database/database.py | Python | mit | 55,521 |
#http://pandas.pydata.org/pandas-docs/stable/tutorials.html
#file='pand.py'
#exec(compile(open(file).read(), file, 'exec'))
"""Interactive pandas tutorial: build a small baby-name table, round-trip it
through CSV, locate the most common name, and plot the births with the
winner annotated.  Pauses with input() between steps so each intermediate
result can be inspected."""
from pandas import DataFrame, read_csv
import matplotlib.pyplot as plt
import pandas as pd
#import sys
#import matplotlib

names = ['Bob','Jessica','Mary','John','Mel']
births = [968, 155, 77, 578, 973]
BabyDataSet = list(zip(names,births)) # zip pairs entries together and list combines the entries to a list
print(BabyDataSet)
#The DataFrame attribute of pandas reorganizes the list into a tabular panda object
#similar to an sql table or an excel spreadsheet.
df = pd.DataFrame(data = BabyDataSet, columns=['Names', 'Births'])
print(df)
wait = input("PRESS ENTER TO CONTINUE.")
#We can now save the content as a standard tabular data format (csv)
df.to_csv('births1880.csv',index=False,header=False)
#We can also read back from the same file into a panda object
df = pd.read_csv(r'births1880.csv')
print(df)
print('Wrong header. read_cvs treated the first record as the header')
print('set the header to null')
wait = input("PRESS ENTER TO CONTINUE.")
df = pd.read_csv(r'births1880.csv',header=None)
print(df)
print('Now we have the right data but no header')
print('Label the headers')
wait = input("PRESS ENTER TO CONTINUE.")
df = pd.read_csv(r'births1880.csv', names=['Names','Births'])
print(df)
print('This looks like the table we need')
print('Numbers of 0,1,2,3,4 are row numbers similar to an Excel spreadsheet')
wait = input("PRESS ENTER TO CONTINUE.")
print('Lets do something with this tabulated data')
print('Sort the dataframe and select the top row')
Sorted1=df.sort_values(['Births'], ascending=False)
#Sorted2=df.sort_values(by='Births', ascending=False)
#Sorted.head(1)
print(Sorted1)
wait = input("PRESS ENTER TO CONTINUE.")
print('Use the max() attribute to find the maximum value')
MaxValue=df['Births'].max()
print('MaxValue is ',MaxValue)
wait = input("PRESS ENTER TO CONTINUE.")
print('Convert a column to an array')
print(df['Names'].values)
print('Reference the second entry')
print(df['Names'][1:2].values)
print('Apply a booleen mask on the Births column when compared to the MaxValue')
mask = df['Births']==MaxValue
print(mask)
print('Find the name associated with the maximum value')
# .values on the masked Series yields an ndarray; take element 0 so MaxName
# is a plain string — concatenating str + ndarray in Text below is invalid.
MaxName = df['Names'][mask].values[0]
print('Name at Max Value is ',MaxName)
wait = input("PRESS ENTER TO CONTINUE.")
#Create a graph object
print('Create a graph object')
df['Births'].plot()
# Text to display on graph
print('Construct a string to display on the graph')
Text = str(MaxValue) + " - " + MaxName
print(Text)
# Add text to graph
print('Annonate the graph')
plt.annotate(Text, xy=(1, MaxValue), xytext=(8, 0),
                 xycoords=('axes fraction', 'data'), textcoords='offset points')
print('Show the graph')
plt.show()
#Uncomment the following to save it as a png file
#plt.savefig('mygraph.png')
| nuitrcs/python-researchers-toolkit | scripts/pand.py | Python | mit | 2,880 |
from blaze.partition import *
from blaze.expr import shape
import numpy as np
x = np.arange(24).reshape(4, 6)
def eq(a, b):
    """Equality helper that collapses array comparisons to a single truth value.

    Plain Python comparisons already give a bool and are returned as-is;
    when either side is an ndarray, the element-wise mask is reduced with
    .all(); anything else returns whatever the comparison produced.
    """
    comparison = a == b
    if isinstance(comparison, bool):
        return comparison
    if isinstance(a, np.ndarray) or isinstance(b, np.ndarray):
        # Element-wise mask: equal only if every element matches.
        return comparison.all()
    return comparison
def test_partition_get():
    # Single row, single column and interior 2x2 block lookups must match
    # the equivalent direct numpy slices of the module-level 4x6 array x.
    assert eq(partition_get(x, (0, slice(0, None)), chunksize=(1, 6)),
              x[0, :])
    assert eq(partition_get(x, (slice(0, None), 0), chunksize=(4, 1)),
              x[:, 0])
    assert eq(partition_get(x, (slice(2, 4), slice(0, 2)), chunksize=(2, 2)),
              x[2:4, 0:2])
def test_partition_set():
    # Writing a 2x2 block of ones through partition_set must mutate the
    # target array in place (local x shadows the module-level one).
    x = np.arange(24).reshape(4, 6)
    partition_set(x,
                  (slice(0, 2), slice(0, 2)), np.array([[1, 1], [1, 1]]),
                  chunksize=(2, 2))
    assert (x[:2, :2] == 1).all()
def test_partition_set_1d():
    # A whole-column write (4x1 chunk) must land in column 0 only.
    x = np.arange(24).reshape(4, 6)
    partition_set(x,
                  (slice(0, 4), 0), np.array([[1], [1], [1], [1]]),
                  chunksize=(4, 1))
    assert (x[:4, 0] == 1).all()
def test_partitions():
    # Row-wise (1x6), column-wise (4x1) and tiled (2x3) partitionings of
    # the module-level 4x6 array, in row-major order.
    assert list(partitions(x, chunksize=(1, 6))) == \
        [(i, slice(0, 6)) for i in range(4)]
    assert list(partitions(x, chunksize=(4, 1))) == \
        [(slice(0, 4), i) for i in range(6)]
    assert list(partitions(x, chunksize=(2, 3))) == [
        (slice(0, 2), slice(0, 3)), (slice(0, 2), slice(3, 6)),
        (slice(2, 4), slice(0, 3)), (slice(2, 4), slice(3, 6))]
def dont_test_partitions_flat():
    # Disabled (name does not start with 'test_'); duplicates the tiled
    # case of test_partitions — presumably kept for a planned 'flat' mode.
    assert list(partitions(x, chunksize=(2, 3))) == [
        (slice(0, 2), slice(0, 3)), (slice(0, 2), slice(3, 6)),
        (slice(2, 4), slice(0, 3)), (slice(2, 4), slice(3, 6))]
def test_uneven_partitions():
    """Chunks that do not divide the shape evenly must still tile the array,
    with ragged partitions at the high edges."""
    x = np.arange(10*12).reshape(10, 12)
    parts = list(partitions(x, chunksize=(7, 7)))
    assert len(parts) == 2 * 2
    assert parts == [(slice(0, 7), slice(0, 7)), (slice(0, 7), slice(7, 12)),
                     (slice(7, 10), slice(0, 7)), (slice(7, 10), slice(7, 12))]
    x = np.arange(20*24).reshape(20, 24)
    parts = list(partitions(x, chunksize=(7, 7)))
    # Previously computed but never checked: ceil(20/7) * ceil(24/7) blocks.
    assert len(parts) == 3 * 4
def test_3d_partitions():
    # 2x2x3 chunks tile the 4x4x6 array into exactly 2*2*2 partitions.
    x = np.arange(4*4*6).reshape(4, 4, 6)
    parts = list(partitions(x, chunksize=(2, 2, 3)))
    assert len(parts) == 2 * 2 * 2
| vitan/blaze | blaze/tests/test_partition.py | Python | bsd-3-clause | 2,295 |
from ConfigParser import DEFAULTSECT
from cmd import Cmd
import logging
from threading import Thread
from fibbingnode import CFG, log
from fibbingnode.misc.sjmp import SJMPClient, ProxyCloner
from networkx import DiGraph
from fibbingnode.southbound.interface import ShapeshifterProxy, FakeNodeProxy
class ShapeshifterProxyTest(ShapeshifterProxy):
    """ShapeshifterProxy implementation that mirrors received topology
    updates into an in-memory networkx DiGraph (and logs each change)."""
    def __init__(self):
        self.graph = DiGraph()

    def add_edge(self, source, destination, metric):
        # Record a new directed link with its metric as edge cost.
        log.info('Adding %s-%s @ %s', source, destination, metric)
        self.graph.add_edge(source, destination, cost=metric)

    def remove_edge(self, source, destination):
        log.info('Removing %s-%s', source, destination)
        self.graph.remove_edge(source, destination)

    def boostrap_graph(self, graph):
        # NOTE(review): 'boostrap' (sic) — this name presumably has to match
        # the method invoked by the remote side; do not rename it here alone.
        # graph arrives as an iterable of (u, v, metric) triples.
        log.info('Received graph: %s', graph)
        for u, v, m in graph:
            self.graph.add_edge(u, v, cost=m)
class TestCLI(Cmd):
    """Tiny interactive shell driving the FakeNodeProxy: 'add' and 'remove'
    push/remove hard-coded fake routes, 'exit' quits the loop."""
    Cmd.prompt = '> '  # set on the Cmd base class, shared by all instances
    def __init__(self, client, *args, **kwargs):
        Cmd.__init__(self, *args, **kwargs)
        self.client = client  # FakeNodeProxy clone used to send the commands

    def do_add(self, line=''):
        # One fully specified route, one without a source, and a two-entry
        # batch sharing a prefix — exercising the three call shapes.
        self.client.add(('192.168.14.1', '192.168.23.2', 1, '3.3.3.0/24'))
        self.client.add((None, '192.168.23.2', 1, '4.4.4.0/24'))
        self.client.add([(None, '192.168.23.2', 1, '5.5.5.0/24'),
                         (None, '192.168.14.1', 1, '5.5.5.0/24')])

    def do_remove(self, line=''):
        # Mirror image of do_add: remove the same three route sets.
        self.client.remove(('192.168.14.1', '192.168.23.2', '3.3.3.0/24'))
        self.client.remove((None, '192.168.23.2', '4.4.4.0/24'))
        self.client.remove([(None, '192.168.23.2', '5.5.5.0/24'),
                            (None, '192.168.14.1', '5.5.5.0/24')])

    def do_exit(self, line):
        return True
if __name__ == '__main__':
    log.setLevel(logging.DEBUG)
    # Connect a graph-mirroring shapeshifter to the controller's JSON port,
    # clone a FakeNodeProxy over the same connection, pump messages on a
    # background thread, and hand the proxy to the interactive CLI.
    shapeshifter = ShapeshifterProxyTest()
    c = SJMPClient("localhost",
                   CFG.getint(DEFAULTSECT, "json_port"),
                   target=shapeshifter)
    fakenode = ProxyCloner(FakeNodeProxy, c)
    Thread(target=c.communicate, name='client').start()
    TestCLI(fakenode).cmdloop()
    c.stop()
| lferran/FibbingNode | tests/manual/shapeshifterproxytest.py | Python | gpl-2.0 | 2,116 |
import os
import pandas as pd
from .config import BASE_URL
dirname = os.path.dirname(os.path.abspath(__file__))
df = pd.read_pickle(os.path.join(dirname, 'data.p'))
def get_geo(code, year):
    """Return the first record whose 'insee_code' equals *code* as a dict.

    NOTE(review): *year* is accepted but never used — confirm whether the
    lookup should filter on it too.  Raises IndexError when no row matches
    *code* (the [0] on an empty records list).
    """
    row = df[df['insee_code'] == code]
    return row.to_dict('records')[0]
def url_resolver(code, year, department_code=None, region_code=None):
    """Resolve the election-result page URL for a commune.

    Only the 2012 presidential election (PR2012) is supported; any other
    year yields None.  Region, department and commune codes are each
    prefixed with a literal '0', matching the site's path convention.
    """
    if year != 2012:
        return None

    def pad(num):
        return f'0{str(num)}'

    return (f'{BASE_URL}elecresult__PR2012/(path)/PR2012/'
            f'{pad(region_code)}/{pad(department_code)}/{pad(code)}.html')
| miroli/frenchy | frenchy/utils.py | Python | mit | 651 |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import unittest
from unittest.mock import MagicMock, patch
import pytest
import requests
import requests_mock
from airflow.exceptions import AirflowException
from airflow.providers.apache.druid.hooks.druid import DruidDbApiHook, DruidHook
class TestDruidHook(unittest.TestCase):
    """Tests for DruidHook: indexing-job submission (success, failure,
    unknown status, timeout/shutdown) and connection/auth helpers, with all
    HTTP traffic faked via requests_mock."""

    def setUp(self):
        super().setUp()
        session = requests.Session()
        adapter = requests_mock.Adapter()
        session.mount('mock', adapter)

        class TestDRuidhook(DruidHook):
            # Pin the overlord URL so no Airflow connection lookup is needed.
            def get_conn_url(self):
                return 'http://druid-overlord:8081/druid/indexer/v1/task'

        self.db_hook = TestDRuidhook()

    @requests_mock.mock()
    def test_submit_gone_wrong(self, m):
        task_post = m.post(
            'http://druid-overlord:8081/druid/indexer/v1/task',
            text='{"task":"9f8a7359-77d4-4612-b0cd-cc2f6a3c28de"}',
        )
        status_check = m.get(
            'http://druid-overlord:8081/druid/indexer/v1/task/9f8a7359-77d4-4612-b0cd-cc2f6a3c28de/status',
            text='{"status":{"status": "FAILED"}}',
        )

        # The job failed for some reason
        with pytest.raises(AirflowException):
            self.db_hook.submit_indexing_job('Long json file')

        assert task_post.called_once
        assert status_check.called_once

    @requests_mock.mock()
    def test_submit_ok(self, m):
        task_post = m.post(
            'http://druid-overlord:8081/druid/indexer/v1/task',
            text='{"task":"9f8a7359-77d4-4612-b0cd-cc2f6a3c28de"}',
        )
        status_check = m.get(
            'http://druid-overlord:8081/druid/indexer/v1/task/9f8a7359-77d4-4612-b0cd-cc2f6a3c28de/status',
            text='{"status":{"status": "SUCCESS"}}',
        )

        # Exists just as it should
        self.db_hook.submit_indexing_job('Long json file')

        assert task_post.called_once
        assert status_check.called_once

    @requests_mock.mock()
    def test_submit_correct_json_body(self, m):
        task_post = m.post(
            'http://druid-overlord:8081/druid/indexer/v1/task',
            text='{"task":"9f8a7359-77d4-4612-b0cd-cc2f6a3c28de"}',
        )
        status_check = m.get(
            'http://druid-overlord:8081/druid/indexer/v1/task/9f8a7359-77d4-4612-b0cd-cc2f6a3c28de/status',
            text='{"status":{"status": "SUCCESS"}}',
        )

        json_ingestion_string = """
        {
            "task":"9f8a7359-77d4-4612-b0cd-cc2f6a3c28de"
        }
        """
        self.db_hook.submit_indexing_job(json_ingestion_string)

        assert task_post.called_once
        assert status_check.called_once
        # The hook must forward the ingestion spec verbatim as JSON.
        if task_post.called_once:
            req_body = task_post.request_history[0].json()
            assert req_body['task'] == "9f8a7359-77d4-4612-b0cd-cc2f6a3c28de"

    @requests_mock.mock()
    def test_submit_unknown_response(self, m):
        task_post = m.post(
            'http://druid-overlord:8081/druid/indexer/v1/task',
            text='{"task":"9f8a7359-77d4-4612-b0cd-cc2f6a3c28de"}',
        )
        status_check = m.get(
            'http://druid-overlord:8081/druid/indexer/v1/task/9f8a7359-77d4-4612-b0cd-cc2f6a3c28de/status',
            text='{"status":{"status": "UNKNOWN"}}',
        )

        # An unknown error code
        with pytest.raises(AirflowException):
            self.db_hook.submit_indexing_job('Long json file')

        assert task_post.called_once
        assert status_check.called_once

    @requests_mock.mock()
    def test_submit_timeout(self, m):
        # Tight poll interval and max ingestion time force the timeout path,
        # which must shut the task down and raise.
        self.db_hook.timeout = 1
        self.db_hook.max_ingestion_time = 5
        task_post = m.post(
            'http://druid-overlord:8081/druid/indexer/v1/task',
            text='{"task":"9f8a7359-77d4-4612-b0cd-cc2f6a3c28de"}',
        )
        status_check = m.get(
            'http://druid-overlord:8081/druid/indexer/v1/task/9f8a7359-77d4-4612-b0cd-cc2f6a3c28de/status',
            text='{"status":{"status": "RUNNING"}}',
        )
        shutdown_post = m.post(
            'http://druid-overlord:8081/druid/indexer/v1/task/'
            '9f8a7359-77d4-4612-b0cd-cc2f6a3c28de/shutdown',
            text='{"task":"9f8a7359-77d4-4612-b0cd-cc2f6a3c28de"}',
        )

        # Because the jobs keeps running
        with pytest.raises(AirflowException):
            self.db_hook.submit_indexing_job('Long json file')

        assert task_post.called_once
        assert status_check.called
        assert shutdown_post.called_once

    @patch('airflow.providers.apache.druid.hooks.druid.DruidHook.get_connection')
    def test_get_conn_url(self, mock_get_connection):
        get_conn_value = MagicMock()
        get_conn_value.host = 'test_host'
        get_conn_value.conn_type = 'https'
        get_conn_value.port = '1'
        get_conn_value.extra_dejson = {'endpoint': 'ingest'}
        mock_get_connection.return_value = get_conn_value
        hook = DruidHook(timeout=1, max_ingestion_time=5)
        assert hook.get_conn_url() == 'https://test_host:1/ingest'

    @patch('airflow.providers.apache.druid.hooks.druid.DruidHook.get_connection')
    def test_get_auth(self, mock_get_connection):
        get_conn_value = MagicMock()
        get_conn_value.login = 'airflow'
        get_conn_value.password = 'password'
        mock_get_connection.return_value = get_conn_value
        expected = requests.auth.HTTPBasicAuth('airflow', 'password')
        assert self.db_hook.get_auth() == expected

    @patch('airflow.providers.apache.druid.hooks.druid.DruidHook.get_connection')
    def test_get_auth_with_no_user(self, mock_get_connection):
        # Basic auth requires both login and password; either missing -> None.
        get_conn_value = MagicMock()
        get_conn_value.login = None
        get_conn_value.password = 'password'
        mock_get_connection.return_value = get_conn_value
        assert self.db_hook.get_auth() is None

    @patch('airflow.providers.apache.druid.hooks.druid.DruidHook.get_connection')
    def test_get_auth_with_no_password(self, mock_get_connection):
        get_conn_value = MagicMock()
        get_conn_value.login = 'airflow'
        get_conn_value.password = None
        mock_get_connection.return_value = get_conn_value
        assert self.db_hook.get_auth() is None

    @patch('airflow.providers.apache.druid.hooks.druid.DruidHook.get_connection')
    def test_get_auth_with_no_user_and_password(self, mock_get_connection):
        get_conn_value = MagicMock()
        get_conn_value.login = None
        get_conn_value.password = None
        mock_get_connection.return_value = get_conn_value
        assert self.db_hook.get_auth() is None
class TestDruidDbApiHook(unittest.TestCase):
    """Tests for DruidDbApiHook's SQL access paths (URI building, first/all
    record fetches, DataFrame conversion) over a mocked DB-API cursor."""

    def setUp(self):
        super().setUp()
        self.cur = MagicMock(rowcount=0)
        self.conn = conn = MagicMock()
        self.conn.host = 'host'
        self.conn.port = '1000'
        self.conn.conn_type = 'druid'
        self.conn.extra_dejson = {'endpoint': 'druid/v2/sql'}
        self.conn.cursor.return_value = self.cur

        class TestDruidDBApiHook(DruidDbApiHook):
            # Short-circuit both connection lookups to the mock above.
            def get_conn(self):
                return conn

            def get_connection(self, conn_id):
                return conn

        self.db_hook = TestDruidDBApiHook

    def test_get_uri(self):
        db_hook = self.db_hook()
        assert 'druid://host:1000/druid/v2/sql' == db_hook.get_uri()

    def test_get_first_record(self):
        statement = 'SQL'
        result_sets = [('row1',), ('row2',)]
        self.cur.fetchone.return_value = result_sets[0]

        assert result_sets[0] == self.db_hook().get_first(statement)
        # Connection and cursor must both be closed exactly once.
        assert self.conn.close.call_count == 1
        assert self.cur.close.call_count == 1
        self.cur.execute.assert_called_once_with(statement)

    def test_get_records(self):
        statement = 'SQL'
        result_sets = [('row1',), ('row2',)]
        self.cur.fetchall.return_value = result_sets

        assert result_sets == self.db_hook().get_records(statement)
        assert self.conn.close.call_count == 1
        assert self.cur.close.call_count == 1
        self.cur.execute.assert_called_once_with(statement)

    def test_get_pandas_df(self):
        statement = 'SQL'
        column = 'col'
        result_sets = [('row1',), ('row2',)]
        self.cur.description = [(column,)]
        self.cur.fetchall.return_value = result_sets
        df = self.db_hook().get_pandas_df(statement)

        # Column name comes from the cursor description; rows in order.
        assert column == df.columns[0]
        for i in range(len(result_sets)):
            assert result_sets[i][0] == df.values.tolist()[i][0]
        assert self.conn.close.call_count == 1
        assert self.cur.close.call_count == 1
        self.cur.execute.assert_called_once_with(statement)
| dhuang/incubator-airflow | tests/providers/apache/druid/hooks/test_druid.py | Python | apache-2.0 | 9,442 |
import asyncio
from autobahn.asyncio.websocket import WebSocketServerProtocol, WebSocketServerFactory, \
WebSocketClientProtocol, WebSocketClientFactory
async def create_websocket_connection (Protocol, host, port, loop=None):
    """Connect *Protocol* (a WebSocketClientProtocol subclass) to host:port.

    Uses the given event loop, or the current one when *loop* is None, and
    returns the connected protocol instance with its transport attached.
    """
    assert issubclass(Protocol, WebSocketClientProtocol)
    event_loop = loop or asyncio.get_event_loop()
    client_factory = WebSocketClientFactory()
    client_factory.protocol = Protocol
    transport, protocol = await event_loop.create_connection(
        client_factory, host, port)
    # Expose the transport so callers can close the connection directly.
    protocol.transport = transport
    return protocol
async def create_websocket_server (Protocol, host, port, loop=None):
    """Start a WebSocket server serving *Protocol* on host:port.

    Uses the given event loop, or the current one when *loop* is None, and
    returns the asyncio Server object.
    """
    assert issubclass(Protocol, WebSocketServerProtocol)
    event_loop = loop or asyncio.get_event_loop()
    server_factory = WebSocketServerFactory()
    server_factory.protocol = Protocol
    return await event_loop.create_server(server_factory, host, port)
| onlabsorg/olopy | olopy/websocket.py | Python | mit | 924 |
# Copyright 2014 Mellanox Technologies, Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import mock
import testtools
from neutron.plugins.ml2.drivers.mech_sriov.agent.common \
import exceptions as exc
from neutron.plugins.ml2.drivers.mech_sriov.agent import eswitch_manager as esm
from neutron.tests import base
class TestCreateESwitchManager(base.BaseTestCase):
    """ESwitchManager.discover_devices: a failing VF scan must propagate
    InvalidDeviceError, a successful scan must not raise."""
    SCANNED_DEVICES = [('0000:06:00.1', 0),
                       ('0000:06:00.2', 1),
                       ('0000:06:00.3', 2)]

    def test_create_eswitch_mgr_fail(self):
        device_mappings = {'physnet1': ['p6p1']}
        with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent."
                        "eswitch_manager.PciOsWrapper.scan_vf_devices",
                        side_effect=exc.InvalidDeviceError(
                            dev_name="p6p1", reason="device" " not found")),\
                mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent."
                           "eswitch_manager.PciOsWrapper.is_assigned_vf",
                           return_value=True):

            with testtools.ExpectedException(exc.InvalidDeviceError):
                esm.ESwitchManager().discover_devices(
                    device_mappings, None)

    def test_create_eswitch_mgr_ok(self):
        device_mappings = {'physnet1': ['p6p1']}
        with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent."
                        "eswitch_manager.PciOsWrapper.scan_vf_devices",
                        return_value=self.SCANNED_DEVICES),\
                mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent."
                           "eswitch_manager.PciOsWrapper.is_assigned_vf",
                           return_value=True):

            esm.ESwitchManager().discover_devices(device_mappings, None)
class TestESwitchManagerApi(base.BaseTestCase):
    """ESwitchManager public API: device state get/set, max-rate handling
    and existence checks, with the embedded-switch layer mocked out.  A
    MAC/PCI mismatch must be logged and treated as absent/False."""
    SCANNED_DEVICES = [('0000:06:00.1', 0),
                       ('0000:06:00.2', 1),
                       ('0000:06:00.3', 2)]

    ASSIGNED_MAC = '00:00:00:00:00:66'
    PCI_SLOT = '0000:06:00.1'
    WRONG_MAC = '00:00:00:00:00:67'
    WRONG_PCI = "0000:06:00.6"

    def setUp(self):
        super(TestESwitchManagerApi, self).setUp()
        device_mappings = {'physnet1': ['p6p1']}
        # Build the manager over a faked VF scan so no real devices are
        # needed; the scan result drives which PCI slots are known.
        with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent."
                        "eswitch_manager.PciOsWrapper.scan_vf_devices",
                        return_value=self.SCANNED_DEVICES),\
                mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent."
                           "eswitch_manager.PciOsWrapper.is_assigned_vf",
                           return_value=True):
            self.eswitch_mgr = esm.ESwitchManager()
            self.eswitch_mgr.discover_devices(device_mappings, None)

    def test_get_assigned_devices_info(self):
        with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent."
                        "eswitch_manager.EmbSwitch.get_assigned_devices_info",
                        return_value=[(self.ASSIGNED_MAC, self.PCI_SLOT)]):
            result = self.eswitch_mgr.get_assigned_devices_info()
            self.assertIn(self.ASSIGNED_MAC, list(result)[0])
            self.assertIn(self.PCI_SLOT, list(result)[0])

    def test_get_device_status_true(self):
        with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent."
                        "eswitch_manager.EmbSwitch.get_pci_device",
                        return_value=self.ASSIGNED_MAC),\
                mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent."
                           "eswitch_manager.EmbSwitch.get_device_state",
                           return_value=True):
            result = self.eswitch_mgr.get_device_state(self.ASSIGNED_MAC,
                                                       self.PCI_SLOT)
            self.assertTrue(result)

    def test_get_device_status_false(self):
        with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent."
                        "eswitch_manager.EmbSwitch.get_pci_device",
                        return_value=self.ASSIGNED_MAC),\
                mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent."
                           "eswitch_manager.EmbSwitch.get_device_state",
                           return_value=False):
            result = self.eswitch_mgr.get_device_state(self.ASSIGNED_MAC,
                                                       self.PCI_SLOT)
            self.assertFalse(result)

    def test_get_device_status_mismatch(self):
        # Querying with a MAC that does not own the PCI slot must warn and
        # report the device as down.
        with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent."
                        "eswitch_manager.EmbSwitch.get_pci_device",
                        return_value=self.ASSIGNED_MAC),\
                mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent."
                           "eswitch_manager.EmbSwitch.get_device_state",
                           return_value=True):
            with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent."
                            "eswitch_manager.LOG.warning") as log_mock:
                result = self.eswitch_mgr.get_device_state(self.WRONG_MAC,
                                                           self.PCI_SLOT)
                log_mock.assert_called_with('device pci mismatch: '
                                            '%(device_mac)s - %(pci_slot)s',
                                            {'pci_slot': self.PCI_SLOT,
                                             'device_mac': self.WRONG_MAC})
                self.assertFalse(result)

    def test_set_device_status(self):
        with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent."
                        "eswitch_manager.EmbSwitch.get_pci_device",
                        return_value=self.ASSIGNED_MAC),\
                mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent."
                           "eswitch_manager.EmbSwitch.set_device_state"):
            self.eswitch_mgr.set_device_state(self.ASSIGNED_MAC,
                                              self.PCI_SLOT, True)

    def test_set_device_max_rate(self):
        with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent."
                        "eswitch_manager.EmbSwitch.get_pci_device",
                        return_value=self.ASSIGNED_MAC) as get_pci_mock,\
                mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent."
                           "eswitch_manager.EmbSwitch.set_device_max_rate")\
                as set_device_max_rate_mock:
            self.eswitch_mgr.set_device_max_rate(self.ASSIGNED_MAC,
                                                 self.PCI_SLOT, 1000)
            get_pci_mock.assert_called_once_with(self.PCI_SLOT)
            set_device_max_rate_mock.assert_called_once_with(
                self.PCI_SLOT, 1000)

    def test_set_device_status_mismatch(self):
        with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent."
                        "eswitch_manager.EmbSwitch.get_pci_device",
                        return_value=self.ASSIGNED_MAC),\
                mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent."
                           "eswitch_manager.EmbSwitch.set_device_state"):
            with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent."
                            "eswitch_manager.LOG.warning") as log_mock:
                self.eswitch_mgr.set_device_state(self.WRONG_MAC,
                                                  self.PCI_SLOT, True)
                log_mock.assert_called_with('device pci mismatch: '
                                            '%(device_mac)s - %(pci_slot)s',
                                            {'pci_slot': self.PCI_SLOT,
                                             'device_mac': self.WRONG_MAC})

    def _mock_device_exists(self, pci_slot, mac_address, expected_result):
        # Shared driver for the device_exists checks below.
        with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent."
                        "eswitch_manager.EmbSwitch.get_pci_device",
                        return_value=self.ASSIGNED_MAC):
            result = self.eswitch_mgr.device_exists(mac_address,
                                                    pci_slot)
            self.assertEqual(expected_result, result)

    def test_device_exists_true(self):
        self._mock_device_exists(self.PCI_SLOT,
                                 self.ASSIGNED_MAC,
                                 True)

    def test_device_exists_false(self):
        self._mock_device_exists(self.WRONG_PCI,
                                 self.WRONG_MAC,
                                 False)

    def test_device_exists_mismatch(self):
        with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent."
                        "eswitch_manager.EmbSwitch.get_pci_device",
                        return_value=self.ASSIGNED_MAC):
            with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent."
                            "eswitch_manager.LOG.warning") as log_mock:
                result = self.eswitch_mgr.device_exists(self.WRONG_MAC,
                                                        self.PCI_SLOT)
                log_mock.assert_called_with('device pci mismatch: '
                                            '%(device_mac)s - %(pci_slot)s',
                                            {'pci_slot': self.PCI_SLOT,
                                             'device_mac': self.WRONG_MAC})
                self.assertFalse(result)

    @mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent.pci_lib."
                "PciDeviceIPWrapper.get_assigned_macs",
                return_value={})
    @mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent."
                "eswitch_manager.EmbSwitch.set_device_max_rate")
    def test_clear_max_rate_existing_pci_slot(self, max_rate_mock, *args):
        # Unassigned VF on a known slot: rate must be reset to 0.
        self.eswitch_mgr.clear_max_rate(self.PCI_SLOT)
        max_rate_mock.assert_called_once_with(self.PCI_SLOT, 0)

    @mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent.pci_lib."
                "PciDeviceIPWrapper.get_assigned_macs",
                return_value={0: ASSIGNED_MAC})
    @mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent."
                "eswitch_manager.EmbSwitch.set_device_max_rate")
    def test_clear_max_rate_exist_and_assigned_pci(
            self, max_rate_mock, *args):
        # A VF still assigned to a MAC must keep its configured rate.
        self.eswitch_mgr.clear_max_rate(self.PCI_SLOT)
        self.assertFalse(max_rate_mock.called)

    @mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent."
                "eswitch_manager.EmbSwitch.set_device_max_rate")
    def test_clear_max_rate_nonexisting_pci_slot(self, max_rate_mock):
        self.eswitch_mgr.clear_max_rate(self.WRONG_PCI)
        self.assertFalse(max_rate_mock.called)
class TestEmbSwitch(base.BaseTestCase):
    """Unit tests for esm.EmbSwitch, the per-device wrapper around VF state."""
    DEV_NAME = "eth2"
    PHYS_NET = "default"
    ASSIGNED_MAC = '00:00:00:00:00:66'
    PCI_SLOT = "0000:06:00.1"
    WRONG_PCI_SLOT = "0000:06:00.4"
    # (pci_slot, vf_index) pairs as PciOsWrapper.scan_vf_devices returns them.
    SCANNED_DEVICES = [('0000:06:00.1', 0),
                       ('0000:06:00.2', 1),
                       ('0000:06:00.3', 2)]
    # vf_index -> MAC, mimicking PciDeviceIPWrapper.get_assigned_macs output.
    VF_TO_MAC_MAPPING = {0: '00:00:00:00:00:11',
                         1: '00:00:00:00:00:22',
                         2: '00:00:00:00:00:33'}
    # Derived from SCANNED_DEVICES + VF_TO_MAC_MAPPING: which MAC should be
    # reported on which PCI slot.
    EXPECTED_MAC_TO_PCI = {
        '00:00:00:00:00:11': '0000:06:00.1',
        '00:00:00:00:00:22': '0000:06:00.2',
        '00:00:00:00:00:33': '0000:06:00.3'}
    def setUp(self):
        """Build an EmbSwitch whose VF scan reports SCANNED_DEVICES."""
        super(TestEmbSwitch, self).setUp()
        exclude_devices = set()
        with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent."
                        "eswitch_manager.PciOsWrapper.scan_vf_devices",
                        return_value=self.SCANNED_DEVICES):
            self.emb_switch = esm.EmbSwitch(self.PHYS_NET, self.DEV_NAME,
                                            exclude_devices)
    @mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent."
                "eswitch_manager.PciOsWrapper.scan_vf_devices",
                return_value=[(PCI_SLOT, 0)])
    def test_get_assigned_devices_info(self, *args):
        """An assigned VF is reported with both its MAC and its PCI slot."""
        emb_switch = esm.EmbSwitch(self.PHYS_NET, self.DEV_NAME, ())
        with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent.pci_lib."
                        "PciDeviceIPWrapper.get_assigned_macs",
                        return_value={0: self.ASSIGNED_MAC}),\
            mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent."
                       "eswitch_manager.PciOsWrapper.is_assigned_vf",
                       return_value=True):
            result = emb_switch.get_assigned_devices_info()
            self.assertIn(self.ASSIGNED_MAC, list(result)[0])
            self.assertIn(self.PCI_SLOT, list(result)[0])
    @mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent."
                "eswitch_manager.PciOsWrapper.scan_vf_devices",
                return_value=SCANNED_DEVICES)
    def test_get_assigned_devices_info_multiple_slots(self, *args):
        """Every assigned VF is reported with the MAC matching its slot."""
        emb_switch = esm.EmbSwitch(self.PHYS_NET, self.DEV_NAME, ())
        with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent.pci_lib."
                        "PciDeviceIPWrapper.get_assigned_macs",
                        return_value=self.VF_TO_MAC_MAPPING),\
            mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent."
                       "eswitch_manager.PciOsWrapper.is_assigned_vf",
                       return_value=True):
            devices_info = emb_switch.get_assigned_devices_info()
            for device_info in devices_info:
                mac = device_info[0]
                pci_slot = device_info[1]
                self.assertEqual(
                    self.EXPECTED_MAC_TO_PCI[mac], pci_slot)
    def test_get_assigned_devices_empty(self):
        """No device info is reported when no VF is assigned."""
        with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent."
                        "eswitch_manager.PciOsWrapper.is_assigned_vf",
                        return_value=False):
            result = self.emb_switch.get_assigned_devices_info()
            self.assertFalse(result)
    def test_get_device_state_ok(self):
        """get_device_state() relays the pci_lib VF state for a known slot."""
        with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent.pci_lib."
                        "PciDeviceIPWrapper.get_vf_state",
                        return_value=False):
            result = self.emb_switch.get_device_state(self.PCI_SLOT)
            self.assertFalse(result)
    def test_get_device_state_fail(self):
        """get_device_state() rejects a PCI slot this switch does not own."""
        with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent.pci_lib."
                        "PciDeviceIPWrapper.get_vf_state",
                        return_value=False):
            self.assertRaises(exc.InvalidPciSlotError,
                              self.emb_switch.get_device_state,
                              self.WRONG_PCI_SLOT)
    def test_set_device_state_ok(self):
        """Setting state on a known slot emits no pci_lib warning."""
        with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent.pci_lib."
                        "PciDeviceIPWrapper.set_vf_state"):
            with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent."
                            "pci_lib.LOG.warning") as log_mock:
                self.emb_switch.set_device_state(self.PCI_SLOT, True)
                self.assertEqual(0, log_mock.call_count)
    def test_set_device_state_fail(self):
        """Setting state on an unknown slot raises InvalidPciSlotError."""
        with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent.pci_lib."
                        "PciDeviceIPWrapper.set_vf_state"):
            self.assertRaises(exc.InvalidPciSlotError,
                              self.emb_switch.set_device_state,
                              self.WRONG_PCI_SLOT, True)
    def test_set_device_spoofcheck_ok(self):
        """Spoof-check changes on a known slot are delegated to pci_lib."""
        with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent.pci_lib."
                        "PciDeviceIPWrapper.set_vf_spoofcheck") as \
                set_vf_spoofcheck_mock:
            self.emb_switch.set_device_spoofcheck(self.PCI_SLOT, True)
            self.assertTrue(set_vf_spoofcheck_mock.called)
    def test_set_device_spoofcheck_fail(self):
        """Spoof-check changes on an unknown slot raise InvalidPciSlotError."""
        with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent.pci_lib."
                        "PciDeviceIPWrapper.set_vf_spoofcheck"):
            self.assertRaises(exc.InvalidPciSlotError,
                              self.emb_switch.set_device_spoofcheck,
                              self.WRONG_PCI_SLOT, True)
    # The max-rate tests below pin the expected 1000:1 scaling of the rate
    # value before it reaches pci_lib (presumably kbit/s -> Mbit/s inside
    # EmbSwitch.set_device_max_rate -- confirm against the implementation).
    def test_set_device_max_rate_ok(self):
        """A rate of 2000 reaches pci_lib as 2 on VF index 0."""
        with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent.pci_lib."
                        "PciDeviceIPWrapper.set_vf_max_rate") as pci_lib_mock:
            self.emb_switch.set_device_max_rate(self.PCI_SLOT, 2000)
            pci_lib_mock.assert_called_with(0, 2)
    def test_set_device_max_rate_ok2(self):
        """A small non-zero rate (99) is clamped up to 1 rather than 0."""
        with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent.pci_lib."
                        "PciDeviceIPWrapper.set_vf_max_rate") as pci_lib_mock:
            self.emb_switch.set_device_max_rate(self.PCI_SLOT, 99)
            pci_lib_mock.assert_called_with(0, 1)
    def test_set_device_max_rate_rounded_ok(self):
        """2001 rounds down to 2."""
        with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent.pci_lib."
                        "PciDeviceIPWrapper.set_vf_max_rate") as pci_lib_mock:
            self.emb_switch.set_device_max_rate(self.PCI_SLOT, 2001)
            pci_lib_mock.assert_called_with(0, 2)
    def test_set_device_max_rate_rounded_ok2(self):
        """2499 still rounds down to 2."""
        with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent.pci_lib."
                        "PciDeviceIPWrapper.set_vf_max_rate") as pci_lib_mock:
            self.emb_switch.set_device_max_rate(self.PCI_SLOT, 2499)
            pci_lib_mock.assert_called_with(0, 2)
    def test_set_device_max_rate_rounded_ok3(self):
        """2500 rounds up to 3."""
        with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent.pci_lib."
                        "PciDeviceIPWrapper.set_vf_max_rate") as pci_lib_mock:
            self.emb_switch.set_device_max_rate(self.PCI_SLOT, 2500)
            pci_lib_mock.assert_called_with(0, 3)
    def test_set_device_max_rate_disable(self):
        """A zero rate is forwarded unchanged, disabling the limit."""
        with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent.pci_lib."
                        "PciDeviceIPWrapper.set_vf_max_rate") as pci_lib_mock:
            self.emb_switch.set_device_max_rate(self.PCI_SLOT, 0)
            pci_lib_mock.assert_called_with(0, 0)
    def test_set_device_max_rate_fail(self):
        """Rate changes on an unknown slot raise InvalidPciSlotError."""
        with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent.pci_lib."
                        "PciDeviceIPWrapper.set_vf_max_rate"):
            self.assertRaises(exc.InvalidPciSlotError,
                              self.emb_switch.set_device_max_rate,
                              self.WRONG_PCI_SLOT, 1000)
    def test_get_pci_device(self):
        """get_pci_device() returns the MAC assigned to a known slot."""
        with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent.pci_lib."
                        "PciDeviceIPWrapper.get_assigned_macs",
                        return_value={0: self.ASSIGNED_MAC}),\
            mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent."
                       "eswitch_manager.PciOsWrapper.is_assigned_vf",
                       return_value=True):
            result = self.emb_switch.get_pci_device(self.PCI_SLOT)
            self.assertEqual(self.ASSIGNED_MAC, result)
    def test_get_pci_device_fail(self):
        """get_pci_device() returns None for a slot this switch lacks."""
        with mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent.pci_lib."
                        "PciDeviceIPWrapper.get_assigned_macs",
                        return_value=[self.ASSIGNED_MAC]),\
            mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent."
                       "eswitch_manager.PciOsWrapper.is_assigned_vf",
                       return_value=True):
            result = self.emb_switch.get_pci_device(self.WRONG_PCI_SLOT)
            self.assertIsNone(result)
    def test_get_pci_list(self):
        """get_pci_slot_list() exposes every scanned PCI slot."""
        result = self.emb_switch.get_pci_slot_list()
        self.assertEqual([tup[0] for tup in self.SCANNED_DEVICES],
                         sorted(result))
class TestPciOsWrapper(base.BaseTestCase):
    """Unit tests for esm.PciOsWrapper's sysfs-based VF discovery helpers."""
    DEV_NAME = "p7p1"
    VF_INDEX = 1
    # Simulated sysfs directory listing; only "virtfn<N>" entries
    # describe virtual functions.
    DIR_CONTENTS = [
        "mlx4_port1",
        "virtfn0",
        "virtfn1",
        "virtfn2"
    ]
    DIR_CONTENTS_NO_MATCH = [
        "mlx4_port1",
        "mlx4_port1"
    ]
    # virtfn symlink name -> relative PCI-address target, as in sysfs.
    LINKS = {
        "virtfn0": "../0000:04:00.1",
        "virtfn1": "../0000:04:00.2",
        "virtfn2": "../0000:04:00.3"
    }
    # Expected (pci_slot, vf_index) pairs derived from LINKS.
    PCI_SLOTS = [
        ('0000:04:00.1', 0),
        ('0000:04:00.2', 1),
        ('0000:04:00.3', 2)
    ]
    def test_scan_vf_devices(self):
        """scan_vf_devices() maps virtfn symlinks to (pci_slot, vf_index)."""
        def _get_link(file_path):
            file_name = os.path.basename(file_path)
            return self.LINKS[file_name]
        with mock.patch("os.path.isdir", return_value=True),\
            mock.patch("os.listdir", return_value=self.DIR_CONTENTS),\
            mock.patch("os.path.islink", return_value=True),\
            mock.patch("os.readlink", side_effect=_get_link):
            result = esm.PciOsWrapper.scan_vf_devices(self.DEV_NAME)
            self.assertEqual(self.PCI_SLOTS, result)
    def test_scan_vf_devices_no_dir(self):
        """A missing sysfs directory raises InvalidDeviceError."""
        with mock.patch("os.path.isdir", return_value=False):
            self.assertRaises(exc.InvalidDeviceError,
                              esm.PciOsWrapper.scan_vf_devices,
                              self.DEV_NAME)
    def test_scan_vf_devices_no_content(self):
        """An empty sysfs directory raises InvalidDeviceError."""
        with mock.patch("os.path.isdir", return_value=True),\
            mock.patch("os.listdir", return_value=[]):
            self.assertRaises(exc.InvalidDeviceError,
                              esm.PciOsWrapper.scan_vf_devices,
                              self.DEV_NAME)
    def test_scan_vf_devices_no_match(self):
        """A directory without virtfn entries raises InvalidDeviceError."""
        with mock.patch("os.path.isdir", return_value=True),\
            mock.patch("os.listdir",
                       return_value=self.DIR_CONTENTS_NO_MATCH):
            self.assertRaises(exc.InvalidDeviceError,
                              esm.PciOsWrapper.scan_vf_devices,
                              self.DEV_NAME)
    @mock.patch("os.listdir", side_effect=OSError())
    def test_is_assigned_vf_true(self, *args):
        """A failing net-device listing means the VF is treated as assigned."""
        self.assertTrue(esm.PciOsWrapper.is_assigned_vf(
            self.DEV_NAME, self.VF_INDEX))
    @mock.patch("os.listdir", return_value=[DEV_NAME, "eth1"])
    @mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent.pci_lib."
                "PciDeviceIPWrapper.is_macvtap_assigned", return_value=False)
    def test_is_assigned_vf_false(self, *args):
        """A VF with a visible kernel netdev and no macvtap is unassigned."""
        self.assertFalse(esm.PciOsWrapper.is_assigned_vf(
            self.DEV_NAME, self.VF_INDEX))
    @mock.patch("os.listdir", return_value=["eth0", "eth1"])
    @mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent.pci_lib."
                "PciDeviceIPWrapper.is_macvtap_assigned", return_value=True)
    def test_is_assigned_vf_macvtap(
            self, mock_is_macvtap_assigned, *args):
        """When no netdev matches, the macvtap check is consulted."""
        esm.PciOsWrapper.is_assigned_vf(self.DEV_NAME, self.VF_INDEX)
        # Bug fix: the original used mock.called_with(...), which is not an
        # assertion -- accessing an unknown attribute on a Mock just creates
        # a child mock, so the check silently passed regardless of the call.
        mock_is_macvtap_assigned.assert_called_with(self.VF_INDEX, "eth0")
    @mock.patch("os.listdir", side_effect=OSError())
    @mock.patch("neutron.plugins.ml2.drivers.mech_sriov.agent.pci_lib."
                "PciDeviceIPWrapper.is_macvtap_assigned")
    def test_is_assigned_vf_macvtap_failure(
            self, mock_is_macvtap_assigned, *args):
        """A failing net-device listing short-circuits the macvtap check."""
        esm.PciOsWrapper.is_assigned_vf(self.DEV_NAME, self.VF_INDEX)
        self.assertFalse(mock_is_macvtap_assigned.called)
| MaximNevrov/neutron | neutron/tests/unit/plugins/ml2/drivers/mech_sriov/agent/test_eswitch_manager.py | Python | apache-2.0 | 23,938 |
#!/usr/bin/env python
"""Packaging script for the Procurecarros Python client SDK.

Reads the package version from procurecarros_sdk/__init__.py, the long
description from README.rst + HISTORY.rst, and the dependency pins from
requirements.txt, then hands everything to setuptools.
"""
import os
import re
import sys
from codecs import open

try:
    from setuptools import setup
except ImportError:  # very old environments without setuptools
    from distutils.core import setup

# Convenience shortcut: `python setup.py publish` builds and uploads an sdist.
if sys.argv[-1] == 'publish':
    os.system('python setup.py sdist upload')
    sys.exit()

# NOTE(review): `packages` lists 'procurecarros' but the version lookup and
# package_data below refer to 'procurecarros_sdk' -- confirm which directory
# actually holds the package.
packages = [
    'procurecarros'
    # 'procurecarros.sdk',
    # 'procurecarros.sdk.packages',
    # 'procurecarros.sdk.contrib',
    # 'procurecarros.sdk.util'
]

requires = []
version = ''

# Extract __version__ with a regex instead of importing the package, so the
# build does not require the SDK's dependencies to be installed yet.
with open('procurecarros_sdk/__init__.py', 'r') as fd:
    version = re.search(r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]',
                        fd.read(), re.MULTILINE).group(1)

if not version:
    raise RuntimeError('Cannot find version information')

with open('README.rst', 'r', 'utf-8') as f:
    readme = f.read()
with open('HISTORY.rst', 'r', 'utf-8') as f:
    history = f.read()

# Bug fix: file objects expose readlines(), not readLines(), so the original
# call raised AttributeError.  read().splitlines() also strips the trailing
# newline from each line, leaving clean requirement specifiers.
with open('requirements.txt', 'r') as fr:
    requires = fr.read().splitlines()

setup(
    name='procurecarros-client-python-sdk',
    version=version,
    description='Python Library for Procurecarros.',
    long_description=readme + '\n\n' + history,
    author='Equipe Procurecarros',
    author_email='equipe@procurecarros.com',
    url='http://www.procurecarros.com',
    packages=packages,
    package_data={'': ['LICENSE', 'NOTICE'], 'procurecarros_sdk': ['*.pem']},
    package_dir={'requests': 'requests'},
    include_package_data=True,
    install_requires=requires,
    license='Apache 2.0',
    zip_safe=False,
    # A list rather than a tuple: tuples for `classifiers` are deprecated
    # by newer setuptools releases.
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'Natural Language :: English',
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4'
    ],
    extras_require={
        'security': ['pyOpenSSL', 'ndg-httpsclient', 'pyasn1'],
    },
)
| ProcureCarros/python_sdk | setup.py | Python | gpl-2.0 | 2,072 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.