| code (string, 2-1.05M chars) | repo_name (string, 5-104 chars) | path (string, 4-251 chars) | language (1 class) | license (15 classes) | size (int32, 2-1.05M) |
|---|---|---|---|---|---|
../../../../../../../share/pyshared/orca/scripts/apps/evolution/formatting.py
|
Alberto-Beralix/Beralix
|
i386-squashfs-root/usr/lib/python2.7/dist-packages/orca/scripts/apps/evolution/formatting.py
|
Python
|
gpl-3.0
| 77
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) Alex Urban (2019-2020)
#
# This file is part of GWpy.
#
# GWpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GWpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GWpy. If not, see <http://www.gnu.org/licenses/>.
"""Estimating the spectral contribution to inspiral range
We have seen how the binary neutron star (BNS) inspiral range of a
gravitational-wave detector can be measured directly from the strain
readout. In this example, we will estimate the average spectral
contribution to BNS range from the strain record surrounding GW170817
using :func:`gwpy.astro.range_spectrogram`.
"""
__author__ = 'Alex Urban <alexander.urban@ligo.org>'
# First, we need to load some data. As before, we can `fetch` the
# `public data <https://www.gw-openscience.org/catalog/>`__
# around the GW170817 BNS merger:
from gwpy.timeseries import TimeSeries
l1 = TimeSeries.fetch_open_data('L1', 1187006834, 1187010930, tag='C02')
# Then, we can calculate a `Spectrogram` of the inspiral range
# amplitude spectrum:
from gwpy.astro import range_spectrogram
l1spec = range_spectrogram(l1, 30, fftlength=4, fmin=15, fmax=500) ** (1./2)
# We can plot this `Spectrogram` to visualise spectral variation in
# LIGO-Livingston's sensitivity in the hour or so surrounding GW170817:
plot = l1spec.plot(figsize=(12, 5))
ax = plot.gca()
ax.set_yscale('log')
ax.set_ylim(15, 500)
ax.set_title('LIGO-Livingston sensitivity to BNS around GW170817')
ax.set_epoch(1187008882) # <- set 0 on plot to GW170817
ax.colorbar(cmap='cividis', clim=(0, 16),
label='BNS range amplitude spectral density '
r'[Mpc/$\sqrt{\mathrm{Hz}}$]')
plot.show()
# Note: the extreme dip in sensitivity near GW170817 is caused by a
# loud, transient noise event; see `Phys. Rev. Lett. vol. 119, p.
# 161101 <http://doi.org/10.1103/PhysRevLett.119.161101>`_ for more
# information.
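# As an optional complementary check (a sketch only; it assumes
# :func:`gwpy.astro.range_timeseries` accepts the same stride/FFT/frequency
# arguments as ``range_spectrogram`` above), the frequency-integrated BNS
# range can also be plotted as a time series:
from gwpy.astro import range_timeseries
l1range = range_timeseries(l1, 30, fftlength=4, fmin=15, fmax=500)
rplot = l1range.plot(figsize=(12, 4))
rax = rplot.gca()
rax.set_ylabel('BNS range [Mpc]')
rax.set_epoch(1187008882)  # <- set 0 on plot to GW170817
rplot.show()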
|
areeda/gwpy
|
examples/miscellaneous/range-spectrogram.py
|
Python
|
gpl-3.0
| 2,377
|
# coding: utf-8
from django.utils.translation import ugettext_lazy as _
class MultiFieldsValidator(object):
fields = ()
error_messages = {
'invalid' : _(u'Value is not valid.')
}
def raise_error(self, form, error="invalid"):
for field in self.fields:
form._errors[field] = form.error_class([self.error_messages[error]])
def validate(self, cleaned_data, form):
return cleaned_data
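# Minimal usage sketch (hypothetical subclass and field names, not part of
# django-simple-utilities): flag both fields when their values differ.
class PasswordsMatchValidator(MultiFieldsValidator):
    fields = ('password', 'password_confirm')
    error_messages = {
        'invalid': _(u'Passwords do not match.'),
    }
    def validate(self, cleaned_data, form):
        if cleaned_data.get('password') != cleaned_data.get('password_confirm'):
            # raise_error() attaches the message to every field in `fields`
            self.raise_error(form)
            cleaned_data.pop('password', None)
            cleaned_data.pop('password_confirm', None)
        return cleaned_data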
|
matllubos/django-simple-utilities
|
utilities/forms/validators.py
|
Python
|
gpl-3.0
| 479
|
# ------------------- Information --------------------- #
# Author: Joey Dumont <joey.dumont@gmail.com> #
# Date created: October 18th, 2013 #
# Date mod. October 18th, 2013 #
# Description: We plot the times it took to compute #
# sets of Wigner symbols of different #
# sizes. #
# ----------------------------------------------------- #
# --------------- Modules Importation ----------------- #
from pylab import *
from matplotlib.ticker import AutoMinorLocator
# ----------------- Data Importation ------------------ #
prec = loadtxt("precisionSph.dat")
# ------------------ Plotting data -------------------- #
fig1 = figure(figsize=(7,3))
ax1 = fig1.add_subplot(111)
ax1.plot(prec[:,0],prec[:,1], 'b-')
ax1.plot(prec[:,0],prec[:,2], 'r')
ax1.plot(prec[:,0],prec[:,3], 'k')
minorLocator = AutoMinorLocator()
ax1.xaxis.set_minor_locator(minorLocator)
ax1.set_xlabel(r"$\ell$")
ax1.set_ylabel("Error")
ax1.set_yscale('log')
fig1.savefig("SphPrecision.pdf", bbox_inches="tight")
|
valandil/msc_thesis
|
figs/backmatter/sphericalHarmonics.py
|
Python
|
gpl-3.0
| 998
|
# -*- encoding: utf-8 -*-
from abjad import *
def test_pitchtools_PitchClass_is_pitch_class_number_01():
assert pitchtools.PitchClass.is_pitch_class_number(0)
assert pitchtools.PitchClass.is_pitch_class_number(0.5)
assert pitchtools.PitchClass.is_pitch_class_number(11)
assert pitchtools.PitchClass.is_pitch_class_number(11.5)
def test_pitchtools_PitchClass_is_pitch_class_number_02():
assert not pitchtools.PitchClass.is_pitch_class_number(-1)
assert not pitchtools.PitchClass.is_pitch_class_number(-0.5)
assert not pitchtools.PitchClass.is_pitch_class_number(12)
assert not pitchtools.PitchClass.is_pitch_class_number(99)
assert not pitchtools.PitchClass.is_pitch_class_number('foo')
|
mscuthbert/abjad
|
abjad/tools/pitchtools/test/test_pitchtools_PitchClass_is_pitch_class_number.py
|
Python
|
gpl-3.0
| 727
|
# FST tests related classes
# Copyright (c) 2015, Qualcomm Atheros, Inc.
#
# This software may be distributed under the terms of the BSD license.
# See README for more details.
import logging
import os
import signal
import time
import re
import hostapd
import wpaspy
import utils
from wpasupplicant import WpaSupplicant
import fst_test_common
logger = logging.getLogger()
def parse_fst_iface_event(ev):
"""Parses FST iface event that comes as a string, e.g.
"<3>FST-EVENT-IFACE attached ifname=wlan9 group=fstg0"
Returns a dictionary with parsed "event_type", "ifname", and "group"; or
None if not an FST event or can't be parsed."""
event = {}
if ev.find("FST-EVENT-IFACE") == -1:
return None
if ev.find("attached") != -1:
event['event_type'] = 'attached'
elif ev.find("detached") != -1:
event['event_type'] = 'detached'
else:
return None
f = re.search("ifname=(\S+)", ev)
if f is not None:
event['ifname'] = f.group(1)
f = re.search("group=(\S+)", ev)
if f is not None:
event['group'] = f.group(1)
return event
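# Illustrative example for the parser above (sample string taken from the
# docstring):
#   parse_fst_iface_event("<3>FST-EVENT-IFACE attached ifname=wlan9 group=fstg0")
#   -> {'event_type': 'attached', 'ifname': 'wlan9', 'group': 'fstg0'}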
def parse_fst_session_event(ev):
"""Parses FST session event that comes as a string, e.g.
"<3>FST-EVENT-SESSION event_type=EVENT_FST_SESSION_STATE session_id=0 reason=REASON_STT"
Returns a dictionary with parsed "type", "id", and "reason"; or None if not
an FST event or can't be parsed."""
event = {}
if ev.find("FST-EVENT-SESSION") == -1:
return None
event['new_state'] = '' # The field always exists in the dictionary
f = re.search("event_type=(\S+)", ev)
if f is None:
return None
event['type'] = f.group(1)
f = re.search("session_id=(\d+)", ev)
if f is not None:
event['id'] = f.group(1)
f = re.search("old_state=(\S+)", ev)
if f is not None:
event['old_state'] = f.group(1)
f = re.search("new_state=(\S+)", ev)
if f is not None:
event['new_state'] = f.group(1)
f = re.search("reason=(\S+)", ev)
if f is not None:
event['reason'] = f.group(1)
return event
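# Illustrative example for the parser above (sample string taken from the
# docstring):
#   parse_fst_session_event("<3>FST-EVENT-SESSION event_type=EVENT_FST_SESSION_STATE"
#                           " session_id=0 reason=REASON_STT")
#   -> {'new_state': '', 'type': 'EVENT_FST_SESSION_STATE', 'id': '0',
#       'reason': 'REASON_STT'}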
def start_two_ap_sta_pairs(apdev, rsn=False):
"""auxiliary function that creates two pairs of APs and STAs"""
ap1 = FstAP(apdev[0]['ifname'], 'fst_11a', 'a',
fst_test_common.fst_test_def_chan_a,
fst_test_common.fst_test_def_group,
fst_test_common.fst_test_def_prio_low,
fst_test_common.fst_test_def_llt, rsn=rsn)
ap1.start()
ap2 = FstAP(apdev[1]['ifname'], 'fst_11g', 'g',
fst_test_common.fst_test_def_chan_g,
fst_test_common.fst_test_def_group,
fst_test_common.fst_test_def_prio_high,
fst_test_common.fst_test_def_llt, rsn=rsn)
ap2.start()
sta1 = FstSTA('wlan5',
fst_test_common.fst_test_def_group,
fst_test_common.fst_test_def_prio_low,
fst_test_common.fst_test_def_llt, rsn=rsn)
sta1.start()
sta2 = FstSTA('wlan6',
fst_test_common.fst_test_def_group,
fst_test_common.fst_test_def_prio_high,
fst_test_common.fst_test_def_llt, rsn=rsn)
sta2.start()
return ap1, ap2, sta1, sta2
def stop_two_ap_sta_pairs(ap1, ap2, sta1, sta2):
sta1.stop()
sta2.stop()
ap1.stop()
ap2.stop()
fst_test_common.fst_clear_regdom()
def connect_two_ap_sta_pairs(ap1, ap2, dev1, dev2, rsn=False):
"""Connects a pair of stations, each one to a separate AP"""
dev1.scan(freq=fst_test_common.fst_test_def_freq_a)
dev2.scan(freq=fst_test_common.fst_test_def_freq_g)
if rsn:
dev1.connect(ap1, psk="12345678",
scan_freq=fst_test_common.fst_test_def_freq_a)
dev2.connect(ap2, psk="12345678",
scan_freq=fst_test_common.fst_test_def_freq_g)
else:
dev1.connect(ap1, key_mgmt="NONE",
scan_freq=fst_test_common.fst_test_def_freq_a)
dev2.connect(ap2, key_mgmt="NONE",
scan_freq=fst_test_common.fst_test_def_freq_g)
def disconnect_two_ap_sta_pairs(ap1, ap2, dev1, dev2):
dev1.disconnect()
dev2.disconnect()
def external_sta_connect(sta, ap, **kwargs):
"""Connects the external station to the given AP"""
if not isinstance(sta, WpaSupplicant):
raise Exception("Bad STA object")
if not isinstance(ap, FstAP):
raise Exception("Bad AP object to connect to")
hap = ap.get_instance()
sta.connect(ap.get_ssid(), **kwargs)
def disconnect_external_sta(sta, ap, check_disconnect=True):
"""Disconnects the external station from the AP"""
if not isinstance(sta, WpaSupplicant):
raise Exception("Bad STA object")
if not isinstance(ap, FstAP):
raise Exception("Bad AP object to connect to")
sta.request("DISCONNECT")
if check_disconnect:
hap = ap.get_instance()
ev = hap.wait_event(["AP-STA-DISCONNECTED"], timeout=10)
if ev is None:
raise Exception("No disconnection event received from %s" % ap.get_ssid())
#
# FstDevice class
# This is the parent class for the AP (FstAP) and STA (FstSTA) that implements
# FST functionality.
#
class FstDevice:
def __init__(self, iface, fst_group, fst_pri, fst_llt=None, rsn=False):
self.iface = iface
self.fst_group = fst_group
self.fst_pri = fst_pri
self.fst_llt = fst_llt # None llt means no llt parameter will be set
self.instance = None # Hostapd/WpaSupplicant instance
self.peer_obj = None # Peer object, must be a FstDevice child object
self.new_peer_addr = None # Peer MAC address for new session iface
self.old_peer_addr = None # Peer MAC address for old session iface
self.role = 'initiator' # Role: initiator/responder
s = self.grequest("FST-MANAGER TEST_REQUEST IS_SUPPORTED")
if not s.startswith('OK'):
raise utils.HwsimSkip("FST not supported")
self.rsn = rsn
def ifname(self):
return self.iface
def get_instance(self):
"""Gets the Hostapd/WpaSupplicant instance"""
raise Exception("Virtual get_instance() called!")
def get_own_mac_address(self):
"""Gets the device's own MAC address"""
raise Exception("Virtual get_own_mac_address() called!")
def get_new_peer_addr(self):
return self.new_peer_addr
def get_old_peer_addr(self):
return self.old_peer_addr
def get_actual_peer_addr(self):
"""Gets the peer address. A connected AP/station address is returned."""
raise Exception("Virtual get_actual_peer_addr() called!")
def grequest(self, req):
"""Send request on the global control interface"""
raise Exception("Virtual grequest() called!")
def wait_gevent(self, events, timeout=None):
"""Wait for a list of events on the global interface"""
raise Exception("Virtual wait_gevent() called!")
def request(self, req):
"""Issue a request to the control interface"""
h = self.get_instance()
return h.request(req)
def wait_event(self, events, timeout=None):
"""Wait for an event from the control interface"""
h = self.get_instance()
if timeout is not None:
return h.wait_event(events, timeout=timeout)
else:
return h.wait_event(events)
def set_old_peer_addr(self, peer_addr=None):
"""Sets the peer address"""
if peer_addr is not None:
self.old_peer_addr = peer_addr
else:
self.old_peer_addr = self.get_actual_peer_addr()
def set_new_peer_addr(self, peer_addr=None):
"""Sets the peer address"""
if peer_addr is not None:
self.new_peer_addr = peer_addr
else:
self.new_peer_addr = self.get_actual_peer_addr()
def add_peer(self, obj, old_peer_addr=None, new_peer_addr=None):
"""Add peer for FST session(s). 'obj' is a FstDevice subclass object.
The method must be called before add_session().
If peer_addr is not specified, the address of the currently connected
station is used."""
if not isinstance(obj, FstDevice):
raise Exception("Peer must be a FstDevice object")
self.peer_obj = obj
self.set_old_peer_addr(old_peer_addr)
self.set_new_peer_addr(new_peer_addr)
def get_peer(self):
"""Returns peer object"""
return self.peer_obj
def set_fst_parameters(self, group_id=None, pri=None, llt=None):
"""Change/set new FST parameters. Can be used to start FST sessions with
different FST parameters than defined in the configuration file."""
if group_id is not None:
self.fst_group = group_id
if pri is not None:
self.fst_pri = pri
if llt is not None:
self.fst_llt = llt
def get_local_mbies(self, ifname=None):
if_name = ifname if ifname is not None else self.iface
return self.grequest("FST-MANAGER TEST_REQUEST GET_LOCAL_MBIES " + if_name)
def add_session(self):
"""Adds an FST session. add_peer() must be called calling this
function"""
if self.peer_obj is None:
raise Exception("Peer wasn't added before starting session")
self.dump_monitor()
grp = ' ' + self.fst_group if self.fst_group != '' else ''
sid = self.grequest("FST-MANAGER SESSION_ADD" + grp)
sid = sid.strip()
if sid.startswith("FAIL"):
raise Exception("Cannot add FST session with groupid ==" + grp)
self.dump_monitor()
return sid
def set_session_param(self, params):
request = "FST-MANAGER SESSION_SET"
if params is not None and params != '':
request = request + ' ' + params
return self.grequest(request)
def get_session_params(self, sid):
request = "FST-MANAGER SESSION_GET " + sid
res = self.grequest(request)
if res.startswith("FAIL"):
return None
params = {}
for i in res.splitlines():
p = i.split('=')
params[p[0]] = p[1]
return params
def iface_peers(self, ifname):
grp = self.fst_group if self.fst_group != '' else ''
res = self.grequest("FST-MANAGER IFACE_PEERS " + grp + ' ' + ifname)
if res.startswith("FAIL"):
return None
return res.splitlines()
def get_peer_mbies(self, ifname, peer_addr):
return self.grequest("FST-MANAGER GET_PEER_MBIES %s %s" % (ifname, peer_addr))
def list_ifaces(self):
grp = self.fst_group if self.fst_group != '' else ''
res = self.grequest("FST-MANAGER LIST_IFACES " + grp)
if res.startswith("FAIL"):
return None
ifaces = []
for i in res.splitlines():
p = i.split(':')
iface = {}
iface['name'] = p[0]
iface['priority'] = p[1]
iface['llt'] = p[2]
ifaces.append(iface)
return ifaces
def list_groups(self):
res = self.grequest("FST-MANAGER LIST_GROUPS")
if res.startswith("FAIL"):
return None
return res.splitlines()
def configure_session(self, sid, new_iface, old_iface=None):
"""Calls session_set for a number of parameters some of which are stored
in "self" while others are passed to this function explicitly. If
old_iface is None, current iface is used; if old_iface is an empty
string."""
self.dump_monitor()
oldiface = old_iface if old_iface is not None else self.iface
s = self.set_session_param(sid + ' old_ifname=' + oldiface)
if not s.startswith("OK"):
raise Exception("Cannot set FST session old_ifname: " + s)
if new_iface is not None:
s = self.set_session_param(sid + " new_ifname=" + new_iface)
if not s.startswith("OK"):
raise Exception("Cannot set FST session new_ifname:" + s)
if self.new_peer_addr is not None and self.new_peer_addr != '':
s = self.set_session_param(sid + " new_peer_addr=" + self.new_peer_addr)
if not s.startswith("OK"):
raise Exception("Cannot set FST session peer address:" + s + " (new)")
if self.old_peer_addr is not None and self.old_peer_addr != '':
s = self.set_session_param(sid + " old_peer_addr=" + self.old_peer_addr)
if not s.startswith("OK"):
raise Exception("Cannot set FST session peer address:" + s + " (old)")
if self.fst_llt is not None and self.fst_llt != '':
s = self.set_session_param(sid + " llt=" + self.fst_llt)
if not s.startswith("OK"):
raise Exception("Cannot set FST session llt:" + s)
self.dump_monitor()
def send_iface_attach_request(self, ifname, group, llt, priority):
request = "FST-ATTACH " + ifname + ' ' + group
if llt is not None:
request += " llt=" + llt
if priority is not None:
request += " priority=" + priority
res = self.grequest(request)
if not res.startswith("OK"):
raise Exception("Cannot attach FST iface: " + res)
def send_iface_detach_request(self, ifname):
res = self.grequest("FST-DETACH " + ifname)
if not res.startswith("OK"):
raise Exception("Cannot detach FST iface: " + res)
def send_session_setup_request(self, sid):
s = self.grequest("FST-MANAGER SESSION_INITIATE " + sid)
if not s.startswith('OK'):
raise Exception("Cannot send setup request: %s" % s)
return s
def send_session_setup_response(self, sid, response):
request = "FST-MANAGER SESSION_RESPOND " + sid + " " + response
s = self.grequest(request)
if not s.startswith('OK'):
raise Exception("Cannot send setup response: %s" % s)
return s
def send_test_session_setup_request(self, fsts_id,
additional_parameter=None):
request = "FST-MANAGER TEST_REQUEST SEND_SETUP_REQUEST " + fsts_id
if additional_parameter is not None:
request += " " + additional_parameter
s = self.grequest(request)
if not s.startswith('OK'):
raise Exception("Cannot send FST setup request: %s" % s)
return s
def send_test_session_setup_response(self, fsts_id,
response, additional_parameter=None):
request = "FST-MANAGER TEST_REQUEST SEND_SETUP_RESPONSE " + fsts_id + " " + response
if additional_parameter is not None:
request += " " + additional_parameter
s = self.grequest(request)
if not s.startswith('OK'):
raise Exception("Cannot send FST setup response: %s" % s)
return s
def send_test_ack_request(self, fsts_id):
s = self.grequest("FST-MANAGER TEST_REQUEST SEND_ACK_REQUEST " + fsts_id)
if not s.startswith('OK'):
raise Exception("Cannot send FST ack request: %s" % s)
return s
def send_test_ack_response(self, fsts_id):
s = self.grequest("FST-MANAGER TEST_REQUEST SEND_ACK_RESPONSE " + fsts_id)
if not s.startswith('OK'):
raise Exception("Cannot send FST ack response: %s" % s)
return s
def send_test_tear_down(self, fsts_id):
s = self.grequest("FST-MANAGER TEST_REQUEST SEND_TEAR_DOWN " + fsts_id)
if not s.startswith('OK'):
raise Exception("Cannot send FST tear down: %s" % s)
return s
def get_fsts_id_by_sid(self, sid):
s = self.grequest("FST-MANAGER TEST_REQUEST GET_FSTS_ID " + sid)
if s == ' ' or s.startswith('FAIL'):
raise Exception("Cannot get fsts_id for sid == %s" % sid)
return int(s)
def wait_for_iface_event(self, timeout):
while True:
ev = self.wait_gevent(["FST-EVENT-IFACE"], timeout)
if ev is None:
raise Exception("No FST-EVENT-IFACE received")
event = parse_fst_iface_event(ev)
if event is None:
# We can't parse so it's not our event, wait for next one
continue
return event
def wait_for_session_event(self, timeout, events_to_ignore=[],
events_to_count=[]):
while True:
ev = self.wait_gevent(["FST-EVENT-SESSION"], timeout)
if ev is None:
raise Exception("No FST-EVENT-SESSION received")
event = parse_fst_session_event(ev)
if event is None:
# We can't parse so it's not our event, wait for next one
continue
if len(events_to_ignore) > 0:
if event['type'] in events_to_ignore:
continue
elif len(events_to_count) > 0:
if event['type'] not in events_to_count:
continue
return event
def initiate_session(self, sid, response="accept"):
"""Initiates FST session with given session id 'sid'.
'response' is the session respond answer: "accept", "reject", or a
special "timeout" value to skip the response in order to test session
timeouts.
Returns: "OK" - session has been initiated, otherwise the reason for the
reset: REASON_REJECT, REASON_STT."""
strsid = ' ' + sid if sid != '' else ''
s = self.grequest("FST-MANAGER SESSION_INITIATE"+ strsid)
if not s.startswith('OK'):
raise Exception("Cannot initiate fst session: %s" % s)
ev = self.peer_obj.wait_gevent(["FST-EVENT-SESSION"], timeout=5)
if ev is None:
raise Exception("No FST-EVENT-SESSION received")
# We got FST event
event = parse_fst_session_event(ev)
if event is None:
raise Exception("Unrecognized FST event: %s" % ev)
if event['type'] != 'EVENT_FST_SETUP':
raise Exception("Expected FST_SETUP event, got: " + event['type'])
ev = self.peer_obj.wait_gevent(["FST-EVENT-SESSION"], timeout=5)
if ev is None:
raise Exception("No FST-EVENT-SESSION received")
event = parse_fst_session_event(ev)
if event is None:
raise Exception("Unrecognized FST event: %s" % ev)
if event['type'] != 'EVENT_FST_SESSION_STATE':
raise Exception("Expected EVENT_FST_SESSION_STATE event, got: " + event['type'])
if event['new_state'] != "SETUP_COMPLETION":
raise Exception("Expected new state SETUP_COMPLETION, got: " + event['new_state'])
if response == '':
return 'OK'
if response != "timeout":
s = self.peer_obj.grequest("FST-MANAGER SESSION_RESPOND "+ event['id'] + " " + response) # Or reject
if not s.startswith('OK'):
raise Exception("Error session_respond: %s" % s)
# Wait for EVENT_FST_SESSION_STATE events. We should get at least 2
# events. The 1st event will be EVENT_FST_SESSION_STATE
# old_state=INITIAL new_state=SETUP_COMPLETED. The 2nd event will be
# either EVENT_FST_ESTABLISHED with the session id or
# EVENT_FST_SESSION_STATE with new_state=INITIAL if the session was
# reset, the reason field will tell why.
result = ''
while result == '':
ev = self.wait_gevent(["FST-EVENT-SESSION"], timeout=5)
if ev is None:
break # No session event received
event = parse_fst_session_event(ev)
if event is None:
# We can't parse so it's not our event, wait for next one
continue
if event['type'] == 'EVENT_FST_ESTABLISHED':
result = "OK"
break
elif event['type'] == "EVENT_FST_SESSION_STATE":
if event['new_state'] == "INITIAL":
# Session was reset, the only reason to get back to initial
# state.
result = event['reason']
break
if result == '':
raise Exception("No event for session respond")
return result
def transfer_session(self, sid):
"""Transfers the session. 'sid' is the session id. 'hsta' is the
station-responder object.
Returns: REASON_SWITCH - the session has been transferred successfully
or a REASON_... reported by the reset event."""
request = "FST-MANAGER SESSION_TRANSFER"
self.dump_monitor()
if sid != '':
request += ' ' + sid
s = self.grequest(request)
if not s.startswith('OK'):
raise Exception("Cannot transfer fst session: %s" % s)
result = ''
while result == '':
ev = self.peer_obj.wait_gevent(["FST-EVENT-SESSION"], timeout=5)
if ev is None:
raise Exception("Missing session transfer event")
# We got FST event. We expect TRANSITION_CONFIRMED state and then
# INITIAL (reset) with the reason (e.g. "REASON_SWITCH").
# Right now we'll be waiting for the reset event and record the
# reason.
event = parse_fst_session_event(ev)
if event is None:
raise Exception("Unrecognized FST event: %s" % ev)
if event['new_state'] == 'INITIAL':
result = event['reason']
self.dump_monitor()
return result
def wait_for_tear_down(self):
ev = self.wait_gevent(["FST-EVENT-SESSION"], timeout=5)
if ev is None:
raise Exception("No FST-EVENT-SESSION received")
# We got FST event
event = parse_fst_session_event(ev)
if event is None:
raise Exception("Unrecognized FST event: %s" % ev)
if event['type'] != 'EVENT_FST_SESSION_STATE':
raise Exception("Expected EVENT_FST_SESSION_STATE event, got: " + event['type'])
if event['new_state'] != "INITIAL":
raise Exception("Expected new state INITIAL, got: " + event['new_state'])
if event['reason'] != 'REASON_TEARDOWN':
raise Exception("Expected reason REASON_TEARDOWN, got: " + event['reason'])
def teardown_session(self, sid):
"""Tears down FST session with a given session id ('sid')"""
strsid = ' ' + sid if sid != '' else ''
s = self.grequest("FST-MANAGER SESSION_TEARDOWN" + strsid)
if not s.startswith('OK'):
raise Exception("Cannot tear down fst session: %s" % s)
self.peer_obj.wait_for_tear_down()
def remove_session(self, sid, wait_for_tear_down=True):
"""Removes FST session with a given session id ('sid')"""
strsid = ' ' + sid if sid != '' else ''
s = self.grequest("FST-MANAGER SESSION_REMOVE" + strsid)
if not s.startswith('OK'):
raise Exception("Cannot remove fst session: %s" % s)
if wait_for_tear_down:
self.peer_obj.wait_for_tear_down()
def remove_all_sessions(self):
"""Removes FST session with a given session id ('sid')"""
grp = ' ' + self.fst_group if self.fst_group != '' else ''
s = self.grequest("FST-MANAGER LIST_SESSIONS" + grp)
if not s.startswith('FAIL'):
for sid in s.splitlines():
sid = sid.strip()
if len(sid) != 0:
self.remove_session(sid, wait_for_tear_down=False)
#
# FstAP class
#
class FstAP(FstDevice):
def __init__(self, iface, ssid, mode, chan, fst_group, fst_pri,
fst_llt=None, rsn=False):
"""If fst_group is empty, then FST parameters will not be set
If fst_llt is empty, the parameter will not be set and the default value
is expected to be configured."""
self.ssid = ssid
self.mode = mode
self.chan = chan
self.reg_ctrl = fst_test_common.HapdRegCtrl()
self.reg_ctrl.add_ap(iface, self.chan)
self.global_instance = hostapd.HostapdGlobal()
FstDevice.__init__(self, iface, fst_group, fst_pri, fst_llt, rsn)
def start(self, return_early=False):
"""Starts AP the "standard" way as it was intended by hostapd tests.
This will work only when FST supports fully dynamically loading
parameters in hostapd."""
params = {}
params['ssid'] = self.ssid
params['hw_mode'] = self.mode
params['channel'] = self.chan
params['country_code'] = 'US'
if self.rsn:
params['wpa'] = '2'
params['wpa_key_mgmt'] = 'WPA-PSK'
params['rsn_pairwise'] = 'CCMP'
params['wpa_passphrase'] = '12345678'
self.hapd = hostapd.add_ap(self.iface, params)
if not self.hapd.ping():
raise Exception("Could not ping FST hostapd")
self.reg_ctrl.start()
self.get_global_instance()
if return_early:
return self.hapd
if len(self.fst_group) != 0:
self.send_iface_attach_request(self.iface, self.fst_group,
self.fst_llt, self.fst_pri)
return self.hapd
def stop(self):
"""Removes the AP, To be used when dynamic fst APs are implemented in
hostapd."""
if len(self.fst_group) != 0:
self.remove_all_sessions()
try:
self.send_iface_detach_request(self.iface)
except Exception as e:
logger.info(str(e))
self.reg_ctrl.stop()
del self.global_instance
self.global_instance = None
def get_instance(self):
"""Return the Hostapd/WpaSupplicant instance"""
if self.instance is None:
self.instance = hostapd.Hostapd(self.iface)
return self.instance
def get_global_instance(self):
return self.global_instance
def get_own_mac_address(self):
"""Gets the device's own MAC address"""
h = self.get_instance()
status = h.get_status()
return status['bssid[0]']
def get_actual_peer_addr(self):
"""Gets the peer address. A connected station address is returned."""
# Use the device instance, the global control interface doesn't have
# station address
h = self.get_instance()
sta = h.get_sta(None)
if sta is None or 'addr' not in sta:
# Maybe station is not connected?
addr = None
else:
addr = sta['addr']
return addr
def grequest(self, req):
"""Send request on the global control interface"""
logger.debug("FstAP::grequest: " + req)
h = self.get_global_instance()
return h.request(req)
def wait_gevent(self, events, timeout=None):
"""Wait for a list of events on the global interface"""
h = self.get_global_instance()
if timeout is not None:
return h.wait_event(events, timeout=timeout)
else:
return h.wait_event(events)
def get_ssid(self):
return self.ssid
def dump_monitor(self):
"""Dump control interface monitor events"""
if self.instance:
self.instance.dump_monitor()
#
# FstSTA class
#
class FstSTA(FstDevice):
def __init__(self, iface, fst_group, fst_pri, fst_llt=None, rsn=False):
"""If fst_group is empty, then FST parameters will not be set
If fst_llt is empty, the parameter will not be set and the default value
is expected to be configured."""
FstDevice.__init__(self, iface, fst_group, fst_pri, fst_llt, rsn)
self.connected = None # FstAP object the station is connected to
def start(self):
"""Current implementation involves running another instance of
wpa_supplicant with fixed FST STA configurations. When any type of
dynamic STA loading is implemented, rewrite the function similarly to
FstAP."""
h = self.get_instance()
h.interface_add(self.iface, drv_params="force_connect_cmd=1")
if not h.global_ping():
raise Exception("Could not ping FST wpa_supplicant")
if len(self.fst_group) != 0:
self.send_iface_attach_request(self.iface, self.fst_group,
self.fst_llt, self.fst_pri)
return None
def stop(self):
"""Removes the STA. In a static (temporary) implementation does nothing,
the STA will be removed when the fst wpa_supplicant process is killed by
fstap.cleanup()."""
h = self.get_instance()
h.dump_monitor()
if len(self.fst_group) != 0:
self.remove_all_sessions()
self.send_iface_detach_request(self.iface)
h.dump_monitor()
h.interface_remove(self.iface)
h.close_ctrl()
del h
self.instance = None
def get_instance(self):
"""Return the Hostapd/WpaSupplicant instance"""
if self.instance is None:
self.instance = WpaSupplicant(global_iface='/tmp/wpas-wlan5')
return self.instance
def get_own_mac_address(self):
"""Gets the device's own MAC address"""
h = self.get_instance()
status = h.get_status()
return status['address']
def get_actual_peer_addr(self):
"""Gets the peer address. A connected station address is returned"""
h = self.get_instance()
status = h.get_status()
return status['bssid']
def grequest(self, req):
"""Send request on the global control interface"""
logger.debug("FstSTA::grequest: " + req)
h = self.get_instance()
return h.global_request(req)
def wait_gevent(self, events, timeout=None):
"""Wait for a list of events on the global interface"""
h = self.get_instance()
if timeout is not None:
return h.wait_global_event(events, timeout=timeout)
else:
return h.wait_global_event(events)
def scan(self, freq=None, no_wait=False, only_new=False):
"""Issue Scan with given parameters. Returns the BSS dictionary for the
AP found (the 1st BSS found. TODO: What if the AP required is not the
1st in list?) or None if no BSS found. None call be also a result of
no_wait=True. Note, request("SCAN_RESULTS") can be used to get all the
results at once."""
h = self.get_instance()
h.dump_monitor()
h.scan(None, freq, no_wait, only_new)
r = h.get_bss('0')
h.dump_monitor()
return r
def connect(self, ap, **kwargs):
"""Connects to the given AP"""
if not isinstance(ap, FstAP):
raise Exception("Bad AP object to connect to")
h = self.get_instance()
hap = ap.get_instance()
h.dump_monitor()
h.connect(ap.get_ssid(), **kwargs)
h.dump_monitor()
self.connected = ap
def connect_to_external_ap(self, ap, ssid, check_connection=True, **kwargs):
"""Connects to the given external AP"""
if not isinstance(ap, hostapd.Hostapd):
raise Exception("Bad AP object to connect to")
h = self.get_instance()
h.dump_monitor()
h.connect(ssid, **kwargs)
self.connected = ap
if check_connection:
ev = ap.wait_event(["AP-STA-CONNECTED"], timeout=10)
if ev is None:
self.connected = None
raise Exception("No connection event received from %s" % ssid)
h.dump_monitor()
def disconnect(self, check_disconnect=True):
"""Disconnects from the AP the station is currently connected to"""
if self.connected is not None:
h = self.get_instance()
h.dump_monitor()
h.request("DISCONNECT")
if check_disconnect:
hap = self.connected.get_instance()
ev = hap.wait_event(["AP-STA-DISCONNECTED"], timeout=10)
if ev is None:
raise Exception("No disconnection event received from %s" % self.connected.get_ssid())
h.dump_monitor()
self.connected = None
def disconnect_from_external_ap(self, check_disconnect=True):
"""Disconnects from the external AP the station is currently connected
to"""
if self.connected is not None:
h = self.get_instance()
h.dump_monitor()
h.request("DISCONNECT")
if check_disconnect:
hap = self.connected
ev = hap.wait_event(["AP-STA-DISCONNECTED"], timeout=10)
if ev is None:
raise Exception("No disconnection event received from AP")
h.dump_monitor()
self.connected = None
def dump_monitor(self):
"""Dump control interface monitor events"""
if self.instance:
self.instance.dump_monitor()
|
s0lst1c3/eaphammer
|
local/hostapd-eaphammer/tests/hwsim/fst_module_aux.py
|
Python
|
gpl-3.0
| 33,202
|
from django.contrib import admin
from . import models
|
sachinkum/Bal-Aveksha
|
WebServer/Authentications/admin.py
|
Python
|
gpl-3.0
| 54
|
import sys
import numpy
import scipy
import json
import itertools
import random
import os
from sys import stderr, exit, argv
from scipy.sparse.linalg import spsolve
from sklearn.metrics.pairwise import euclidean_distances
from nltk.stem import SnowballStemmer
def load_data_sparse(prefix) :
return scipy.sparse.csr_matrix((numpy.load(prefix + '.data.npy'),
numpy.load(prefix + '.indices.npy'),
numpy.load(prefix + '.indptr.npy')),
shape=tuple(numpy.load(prefix + '.shape.npy')))
def load_data() :
return load_data_sparse('linrel')
def load_features_json(fname) :
with open(fname) as f :
return json.load(f)
def load_features() :
return load_features_json('linrel_features.json')
def load_topics() :
return load_features_json('linrel_topics.json')
def get_machine_learning_articles() :
return [ int(k) for k,v in load_topics().iteritems() if 'stat.ML' in v ]
def order_keys_by_value(d) :
return [ i[0] for i in sorted(d.items(), key=lambda x : x[1], reverse=True) ]
def okapi_bm25(query, n, data, features) :
stemmer = SnowballStemmer('english')
query_terms = [ stemmer.stem(term) for term in query.lower().split() ]
tmp = {}
for qt in query_terms :
if qt not in features :
continue
findex = features[qt]
for aindex in numpy.nonzero(data[:, findex])[0] :
akey = aindex.item()
if akey not in tmp :
tmp[akey] = 1.0
tmp[akey] *= data[aindex,findex]
return order_keys_by_value(tmp)[:n]
def linrel(articles, feedback, n, data, features, mew=1.0, exploration_rate=0.1) :
assert len(articles) == len(feedback), "articles and feedback are not the same length"
X = data
num_articles = X.shape[0]
num_features = X.shape[1]
X_t = X[ numpy.array(articles) ]
X_tt = X_t.transpose()
I = mew * scipy.sparse.identity(num_features, format='dia')
W = spsolve((X_tt * X_t) + I, X_tt)
A = X * W
Y_t = numpy.matrix(feedback).transpose()
tmpA = numpy.array(A.todense())
normL2 = numpy.matrix(numpy.sqrt(numpy.sum(tmpA * tmpA, axis=1))).transpose()
# W * Y_t is the keyword weights
K = W * Y_t
mean = A * Y_t
variance = (exploration_rate / 2.0) * normL2
I_t = mean + variance
linrel_ordered = numpy.argsort(I_t.transpose()[0]).tolist()[0]
top_n = []
for i in linrel_ordered[::-1] :
if i not in articles :
top_n.append(i)
if len(top_n) == n :
break
return top_n, \
mean[ numpy.array(top_n) ].transpose().tolist()[0], \
variance[ numpy.array(top_n) ].transpose().tolist()[0]
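# For reference, the scoring above follows the LinRel update (a sketch of the
# algebra this function implements, not additional behaviour):
#   W   = (X_t^T X_t + mew * I)^-1 X_t^T     # regularised least squares
#   A   = X W                                # one weight row per article
#   I_t = A y_t + (exploration_rate / 2) * ||A_i||_2
# i.e. a predicted relevance (mean) plus an exploration bonus proportional to
# each row's L2 norm; the top-n not-yet-shown articles by I_t are returned.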
def average_distance_to_target(articles, target, distances) :
return numpy.min(distances[ numpy.array(articles) ])
def main() :
if len(argv) != 4 :
print >> stderr, "Usage: %s <article index|random> <output dir> <exploration rate>" % argv[0]
exit(1)
# parse input
try :
experiment_target = int(argv[1]) if argv[1] != 'random' else None
except ValueError :
print >> stderr, "Error, %s is not an integer!" % argv[1]
exit(1)
results_dir = argv[2]
if not os.path.isdir(results_dir) :
print >> stderr, "Error, %s is not a directory/does not exist!" % results_dir
exit(1)
try :
test_explore_rate = float(argv[3])
except ValueError :
print >> stderr, "Error, %s is not a float!" % argv[3]
exit(1)
# constants
num_shown = 10
num_iterations = 10
num_selections = range(num_shown + 1)
#test_explore_rate = 0.1
experiment_query = "machine learning"
# load the data
data = load_data()
num_articles = data.shape[0]
num_features = data.shape[1]
print "loaded %d articles x %d features" % (num_articles, num_features)
features = load_features()
print "loaded %d features" % len(features)
machine_learning_articles = get_machine_learning_articles()
num_ml_articles = len(machine_learning_articles)
print "loaded %d stat.ML articles" % num_ml_articles
# make sure the data is consistent
assert len(features) == num_features, \
"the number of features differed in the matrix vs the feature list"
# make sure the input is correct
assert experiment_target is None or experiment_target in machine_learning_articles, \
"article %d is not a machine learning article!" % experiment_target
# pick a random target document if needed
if not experiment_target :
experiment_target = machine_learning_articles[random.randint(0, num_ml_articles-1)]
print "random selection of target article %d" % experiment_target
# test if this has been done before
out_filename = results_filename(results_dir, experiment_target)
if os.path.exists(out_filename) :
print "%s exists, exiting..." % out_filename
exit(0)
# precalculate all the distances between all documents and the target
print "calculating distances to article %d" % experiment_target
experiment_distances = euclidean_distances(data, data[experiment_target, :])
# run an initial query using tfidf
print "running okapi bm25 with query '%s'" % experiment_query
experiment_articles = okapi_bm25(experiment_query, num_shown, data, features)
experiment_feedback = []
experiment_means = []
experiment_variances = []
# run for X iterations
for iteration in range(num_iterations) :
# count = 0
# print >> stderr, "iter %d - %d" % (iteration, count),
#
# best_feedback = None
# best_average_distance = sys.float_info.max
# best_version = -1
# user can pick 0 -> 10 articles
# for i in num_selections :
# # go through all possible combinations of feedback
# # to select what the user does
# for selections in itertools.combinations(range(num_shown), i) :
# feedback = [ 1.0 if i in selections else 0.0 for i in range(num_shown) ]
#
# # run linrel without exploration using generated feedback
# articles,means,variances = linrel(experiment_articles,
# experiment_feedback + feedback,
# num_shown,
# data,
# features,
# exploration_rate=0.0)
#
# # test if these documents are better than the 'current best feedback'
# # based on average (?) distance to target
# average_distance = average_distance_to_target(articles,
# experiment_target,
# experiment_distances)
#
# if average_distance < best_average_distance :
# best_version = count
# best_feedback = feedback
# best_average_distance = average_distance
#
# count += 1
# print >> stderr, "\riter %d - %d (best = %d, distance = %f)" % (iteration, count, best_version, best_average_distance),
remaining_articles = range(num_shown)
selected_articles = []
# BASE AVERAGE SHOULD BE WITH NO SELECTIONS
articles,means,variances = linrel(experiment_articles,
experiment_feedback + ([0.0] * num_shown),
num_shown,
data,
features,
exploration_rate=0.0)
current_average_distance = average_distance_to_target(articles,
experiment_target,
experiment_distances)
print >> stderr, "test %d: distance=%.3f selections=%s" % (iteration, current_average_distance, str(selected_articles))
for i in num_selections :
best_article = None
best_average_distance = sys.float_info.max
for a in remaining_articles :
tmp = selected_articles + [a]
feedback = [ 1.0 if i in tmp else 0.0 for i in range(num_shown) ]
# run linrel without exploration using generated feedback
articles,means,variances = linrel(experiment_articles,
experiment_feedback + feedback,
num_shown,
data,
features,
exploration_rate=0.0)
# test if these documents are better than the 'current best feedback'
# based on average (?) distance to target
average_distance = average_distance_to_target(articles,
experiment_target,
experiment_distances)
# keep a note of the article selection that resulted in the min distance to the target
if average_distance < best_average_distance :
best_article = a
best_average_distance = average_distance
print >> stderr, "test %d: distance=%.3f selections=%s" % (iteration, best_average_distance, str(selected_articles + [a]))
# test to see if the best article to add actually increases the distance
# to the target
if best_average_distance >= current_average_distance :
print >> stderr, "stop %d: distance=%.3f selections=%s" % (iteration, current_average_distance, str(selected_articles))
break
selected_articles.append(best_article)
remaining_articles.remove(best_article)
current_average_distance = best_average_distance
print >> stderr, ""
best_feedback = [ 1.0 if i in selected_articles else 0.0 for i in range(num_shown) ]
# we now know what to select, run the actual linrel code with
# actual exploration rate
experiment_feedback += best_feedback
articles,means,variances = linrel(experiment_articles,
experiment_feedback,
num_shown,
data,
features,
exploration_rate=test_explore_rate)
true_average_distance = average_distance_to_target(articles,
experiment_target,
experiment_distances)
print >> stderr, "iter %d: distance=%.3f selections=%s" % (iteration, true_average_distance, str(selected_articles))
print >> stderr, ""
# store everything
experiment_articles.extend(articles)
experiment_means.extend(means)
experiment_variances.extend(variances)
#print experiment_articles
#print [ int(i) for i in experiment_feedback ]
#print experiment_means
#print experiment_variances
guff = {
"out_filename" : out_filename,
"target" : experiment_target,
"query" : experiment_query,
"exploration_rate" : test_explore_rate,
"num_shown" : num_shown,
"num_iterations" : num_iterations,
"num_articles" : num_articles,
"num_features" : num_features
}
# save to file
write_pulp_results(guff,
experiment_articles,
experiment_feedback,
experiment_means,
experiment_variances)
return 0
def results_filename(results_dir, target) :
return os.path.join(results_dir, "results%d.txt" % target)
def write_pulp_results(settings, articles, feedback, means, variances) :
delimit = " "
header = ["iteration", "article", "feedback", "mean", "variance"]
filename = settings["out_filename"]
with open(filename, 'w') as f :
print >> f, "# " + " ".join([ "%s=%s" % (k, '"%s"' % v if isinstance(v, str) else str(v)) for k,v in settings.items() ])
print >> f, delimit.join(header)
iterations = sorted(range(settings["num_iterations"]) * settings["num_shown"])
feedback = [ int(i) for i in feedback ]
for i in zip(iterations, articles, feedback, means, variances) :
print >> f, "%d %d %d %e %e" % i
print "wrote %s" % filename
if __name__ == '__main__' :
try :
exit(main())
except KeyboardInterrupt :
print >> stderr, "Killed by User...\n"
exit(1)
|
genie9/pulp
|
pulp_simulator.py
|
Python
|
gpl-3.0
| 13,319
|
"""
Copyright (c) 2012-2020 RockStor, Inc. <http://rockstor.com>
This file is part of RockStor.
RockStor is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published
by the Free Software Foundation; either version 2 of the License,
or (at your option) any later version.
RockStor is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from smart_manager.models import CPUMetric
from smart_manager.serializers import CPUMetricSerializer
from generic_sprobe import GenericSProbeView
class CPUMetricView(GenericSProbeView):
serializer_class = CPUMetricSerializer
model_obj = CPUMetric
|
rockstor/rockstor-core
|
src/rockstor/smart_manager/views/cpu_util.py
|
Python
|
gpl-3.0
| 963
|
# -*- coding: utf-8 -*-
import base64
import os
import io
import six
from tempfile import _TemporaryFileWrapper
from pretty_bad_protocol._util import _STREAMLIKE_TYPES
from cryptography.exceptions import AlreadyFinalized
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.ciphers.algorithms import AES
from cryptography.hazmat.primitives.ciphers.modes import CTR
from cryptography.hazmat.primitives.ciphers import Cipher
class SecureTemporaryFile(_TemporaryFileWrapper, object):
"""Temporary file that provides on-the-fly encryption.
Buffering large submissions in memory as they come in requires too
much memory for too long a period. By writing the file to disk as it
comes in using a stream cipher, we are able to minimize memory usage
as submissions come in, while minimizing the chances of plaintext
recovery through forensic disk analysis. The key used to encrypt
each secure temporary file is also ephemeral, and is stored in
memory only for as long as needed.
Adapted from Globaleaks' GLSecureTemporaryFile:
https://github.com/globaleaks/GlobaLeaks/blob/master/backend/globaleaks/security.py#L35
WARNING: you can't use this like a normal file object. It supports
being appended to however many times you wish (although content may not be
overwritten), and then its contents may be read only once (although it may
be done in chunks) and only after it's been written to.
"""
AES_key_size = 256
AES_block_size = 128
def __init__(self, store_dir):
"""Generates an AES key and an initialization vector, and opens
a file in the `store_dir` directory with a
pseudorandomly-generated filename.
Args:
store_dir (str): the directory to create the secure
temporary file under.
Returns: self
"""
self.last_action = 'init'
self.create_key()
data = base64.urlsafe_b64encode(os.urandom(32))
if not six.PY2: # For Python3
self.tmp_file_id = data.decode('utf-8').strip('=')
else:
self.tmp_file_id = data.strip('=')
self.filepath = os.path.join(store_dir,
'{}.aes'.format(self.tmp_file_id))
self.file = io.open(self.filepath, 'w+b')
super(SecureTemporaryFile, self).__init__(self.file, self.filepath)
def create_key(self):
"""Generates a unique, pseudorandom AES key, stored ephemerally in
memory as an instance attribute. Its destruction is ensured by the
automatic nightly reboots of the SecureDrop application server combined
with the freed memory-overwriting PAX_MEMORY_SANITIZE feature of the
grsecurity-patched kernel it uses (for further details consult
https://github.com/freedomofpress/securedrop/pull/477#issuecomment-168445450).
"""
self.key = os.urandom(self.AES_key_size // 8)
self.iv = os.urandom(self.AES_block_size // 8)
self.initialize_cipher()
def initialize_cipher(self):
"""Creates the cipher-related objects needed for AES-CTR
encryption and decryption.
"""
self.cipher = Cipher(AES(self.key), CTR(self.iv), default_backend())
self.encryptor = self.cipher.encryptor()
self.decryptor = self.cipher.decryptor()
def write(self, data):
"""Write `data` to the secure temporary file. This method may be
called any number of times following instance initialization,
but after calling :meth:`read`, you cannot write to the file
again.
"""
if self.last_action == 'read':
raise AssertionError('You cannot write after reading!')
self.last_action = 'write'
# This is the old Python related code
if six.PY2: # noqa
if isinstance(data, unicode):
data = data.encode('utf-8')
elif isinstance(data, str): # noqa
# For Python 3
data = data.encode('utf-8')
self.file.write(self.encryptor.update(data))
def read(self, count=None):
"""Read `data` from the secure temporary file. This method may
be called any number of times following instance initialization
and after :meth:`write` has been called at least once, but not
before.
Before the first read operation, `seek(0, 0)` is called. So
while you can call this method any number of times, the full
contents of the file can only be read once. Additional calls to
read will return an empty str, which is desired behavior in that
it matches :class:`file` and because other modules depend on
this behavior to let them know they've reached the end of the
file.
Args:
count (int): the number of bytes to try to read from the
file from the current position.
"""
if self.last_action == 'init':
raise AssertionError('You must write before reading!')
if self.last_action == 'write':
self.seek(0, 0)
self.last_action = 'read'
if count:
return self.decryptor.update(self.file.read(count))
else:
return self.decryptor.update(self.file.read())
def close(self):
"""The __del__ method in tempfile._TemporaryFileWrapper (which
SecureTemporaryFile class inherits from) calls close() when the
temporary file is deleted.
"""
try:
self.decryptor.finalize()
except AlreadyFinalized:
pass
# Since tempfile._TemporaryFileWrapper.close() does other cleanup,
# (i.e. deleting the temp file on disk), we need to call it also.
super(SecureTemporaryFile, self).close()
# python-gnupg will not recognize our SecureTemporaryFile as a stream-like type
# and will attempt to call encode on it, thinking it's a string-like type. To
# avoid this we append it the list of stream-like types.
_STREAMLIKE_TYPES.append(_TemporaryFileWrapper)
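# Minimal usage sketch (illustrative only; the store directory is hypothetical
# and must already exist):
#
#   tmp = SecureTemporaryFile('/tmp/sd-store')
#   tmp.write(b'first chunk')
#   tmp.write(b'second chunk')      # appending is fine before the first read
#   plaintext = tmp.read()          # decrypted on the fly; one full pass only
#   tmp.close()                     # finalizes the cipher and cleans up the file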
|
ehartsuyker/securedrop
|
securedrop/secure_tempfile.py
|
Python
|
agpl-3.0
| 6,113
|
# -*- coding: utf-8 -*-
#
# 2017-01-23 Cornelius Kölbel <cornelius.koelbel@netknights.it>
# Avoid XML bombs
# 2016-07-17 Cornelius Kölbel <cornelius.koelbel@netknights.it>
# Add GPG encrypted import
# 2016-01-16 Cornelius Kölbel <cornelius.koelbel@netknights.it>
# Add PSKC import with pre shared key
# 2015-05-28 Cornelius Kölbel <cornelius.koelbel@netknights.it>
# Add PSKC import
# 2014-12-11 Cornelius Kölbel <cornelius@privacyidea.org>
# code cleanup during flask migration
# 2014-10-27 Cornelius Kölbel <cornelius@privacyidea.org>
# add parsePSKCdata
# 2014-05-08 Cornelius Kölbel
# License: AGPLv3
# contact: http://www.privacyidea.org
#
# Copyright (C) 2010 - 2014 LSE Leading Security Experts GmbH
# License: AGPLv3
# contact: http://www.linotp.org
# http://www.lsexperts.de
# linotp@lsexperts.de
#
# This code is free software; you can redistribute it and/or
# modify it under the terms of the GNU AFFERO GENERAL PUBLIC LICENSE
# License as published by the Free Software Foundation; either
# version 3 of the License, or any later version.
#
# This code is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU AFFERO GENERAL PUBLIC LICENSE for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with this program. If not, see <http://www.gnu.org/licenses/>.
#
'''This file is part of the privacyidea service
It is used for importing SafeNet (formerly Aladdin)
XML files that hold the OTP secrets for eToken PASS.
'''
import defusedxml.ElementTree as etree
import re
import binascii
import base64
from privacyidea.lib.utils import modhex_decode
from privacyidea.lib.utils import modhex_encode
from privacyidea.lib.log import log_with
from privacyidea.lib.crypto import aes_decrypt
from Crypto.Cipher import AES
from bs4 import BeautifulSoup
import traceback
from passlib.utils.pbkdf2 import pbkdf2
from privacyidea.lib.utils import to_utf8
import gnupg
import logging
log = logging.getLogger(__name__)
def _create_static_password(key_hex):
'''
According to yubikey manual 5.5.5 the static-ticket is the same
algorithm with no moving factors.
The msg_hex that is encoded with the AES key is
'000000000000ffffffffffffffff0f2e'
'''
msg_hex = "000000000000ffffffffffffffff0f2e"
msg_bin = binascii.unhexlify(msg_hex)
aes = AES.new(binascii.unhexlify(key_hex), AES.MODE_ECB)
password_bin = aes.encrypt(msg_bin)
password = modhex_encode(password_bin)
return password
class ImportException(Exception):
def __init__(self, description):
self.description = description
def __str__(self):
return ('{0!s}'.format(self.description))
def getTagName(elem):
match = re.match("^({.*?})(.*)$", elem.tag)
if match:
return match.group(2)
else:
return elem.tag
@log_with(log)
def parseOATHcsv(csv):
'''
(#653)
This function parses CSV data for oath token.
The file format is
serial, key, [hotp,totp], [6,8], [30|60],
serial, key, ocra, [ocra-suite]
It imports sha1 hotp or totp tokens.
It can also import ocra tokens.
The default is hotp
if totp is set, the default seconds are 30
if ocra is set, an ocra-suite is required, otherwise the default
ocra-suite is used.
It returns a dictionary:
{
serial: { 'type' : xxxx,
'otpkey' : xxxx,
'timeStep' : xxxx,
'otplen' : xxx,
'ocrasuite' : xxx }
}
'''
TOKENS = {}
csv_array = csv.split('\n')
log.debug("the file contains {0:d} tokens.".format(len(csv_array)))
for line in csv_array:
l = line.split(',')
serial = ""
key = ""
ttype = "hotp"
seconds = 30
otplen = 6
hashlib = "sha1"
ocrasuite = ""
serial = l[0].strip()
# check for empty line
if len(serial) > 0 and not serial.startswith('#'):
if len(l) >= 2:
key = l[1].strip()
if len(key) == 32:
hashlib = "sha256"
else:
log.error("the line {0!s} did not contain a hotp key".format(line))
continue
# ttype
if len(l) >= 3:
ttype = l[2].strip().lower()
# otplen or ocrasuite
if len(l) >= 4:
if ttype != "ocra":
otplen = int(l[3].strip())
elif ttype == "ocra":
ocrasuite = l[3].strip()
# timeStep
if len(l) >= 5:
seconds = int(l[4].strip())
log.debug("read the line |{0!s}|{1!s}|{2!s}|{3:d} {4!s}|{5:d}|".format(serial, key, ttype, otplen, ocrasuite, seconds))
TOKENS[serial] = {'type': ttype,
'otpkey': key,
'timeStep': seconds,
'otplen': otplen,
'hashlib': hashlib,
'ocrasuite': ocrasuite
}
return TOKENS
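# Illustrative example (serial and key are made up):
#   parseOATHcsv("tok001, 3132333435363738393031323334353637383930, totp, 6, 30")
#   returns
#   {'tok001': {'type': 'totp',
#               'otpkey': '3132333435363738393031323334353637383930',
#               'timeStep': 30, 'otplen': 6,
#               'hashlib': 'sha1', 'ocrasuite': ''}}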
@log_with(log)
def parseYubicoCSV(csv):
'''
This function reads the CSV data as created by the Yubico personalization
GUI.
Traditional Format:
Yubico OTP,12/11/2013 11:10,1,vvgutbiedkvi,
ab86c04de6a3,d26a7c0f85fdda28bd816e406342b214,,,0,0,0,0,0,0,0,0,0,0
OATH-HOTP,11.12.13 18:55,1,cccccccccccc,,
916821d3a138bf855e70069605559a206ba854cd,,,0,0,0,6,0,0,0,0,0,0
Static Password,11.12.13 19:08,1,,d5a3d50327dc,
0e8e37b0e38b314a56748c030f58d21d,,,0,0,0,0,0,0,0,0,0,0
Yubico Format:
# OATH mode
508326,,0,69cfb9202438ca68964ec3244bfa4843d073a43b,,2013-12-12T08:41:07,
1382042,,0,bf7efc1c8b6f23604930a9ce693bdd6c3265be00,,2013-12-12T08:41:17,
# Yubico mode
508326,cccccccccccc,83cebdfb7b93,a47c5bf9c152202f577be6721c0113af,,
2013-12-12T08:43:17,
# static mode
508326,,,9e2fd386224a7f77e9b5aee775464033,,2013-12-12T08:44:34,
column 0: serial
column 1: public ID in yubico mode
column 2: private ID in yubico mode, 0 in OATH mode, blank in static mode
column 3: AES key
BUMMER: The Yubico format does not contain the information
which slot of the token was written.
If no public ID or serial is given, we cannot import the token, as the
returned dictionary needs the token serial as a key.
It returns a dictionary with the new tokens to be created:
{
serial: { 'type' : yubico,
'otpkey' : xxxx,
'otplen' : xxx,
'description' : xxx
}
}
'''
TOKENS = {}
csv_array = csv.split('\n')
log.debug("the file contains {0:d} tokens.".format(len(csv_array)))
for line in csv_array:
l = line.split(',')
serial = ""
key = ""
otplen = 32
public_id = ""
slot = ""
if len(l) >= 6:
first_column = l[0].strip()
if first_column.lower() in ["yubico otp",
"oath-hotp",
"static password"]:
# traditional format
typ = l[0].strip()
slot = l[2].strip()
public_id = l[3].strip()
key = l[5].strip()
if public_id == "":
# Usually a "static password" does not have a public ID!
# So we would bail out here for static passwords.
log.warning("No public ID in line {0!r}".format(line))
continue
serial_int = int(binascii.hexlify(modhex_decode(public_id)),
16)
if typ.lower() == "yubico otp":
ttype = "yubikey"
otplen = 32 + len(public_id)
serial = "UBAM{0:08d}_{1!s}".format(serial_int, slot)
TOKENS[serial] = {'type': ttype,
'otpkey': key,
'otplen': otplen,
'description': public_id
}
elif typ.lower() == "oath-hotp":
'''
WARNING: this does not work out at the moment, since the
Yubico GUI either
1. creates a serial in the CSV, but then the serial is
always prefixed! We can not authenticate with this!
2. if it does not prefix the serial there is no serial in
the CSV! We can not import and assign the token!
'''
ttype = "hotp"
otplen = 6
serial = "UBOM{0:08d}_{1!s}".format(serial_int, slot)
TOKENS[serial] = {'type': ttype,
'otpkey': key,
'otplen': otplen,
'description': public_id
}
else:
log.warning("at the moment we do only support Yubico OTP"
" and HOTP: %r" % line)
continue
elif first_column.isdigit():
# first column is a number, (serial number), so we are
# in the yubico format
serial = first_column
# the yubico format does not specify a slot
slot = "X"
key = l[3].strip()
if l[2].strip() == "0":
# HOTP
typ = "hotp"
serial = "UBOM{0!s}_{1!s}".format(serial, slot)
otplen = 6
elif l[2].strip() == "":
# Static
typ = "pw"
serial = "UBSM{0!s}_{1!s}".format(serial, slot)
key = _create_static_password(key)
otplen = len(key)
log.warning("We can not enroll a static mode, since we do"
" not know the private identify and so we do"
" not know the static password.")
continue
else:
# Yubico
typ = "yubikey"
serial = "UBAM{0!s}_{1!s}".format(serial, slot)
public_id = l[1].strip()
otplen = 32 + len(public_id)
TOKENS[serial] = {'type': typ,
'otpkey': key,
'otplen': otplen,
'description': public_id
}
else:
log.warning("the line {0!r} did not contain a enough values".format(line))
continue
return TOKENS
@log_with(log)
def parseSafeNetXML(xml):
"""
This function parses XML data of a Aladdin/SafeNet XML
file for eToken PASS
It returns a dictionary of
serial : { otpkey , counter, type }
"""
TOKENS = {}
elem_tokencontainer = etree.fromstring(xml)
if getTagName(elem_tokencontainer) != "Tokens":
raise ImportException("No toplevel element Tokens")
for elem_token in list(elem_tokencontainer):
SERIAL = None
COUNTER = None
HMAC = None
DESCRIPTION = None
if getTagName(elem_token) == "Token":
SERIAL = elem_token.get("serial")
log.debug("Found token with serial {0!s}".format(SERIAL))
for elem_tdata in list(elem_token):
tag = getTagName(elem_tdata)
if "ProductName" == tag:
DESCRIPTION = elem_tdata.text
log.debug("The Token with the serial %s has the "
"productname %s" % (SERIAL, DESCRIPTION))
if "Applications" == tag:
for elem_apps in elem_tdata:
if getTagName(elem_apps) == "Application":
for elem_app in elem_apps:
tag = getTagName(elem_app)
if "Seed" == tag:
HMAC = elem_app.text
if "MovingFactor" == tag:
COUNTER = elem_app.text
if not SERIAL:
log.error("Found token without a serial")
else:
if HMAC:
hashlib = "sha1"
if len(HMAC) == 64:
hashlib = "sha256"
TOKENS[SERIAL] = {'otpkey': HMAC,
'counter': COUNTER,
'type': 'hotp',
'hashlib': hashlib
}
else:
log.error("Found token {0!s} without a element 'Seed'".format(
SERIAL))
return TOKENS
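# Hedged usage sketch (illustration only; serial, seed and counter invented):
# the XML shape parseSafeNetXML() accepts and the entry it produces.
#
#   <Tokens>
#     <Token serial="00040008CFA5">
#       <ProductName>eToken PASS</ProductName>
#       <Applications>
#         <Application>
#           <Seed>3132333435363738393031323334353637383930</Seed>
#           <MovingFactor>0</MovingFactor>
#         </Application>
#       </Applications>
#     </Token>
#   </Tokens>
#
# parseSafeNetXML(xml) would then return:
#   {'00040008CFA5': {'otpkey': '3132333435363738393031323334353637383930',
#                     'counter': '0',
#                     'type': 'hotp',
#                     'hashlib': 'sha1'}}   # 'sha256' if the seed is 64 chars long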
def strip_prefix_from_soup(xml_soup):
"""
We strip prefixes from the XML tags.
<pskc:encryption>
</pskc:encryption>
results in:
<encryption>
</encryption>
:param xml_soup: Beautiful Soup XML with tags with prefixes
:type xml_soup: Beautiful Soup object
:return: Beautiful Soup without prefixes in the tags
"""
# strip the prefixes from the tags!
for tag in xml_soup.findAll():
if tag.name.find(":") >= 1:
prefix, name = tag.name.split(":")
tag.name = name
return xml_soup
def derive_key(xml, password):
"""
Derive the encryption key from the password with the parameters given
in the XML soup.
:param xml: The XML
:param password: the password
:return: The derived key, hexlified
"""
if not password:
raise ImportException("The XML KeyContainer specifies a derived "
"encryption key, but no password given!")
keymeth= xml.keycontainer.encryptionkey.derivedkey.keyderivationmethod
derivation_algo = keymeth["algorithm"].split("#")[-1]
if derivation_algo.lower() != "pbkdf2":
raise ImportException("We only support PBKDF2 as Key derivation "
"function!")
salt = keymeth.find("salt").text.strip()
keylength = keymeth.find("keylength").text.strip()
rounds = keymeth.find("iterationcount").text.strip()
r = pbkdf2(to_utf8(password), base64.b64decode(salt), int(rounds),
int(keylength))
return binascii.hexlify(r)
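# Hedged sketch (values are illustrative): the derivation block derive_key()
# expects inside the prefix-stripped soup. Only the fragment after '#' of the
# algorithm URI is checked, and the salt is base64 encoded, as decoded above.
#
#   <EncryptionKey>
#     <DerivedKey>
#       <KeyDerivationMethod
#           Algorithm="http://www.rsasecurity.com/rsalabs/pkcs/schemas/pkcs-5#pbkdf2">
#         <Salt>Ej7/PEpyEpw=</Salt>
#         <IterationCount>1000</IterationCount>
#         <KeyLength>16</KeyLength>
#       </KeyDerivationMethod>
#     </DerivedKey>
#   </EncryptionKey>
#
# derive_key(soup, "qwerty") then returns the hexlified 16-byte PBKDF2 key,
# i.e. a 32-character hex string.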
@log_with(log)
def parsePSKCdata(xml_data,
preshared_key_hex=None,
password=None,
do_checkserial=False):
"""
This function parses XML data of a PSKC file, (RFC6030)
It can read
* AES-128-CBC encrypted (preshared_key_bin) data
* password based encrypted data
* plain text data
:param xml_data: The XML data
:type xml_data: basestring
:param preshared_key_hex: The preshared key, hexlified
:param password: The password that encrypted the keys
:param do_checkserial: Check if the serial numbers conform to the OATH
specification (not yet implemented)
:return: a dictionary of token dictionaries
{ serial : { otpkey , counter, .... }}
"""
tokens = {}
#xml = BeautifulSoup(xml_data, "lxml")
xml = strip_prefix_from_soup(BeautifulSoup(xml_data, "lxml"))
if xml.keycontainer.encryptionkey and \
xml.keycontainer.encryptionkey.derivedkey:
# If we have a password we also need a tag EncryptionKey in the
# KeyContainer
preshared_key_hex = derive_key(xml, password)
key_packages = xml.keycontainer.findAll("keypackage")
for key_package in key_packages:
token = {}
key = key_package.key
try:
token["description"] = key_package.deviceinfo.manufacturer.string
except Exception as exx:
log.debug("Can not get manufacturer string {0!s}".format(exx))
serial = key["id"]
try:
serial = key_package.deviceinfo.serialno.string
except Exception as exx:
log.debug("Can not get serial string from device info {0!s}".format(exx))
algo = key["algorithm"]
token["type"] = algo[-4:].lower()
parameters = key.algorithmparameters
token["otplen"] = parameters.responseformat["length"] or 6
try:
if key.data.secret.plainvalue:
secret = key.data.secret.plainvalue.string
token["otpkey"] = binascii.hexlify(base64.b64decode(secret))
elif key.data.secret.encryptedvalue:
encryptionmethod = key.data.secret.encryptedvalue.encryptionmethod
enc_algorithm = encryptionmethod["algorithm"].split("#")[-1]
if enc_algorithm.lower() != "aes128-cbc":
raise ImportException("We only import PSKC files with "
"AES128-CBC.")
enc_data = key.data.secret.encryptedvalue.ciphervalue.text
enc_data = base64.b64decode(enc_data.strip())
enc_iv = enc_data[:16]
enc_cipher = enc_data[16:]
secret = aes_decrypt(binascii.unhexlify(preshared_key_hex),
enc_iv, enc_cipher)
token["otpkey"] = binascii.hexlify(secret)
except Exception as exx:
log.error("Failed to import tokendata: {0!s}".format(exx))
log.debug(traceback.format_exc())
raise ImportException("Failed to import tokendata. Wrong "
"encryption key? %s" % exx)
if token["type"] == "hotp" and key.data.counter:
token["counter"] = key.data.counter.text.strip()
elif token["type"] == "totp" and key.data.timeinterval:
token["timeStep"] = key.data.timeinterval.text.strip()
tokens[serial] = token
return tokens
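# Hedged usage sketch (illustrative constant, not part of the original module):
# a minimal, unencrypted PSKC document with a single HOTP token. The tag names
# mirror what parsePSKCdata() reads above; serial, key id and secret are invented.
_EXAMPLE_PSKC = """<?xml version="1.0" encoding="UTF-8"?>
<KeyContainer Version="1.0" xmlns="urn:ietf:params:xml:ns:keyprov:pskc">
  <KeyPackage>
    <DeviceInfo>
      <Manufacturer>Token Vendor Inc.</Manufacturer>
      <SerialNo>987654321</SerialNo>
    </DeviceInfo>
    <Key Id="12345678" Algorithm="urn:ietf:params:xml:ns:keyprov:pskc:hotp">
      <AlgorithmParameters>
        <ResponseFormat Length="6" Encoding="DECIMAL"/>
      </AlgorithmParameters>
      <Data>
        <Secret>
          <PlainValue>MTIzNDU2Nzg5MDEyMzQ1Njc4OTA=</PlainValue>
        </Secret>
        <Counter>
          <PlainValue>0</PlainValue>
        </Counter>
      </Data>
    </Key>
  </KeyPackage>
</KeyContainer>"""
# parsePSKCdata(_EXAMPLE_PSKC) would return roughly:
#   {'987654321': {'description': 'Token Vendor Inc.',
#                  'type': 'hotp',
#                  'otplen': '6',
#                  'otpkey': '3132333435363738393031323334353637383930',
#                  'counter': '0'}}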
class GPGImport(object):
"""
This class is used to decrypt GPG encrypted import files.
    The decrypt method returns the unencrypted files.
Create the keypair like this:
GNUPGHOME=/etc/privacyidea/gpg gpg --gen-key
"""
def __init__(self, config=None):
self.config = config or {}
self.gnupg_home = self.config.get("PI_GNUPG_HOME",
"/etc/privacyidea/gpg")
self.gpg = gnupg.GPG(gnupghome=self.gnupg_home)
self.private_keys = self.gpg.list_keys(True)
def get_publickeys(self):
"""
This returns the public GPG key to be displayed in the Import Dialog.
The administrator can send this public key to his token vendor and
the token vendor can use this public key to encrypt the token import
file.
:return: a dictionary of public keys with fingerprint
"""
public_keys = {}
keys = self.gpg.list_keys(secret=True)
for key in keys:
ascii_armored_public_key = self.gpg.export_keys(key.get("keyid"))
public_keys[key.get("keyid")] = {"armor": ascii_armored_public_key,
"fingerprint": key.get(
"fingerprint")}
return public_keys
def decrypt(self, input_data):
"""
Decrypts the input data with one of the private keys
:param input_data:
:return:
"""
decrypted = self.gpg.decrypt(message=input_data)
if not decrypted.ok:
log.error(u"Decrpytion failed: {0!s}. {1!s}".format(
decrypted.status, decrypted.stderr))
raise Exception(decrypted.stderr)
return decrypted.data
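# Hedged usage sketch (illustration only; the encrypted file name is an
# assumption, the GPG home below is simply the class default):
#   importer = GPGImport({"PI_GNUPG_HOME": "/etc/privacyidea/gpg"})
#   for keyid, info in importer.get_publickeys().items():
#       print(keyid, info["fingerprint"])   # hand info["armor"] to the vendor
#   cleartext = importer.decrypt(open("tokens.csv.gpg", "rb").read())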
|
wheldom01/privacyidea
|
privacyidea/lib/importotp.py
|
Python
|
agpl-3.0
| 20,360
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2012-2014 Didotech (<http://www.didotech.com>)
# All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import orm, fields
class asset_assign_category(orm.TransientModel):
_name = 'asset.assign.category'
_description = 'Assign category to a new asset product'
_columns = {
'category_id': fields.many2one('asset.category', 'Asset Category', required=False),
}
def assign_category(self, cr, uid, ids, context=None):
category = self.browse(cr, uid, ids, context=context)[0].category_id
# add row to assets_product table
asset_product_id = self.pool['asset.product'].create(cr, uid, {
'create_uid': uid,
'has_date_option': False,
'asset_category_id': category.id,
'product_product_id': context['product_id'],
})
## create asset.asset
self.pool['asset.asset'].create(cr, uid, {
'asset_product_id': asset_product_id,
'serial_number': context['serial_number'],
'company_id': context['company_id'],
'location': context['location'],
'has_date_option': False,
})
new_context = {
'lang': context['lang'],
'tz': context['tz'],
'uid': context['uid'],
'section_id': context['section_id'],
'project_id': context['project_id'],
'department_id': context['department_id'],
'asset_created': True
}
self.pool.get('stock.move').action_done(cr, uid, context['move_ids'], new_context)
return {'type': 'ir.actions.act_window_close'}
|
odoousers2014/LibrERP
|
material_asset/wizard/asset_assign_category.py
|
Python
|
agpl-3.0
| 2,528
|
from ..utils import *
##
# Minions
class AT_019:
"Dreadsteed"
deathrattle = Summon(CONTROLLER, "AT_019")
class AT_021:
"Tiny Knight of Evil"
events = Discard(FRIENDLY).on(Buff(SELF, "AT_021e"))
AT_021e = buff(+1, +1)
class AT_023:
"Void Crusher"
inspire = Destroy(RANDOM_ENEMY_MINION | RANDOM_FRIENDLY_MINION)
class AT_026:
"Wrathguard"
events = Damage(SELF).on(Hit(FRIENDLY_HERO, Damage.AMOUNT))
class AT_027:
"Wilfred Fizzlebang"
events = Draw(CONTROLLER, None, FRIENDLY_HERO_POWER).on(Buff(Draw.CARD, "AT_027e"))
class AT_027e:
cost = SET(0)
##
# Spells
class AT_022:
"Fist of Jaraxxus"
play = Hit(RANDOM_ENEMY_CHARACTER, 4)
class Hand:
events = Discard(SELF).on(Hit(RANDOM_ENEMY_CHARACTER, 4))
class AT_024:
"Demonfuse"
play = Buff(TARGET, "AT_024e"), GainMana(OPPONENT, 1)
AT_024e = buff(+3, +3)
class AT_025:
"Dark Bargain"
play = Destroy(RANDOM(ENEMY_MINIONS) * 2), Discard(RANDOM(FRIENDLY_HAND) * 2)
|
beheh/fireplace
|
fireplace/cards/tgt/warlock.py
|
Python
|
agpl-3.0
| 950
|
from coalib.bearlib.abstractions.Linter import linter
from coalib.bears.requirements.GoRequirement import GoRequirement
@linter(executable='gofmt',
use_stdin=True,
output_format='corrected',
result_message='Formatting can be improved.')
class GofmtBear:
"""
Suggest better formatting options in Go code. Basic checks like alignment,
indentation, and redundant parentheses are provided.
This is done using the ``gofmt`` utility. For more information visit
<https://golang.org/cmd/gofmt/>.
"""
LANGUAGES = {'Go'}
REQUIREMENTS = {GoRequirement(package='golang.org/cmd/gofmt', flag='-u')}
AUTHORS = {'The coala developers'}
AUTHORS_EMAILS = {'coala-devel@googlegroups.com'}
LICENSE = 'AGPL-3.0'
CAN_FIX = {'Formatting'}
ASCIINEMA_URL = 'https://asciinema.org/a/94812'
@staticmethod
def create_arguments(filename, file, config_file):
return ()
|
sounak98/coala-bears
|
bears/go/GofmtBear.py
|
Python
|
agpl-3.0
| 935
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# Copyright (C) 2004-2012 Micronaet srl. All Rights Reserved
# d$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from osv import fields,osv
import netsvc
class intervent_report_collection_wizard(osv.osv_memory):
    ''' Wizard that permits to:
        select the type of report
        filter elements by some value (user, customer, etc.)
        The wizard returns the selected aeroo report
'''
_name = "intervent.report.collection.wizard"
# On change function:
def on_change_month(self, cr, uid, ids, month, context=None):
'''
'''
import time
res={'value':{}}
if month:
res['value']={'from_date':"%s-%02d-01 00:00:00"%(time.strftime('%Y'), int(month)),
'to_date': "%04d-%02d-01 00:00:00"%(int(time.strftime('%Y')) if month != "12" else int(time.strftime('%Y')) + 1, int(month) + 1 if month!="12" else 1,)}
return res
# Button function of the wizard:
# Extra function usefull:
def _get_action_report_to_return(self, cr, uid, ids, intervent_ids, report_name, title, context=None):
        ''' Compose the action to return according to the passed intervent_ids, report_name and title
'''
datas = {'ids': intervent_ids,}
datas['model'] = 'intervention.report'
datas['form'] = self.read(cr, uid, ids)[0]
datas['title']= title
# return action:
return {
'type': 'ir.actions.report.xml',
'report_name': report_name, # TODO one for all??
'datas': datas,
}
def _get_filter_from_wizard(self, cr, uid, ids, with_partner=True, context=None):
        ''' From the passed wizard, get the selected filter in normal domain format.
            The with_partner parameter parametrizes this function: it is used
            either to filter the report or to load all partners of the filtered
            values (in this case there is obviously no selection...)
'''
if context is None:
context = {}
wiz_proxy = self.browse(cr, uid, ids)[0]
domain=[]
if wiz_proxy.partner_id and with_partner:
domain += [('partner_id','=',wiz_proxy.partner_id.id)]
if wiz_proxy.user_id:
domain += [('user_id','=',wiz_proxy.user_id.id)]
if wiz_proxy.from_date:
domain += [('date_start','>=',"%s %s"%(wiz_proxy.from_date, "00:00:00"))]
if wiz_proxy.to_date:
domain += [('date_start','<',"%s %s"%(wiz_proxy.to_date, "00:00:00"))]
if wiz_proxy.is_intervent: # only intervent
domain += [('state','in', ('confirmed','closed','reported'))]
if wiz_proxy.is_closed: # only intervent
domain += [('state','in', ('closed','reported'))]
return domain
def _get_report_parameter_for_action(self, group_by):
''' Return report parameter: (report_name, title, order)
according to group_by clause passed
'''
if group_by == 'state': # group state
return ("intervent_report_state", "Intervent report list (group by state)", "partner_id,ref,date_start",)
elif group_by == 'tipology': # group tipology
return ("intervent_report_tipology", "Intervent report list (group by tipology)", "tipology_id,date_start",)
elif group_by == 'partner': # group tipology
return ("intervent_report_partner", "Intervent grouped by tipology" ,"partner_id,date")
elif group_by == 'list': # group tipology
return ("intervent_report", "Intervent report list (group by state)", "partner_id,date_start")
else: # no report (case impossible)
return (False,False,False) # comunicate error
def print_load_partner(self, cr, uid, ids, context=None):
        ''' Test the selected filter and get the partner list for the intervents
'''
domain=self._get_filter_from_wizard(cr, uid, ids, with_partner=False, context=context)
intervent_ids = self.pool.get('intervention.report').search(cr, uid, domain, context=context)
intervent_proxy = self.pool.get('intervention.report').browse(cr, uid, intervent_ids, context=context)
partner_ids=[]
for intervent in intervent_proxy:
if intervent.partner_id and intervent.partner_id.id not in partner_ids:
partner_ids.append(intervent.partner_id.id)
if intervent_ids:
# write new list of elements
self.write(cr, uid, ids, {'partner_ids': [(6, 0, partner_ids)]})
return True
def print_save_partner_report(self, cr, uid, ids, context=None):
        ''' Save the partner reports (state and intervent) for that period.
            For each (pre-loaded) partner, save two reports:
            the status report and the intervent report
'''
import time, base64, xmlrpclib
user_proxy=self.pool.get('res.users').browse(cr, uid, [uid], context=context)[0]
db=cr.dbname
uid=uid
pwd=user_proxy.password
model = 'intervention.report'
#report_name="intervent_report"
report_to_print={}
report_to_print['state'] = self._get_report_parameter_for_action('state')
report_to_print['list'] = self._get_report_parameter_for_action('list')
wiz_proxy=self.browse(cr, uid, ids)[0]
printsock = xmlrpclib.ServerProxy('http://localhost:8069/xmlrpc/report')
#domain = self._get_filter_from_wizard(cr, uid, ids, with_partner=False, context=context)
for partner in wiz_proxy.partner_ids: #self.pool.get('intervention.report').search(cr, uid, domain, order=order, context=context)
# get intervent_ids for this partner
domain = self._get_filter_from_wizard(cr, uid, ids, with_partner=False, context=context)
domain += [('partner_id', '=', partner.id)]
for key in report_to_print.keys():
(report_name, title, order) = report_to_print[key]
intervent_ids = self.pool.get(model).search(cr, uid, domain, order=order, context=context)
#self._get_action_report_to_return(cr, uid, ids, intervent_ids, report_name, title, context=context)
if intervent_ids:
action=self._get_action_report_to_return(cr, uid, ids, intervent_ids, report_name, title, context=context)
action['report_type']='pdf'
action['model']=model
id_report = printsock.report(db, uid, pwd, report_name, intervent_ids, action)#ids {'model': model, 'report_type':'pdf'})
time.sleep(5)
state = False
attempt = 0
while not state:
report = printsock.report_get(db, uid, pwd, id_report)
state = report['state']
if not state:
time.sleep(1)
attempt += 1
if attempt>200:
print 'Printing aborted, too long delay !'
string_pdf = base64.decodestring(report['result'])
file_name='/home/administrator/pdf/%s %s - %s.pdf'%(wiz_proxy.month, partner.name, "Riepilogo" if key=="state" else "Rapportini")
file_pdf = open(file_name,'w')
file_pdf.write(string_pdf)
file_pdf.close()
return False
def print_intervent_report(self, cr, uid, ids, context=None):
        ''' With the filter parameters, select all matching intervents.
            Create the data element used afterwards to call the aeroo report
'''
if context is None:
context = {}
wiz_proxy = self.browse(cr, uid, ids)[0]
order="date_start" # TODO depend on report (put in parser??)
title="RIEPILOGO ORE DIVISO PER STATO INTERVENTO:"
# Domain filter: *******************************************************
# get the filter
domain = self._get_filter_from_wizard(cr, uid, ids, with_partner=True, context=context)
# get the parameter:
(report_name, title, order) = self._get_report_parameter_for_action(wiz_proxy.report_type)
# get the intervent list according to filter and parameter:
intervent_ids = self.pool.get('intervention.report').search(cr, uid, domain, order=order, context=context)
# create datas element and return report action:
if intervent_ids:
return self._get_action_report_to_return(cr, uid, ids, intervent_ids, report_name, title, context=context)
return False # error no intervent!
_columns = {
'user_id': fields.many2one('res.users', 'User', required=False),
'partner_id': fields.many2one('res.partner', 'Partner', required=False),
'partner_ids':fields.many2many('res.partner', 'intervent_partner_rel', 'intervent_id', 'partner_id', 'Partner', help="List of partner for the period selected"),
'month': fields.selection([
('01','January'),
('02','February'),
('03','March'),
('04','April'),
('05','May'),
('06','June'),
('07','July'),
('08','August'),
('09','September'),
('10','October'),
('11','November'),
('12','December'),
],'Month', select=True, readonly=False),
'from_date': fields.date('From date (>=)',),
'to_date': fields.date('To date (<)',),
'is_intervent': fields.boolean("Is intervent"),
'is_closed': fields.boolean("Is closed"),
'report_type': fields.selection([('state', 'Report by state'),
('tipology', 'Report by tipology'),
('partner', 'Report by customer'),
('list','List of intervent')], 'Report type (group by)', readonly=False, required=True,),
}
_defaults = {
'is_intervent': lambda *x: False,
'is_closed': lambda *x: False,
}
intervent_report_collection_wizard()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
Micronaet/micronaet-addons-private
|
task_manager/wizard/wizard_report.py
|
Python
|
agpl-3.0
| 11,463
|
# -*- coding: utf-8 -*-
## Copyright 2015 Rasmus Scholer Sorensen, rasmusscholer@gmail.com
##
## This file is part of Nascent.
##
## Nascent is free software: you can redistribute it and/or modify
## it under the terms of the GNU Affero General Public License as
## published by the Free Software Foundation, either version 3 of the
## License, or (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU Affero General Public License for more details.
##
## You should have received a copy of the GNU Affero General Public License
## along with this program. If not, see <http://www.gnu.org/licenses/>.
#pylint: disable=C0103,C0111,W0613
from __future__ import absolute_import, print_function, division
from pprint import pprint
do_print = False
def print_debug(*args, **kwargs):
""" Change module-level do_print variable to toggle behaviour. """
if 'origin' in kwargs:
del kwargs['origin']
if do_print:
print(*args, **kwargs)
def pprint_debug(*args, **kwargs):
if do_print:
pprint(*args, **kwargs)
def info_print(*args, **kwargs):
""" Will print the file and line before printing. Can be used to find spurrious print statements. """
from inspect import currentframe, getframeinfo
frameinfo = getframeinfo(currentframe().f_back)
print(frameinfo.filename, frameinfo.lineno)
pprint(*args, **kwargs)
def info_pprint(*args, **kwargs):
""" Will print the file and line before printing the variable. """
from inspect import currentframe, getframeinfo
frameinfo = getframeinfo(currentframe().f_back)
print(frameinfo.filename, frameinfo.lineno)
pprint(*args, **kwargs)
pprintd = pprint_debug
printd = print_debug
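# Hedged usage sketch: the helpers above stay silent until the module-level
# do_print flag is flipped (module path taken from this file's location):
#   from nascent.graph_sim_nx import debug
#   debug.do_print = True
#   debug.printd("now visible")      # forwarded to print()
#   debug.pprintd({"step": 1})       # forwarded to pprint()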
|
scholer/nascent
|
nascent/graph_sim_nx/debug.py
|
Python
|
agpl-3.0
| 1,923
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# OpenFisca -- A versatile microsimulation software
# By: OpenFisca Team <contact@openfisca.fr>
#
# Copyright (C) 2011, 2012, 2013, 2014, 2015 OpenFisca Team
# https://github.com/openfisca
#
# This file is part of OpenFisca.
#
# OpenFisca is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# OpenFisca is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
import os
from openfisca_france_data import default_config_files_directory as config_files_directory
from openfisca_france_data.input_data_builders.build_openfisca_survey_data import ( # analysis:ignore
step_01_pre_processing as pre_processing,
step_02_imputation_loyer as imputation_loyer,
step_03_fip as fip,
step_04_famille as famille,
step_05_foyer as foyer,
step_06_rebuild as rebuild,
step_07_invalides as invalides,
step_08_final as final,
)
from openfisca_france_data.temporary import get_store
from openfisca_survey_manager.surveys import Survey
from openfisca_survey_manager.survey_collections import SurveyCollection
log = logging.getLogger(__name__)
def run_all(year = None, check = False):
assert year is not None
pre_processing.create_indivim_menagem(year = year)
pre_processing.create_enfants_a_naitre(year = year)
# try:
# imputation_loyer.imputation_loyer(year = year)
# except Exception, e:
# log.info('Do not impute loyer because of the following error: \n {}'.format(e))
# pass
fip.create_fip(year = year)
famille.famille(year = year)
foyer.sif(year = year)
foyer.foyer_all(year = year)
rebuild.create_totals_first_pass(year = year)
rebuild.create_totals_second_pass(year = year)
rebuild.create_final(year = year)
invalides.invalide(year = year)
final.final(year = year, check = check)
temporary_store = get_store(file_name = 'erfs')
data_frame = temporary_store['input_{}'.format(year)]
# Saving the data_frame
openfisca_survey_collection = SurveyCollection(name = "openfisca", config_files_directory = config_files_directory)
output_data_directory = openfisca_survey_collection.config.get('data', 'output_directory')
survey_name = "openfisca_data_{}".format(year)
table = "input"
hdf5_file_path = os.path.join(os.path.dirname(output_data_directory), "{}.h5".format(survey_name))
survey = Survey(
name = survey_name,
hdf5_file_path = hdf5_file_path,
)
survey.insert_table(name = table, data_frame = data_frame)
openfisca_survey_collection.surveys.append(survey)
collections_directory = openfisca_survey_collection.config.get('collections', 'collections_directory')
json_file_path = os.path.join(collections_directory, 'openfisca.json')
openfisca_survey_collection.dump(json_file_path = json_file_path)
if __name__ == '__main__':
import time
start = time.time()
logging.basicConfig(level = logging.INFO, filename = 'run_all.log', filemode = 'w')
run_all(year = 2009, check = False)
log.info("Script finished after {}".format(time.time() - start))
print time.time() - start
|
MalkIPP/openfisca-france-data
|
openfisca_france_data/input_data_builders/build_openfisca_survey_data/run_all.py
|
Python
|
agpl-3.0
| 3,682
|
# -*- coding: utf-8 -*-
from django.db import migrations
def create_switch(apps, schema_editor):
"""Create the async_order_fulfillment switch if it does not already exist."""
Switch = apps.get_model('waffle', 'Switch')
Switch.objects.get_or_create(name='async_order_fulfillment', defaults={'active': False})
def delete_switch(apps, schema_editor):
"""Delete the async_order_fulfillment switch."""
Switch = apps.get_model('waffle', 'Switch')
Switch.objects.filter(name='async_order_fulfillment').delete()
class Migration(migrations.Migration):
dependencies = [
('core', '0006_add_service_user'),
('waffle', '0001_initial'),
]
operations = [
migrations.RunPython(create_switch, reverse_code=delete_switch),
]
|
edx/ecommerce
|
ecommerce/core/migrations/0007_auto_20151005_1333.py
|
Python
|
agpl-3.0
| 780
|
"""
Task performed by user in django view
"""
import functools
from django.core.urlresolvers import reverse
from django.conf.urls import url
from django.shortcuts import get_object_or_404
from ..activation import Activation, ViewActivation, STATUS
from ..exceptions import FlowRuntimeError
from . import base
def flow_view(**lock_args):
"""
Decorator that locks and runs the flow view in transaction.
Expects view with the signature `(request, activation, **kwargs)`
    or a CBV view that implements TaskActivation; in this case, dispatch
    would be called with `(request, **kwargs)`
Returns `(request, flow_task, process_pk, task_pk, **kwargs)`
"""
class flow_view_decorator(object):
def __init__(self, func, activation=None):
self.func = func
self.activation = activation
functools.update_wrapper(self, func)
def __call__(self, request, flow_cls, flow_task, process_pk, task_pk, **kwargs):
lock = flow_task.flow_cls.lock_impl(flow_task.flow_cls.instance, **lock_args)
with lock(flow_task.flow_cls, process_pk):
task = get_object_or_404(flow_task.flow_cls.task_cls._default_manager, pk=task_pk)
if self.activation:
"""
Class-based view that implements TaskActivation interface
"""
self.activation.initialize(flow_task, task)
return self.func(request, **kwargs)
else:
"""
                    Function based view or CBV without TaskActivation interface implementation
"""
activation = flow_task.activation_cls()
activation.initialize(flow_task, task)
return self.func(request, activation, **kwargs)
def __get__(self, instance, instancetype):
"""
            If we decorate a method on a CBV that implements the ViewActivation
            interface, no custom activation is required.
"""
if instance is None:
return self
func = self.func.__get__(instance, type)
activation = instance if isinstance(instance, ViewActivation) else None
return self.__class__(func, activation=activation)
return flow_view_decorator
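# Hedged sketch (not taken from the library docs): a function based view wired
# through the decorator above. The URL kwargs (flow_cls, flow_task, process_pk,
# task_pk) are supplied by the node's urls() entry; names are illustrative.
#   @flow_view()
#   def approve(request, activation, **kwargs):
#       activation.prepare(request.POST or None, user=request.user)
#       if request.method == 'POST':
#           activation.done()
#           return redirect('/')
#       return render(request, 'approve.html', {'activation': activation})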
class ManagedViewActivation(ViewActivation):
"""
Tracks task statistics in activation form
"""
management_form_cls = None
def __init__(self, **kwargs):
super(ManagedViewActivation, self).__init__(**kwargs)
self.management_form = None
self.management_form_cls = kwargs.pop('management_form_cls', None)
def get_management_form_cls(self):
if self.management_form_cls:
return self.management_form_cls
else:
return self.flow_cls.management_form_cls
@Activation.status.super()
def prepare(self, data=None, user=None):
super(ManagedViewActivation, self).prepare.original()
if user:
self.task.owner = user
management_form_cls = self.get_management_form_cls()
self.management_form = management_form_cls(data=data, instance=self.task)
if data:
if not self.management_form.is_valid():
raise FlowRuntimeError('Activation metadata is broken {}'.format(self.management_form.errors))
self.task = self.management_form.save(commit=False)
def has_perm(self, user):
return self.flow_task.can_execute(user, self.task)
@classmethod
def create_task(cls, flow_task, prev_activation, token):
task = ViewActivation.create_task(flow_task, prev_activation, token)
# Try to assign permission
owner_permission = flow_task.calc_owner_permission(task)
if owner_permission:
task.owner_permission = owner_permission
# Try to assign owner
owner = flow_task.calc_owner(task)
if owner:
task.owner = owner
task.status = STATUS.ASSIGNED
return task
class BaseView(base.TaskDescriptionMixin,
base.NextNodeMixin,
base.Task,
base.ViewArgsMixin):
"""
Base class for ViewTasks
"""
task_type = 'HUMAN'
activation_cls = ManagedViewActivation
def __init__(self, view_or_cls, **kwargs):
"""
Accepts view callable or CBV View class with view kwargs,
        if the CBV view implements ViewActivation, it is used as activation_cls
"""
self._view, self._view_cls, self._view_args = None, None, None
if isinstance(view_or_cls, type):
self._view_cls = view_or_cls
if issubclass(view_or_cls, ViewActivation):
kwargs.setdefault('activation_cls', view_or_cls)
else:
self._view = view_or_cls
super(BaseView, self).__init__(view_or_cls=view_or_cls, **kwargs)
@property
def view(self):
if not self._view:
self._view = self._view_cls.as_view(**self._view_args)
return self._view
def urls(self):
return [url(r'^(?P<process_pk>\d+)/{}/(?P<task_pk>\d+)/$'.format(self.name),
self.view, {'flow_task': self}, name=self.name)]
class View(base.PermissionMixin,
base.UndoViewMixin,
base.CancelViewMixin,
base.DetailsViewMixin,
base.ActivateNextMixin,
BaseView):
"""
View task
Example::
task = flow.View(some_view) \\
.Permission('my_app.can_do_task') \\
.Next(this.next_task)
In case of function based view::
task = flow.Task(task)
@flow_start_view()
def task(request, activation):
if not activation.flow_task.has_perm(request.user):
raise PermissionDenied
activation.prepare(request.POST or None)
form = SomeForm(request.POST or None)
if form.is_valid():
form.save()
activation.done()
return redirect('/')
return render(request, {'activation': activation, 'form': form})
    Ensure to include `{{ activation.management_form }}` inside the template, to properly
    track when the task was started and other task performance statistics::
<form method="POST">
{{ form }}
{{ activation.management_form }}
<button type="submit"/>
</form>
"""
def __init__(self, *args, **kwargs):
self._assign_view = kwargs.pop('assign_view', None)
super(View, self).__init__(*args, **kwargs)
def Assign(self, owner=None, **owner_kwargs):
"""
Assign task to the User immediately on activation,
accepts user lookup kwargs or callable :: Process -> User::
.Assign(username='employee')
.Assign(lambda process: process.created_by)
"""
if owner:
self._owner = owner
else:
self._owner = owner_kwargs
return self
@property
def assign_view(self):
from viewflow.views import AssignView
return self._assign_view if self._assign_view else AssignView.as_view()
def urls(self):
urls = super(View, self).urls()
urls.append(url(r'^(?P<process_pk>\d+)/{}/(?P<task_pk>\d+)/assign/$'.format(self.name),
self.assign_view, {'flow_task': self}, name="{}__assign".format(self.name)))
return urls
def get_task_url(self, task, url_type, **kwargs):
user = kwargs.get('user', None)
# assign
if url_type in ['assign', 'guess']:
if task.status == STATUS.NEW and self.can_assign(user, task):
url_name = '{}:{}__assign'.format(self.flow_cls.instance.namespace, self.name)
return reverse(url_name, kwargs={'process_pk': task.process_id, 'task_pk': task.pk})
# execute
if url_type in ['execute', 'guess']:
if task.status == STATUS.ASSIGNED and self.can_execute(user, task):
url_name = '{}:{}'.format(self.flow_cls.instance.namespace, self.name)
return reverse(url_name, kwargs={'process_pk': task.process_id, 'task_pk': task.pk})
return super(View, self).get_task_url(task, url_type, **kwargs)
def calc_owner(self, task):
from django.contrib.auth import get_user_model
owner = self._owner
if callable(owner):
owner = owner(task.process)
elif isinstance(owner, dict):
            owner = get_user_model()._default_manager.get(**owner)
return owner
def calc_owner_permission(self, task):
owner_permission = self._owner_permission
if callable(owner_permission):
owner_permission = owner_permission(task.process)
return owner_permission
def can_assign(self, user, task):
if task.owner_id:
return False
if user.is_anonymous():
return False
if not task.owner_permission:
"""
Available for everyone
"""
return True
obj = None
if self._owner_permission_obj:
if callable(self._owner_permission_obj):
obj = self._owner_permission_obj(task.process)
else:
obj = self._owner_permission_obj
return user.has_perm(task.owner_permission, obj=obj)
def can_execute(self, user, task):
if task.owner_permission is None and task.owner is None:
return True
return task.owner == user
|
codingjoe/viewflow
|
viewflow/flow/task_view.py
|
Python
|
agpl-3.0
| 9,690
|
import os
import sys
from recommonmark.parser import CommonMarkParser
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', 'askomics'))
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.napoleon']
master_doc = 'index'
# The suffix of source filenames.
source_suffix = ['.rst', '.md']
source_parsers = {
'.md': CommonMarkParser,
}
def run_apidoc(_):
from sphinx.apidoc import main
parent_folder = os.path.join(os.path.dirname(__file__), '..')
cur_dir = os.path.abspath(os.path.dirname(__file__))
sys.path.append(parent_folder)
module = os.path.join(parent_folder, 'askomics')
output_path = os.path.join(cur_dir, 'api')
main(['-e', '-f', '-o', output_path, module])
def setup(app):
app.connect('builder-inited', run_apidoc)
|
askomics/askomics
|
docs/conf.py
|
Python
|
agpl-3.0
| 778
|
# -*- coding: utf-8 -*-
# ocitysmap, city map and street index generator from OpenStreetMap data
# Copyright (C) 2010 David Decotigny
# Copyright (C) 2010 Frédéric Lehobey
# Copyright (C) 2010 Pierre Mauduit
# Copyright (C) 2010 David Mentré
# Copyright (C) 2010 Maxime Petazzoni
# Copyright (C) 2010 Thomas Petazzoni
# Copyright (C) 2010 Gaël Utard
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# PT/metrics conversion routines
PT_PER_INCH = 72.0
def convert_pt_to_dots(pt, dpi = PT_PER_INCH):
return float(pt * dpi) / PT_PER_INCH
def convert_mm_to_pt(mm):
return ((mm/10.0) / 2.54) * 72
def convert_pt_to_mm(pt):
return (float(pt) * 10.0 * 2.54) / 72
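# Hedged example (added here, not in the original file): A4 paper is 210 mm wide,
# so with the helpers above:
#   convert_mm_to_pt(210)                  -> ~595.28 pt
#   convert_pt_to_dots(595.28, dpi=300)    -> ~2480.3 dots
#   convert_pt_to_mm(595.28)               -> ~210 mm (round trip)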
|
dittaeva/ocitysmap
|
ocitysmap2/layoutlib/commons.py
|
Python
|
agpl-3.0
| 1,272
|
# Generated by Django 3.0.9 on 2020-08-16 20:47
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('groups', '0042_auto_20200507_1258'),
('activities', '0021_remove_activity_feedback_as_sum'),
]
operations = [
migrations.CreateModel(
name='ActivityType',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('created_at', models.DateTimeField(default=django.utils.timezone.now)),
('group', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='activity_types', to='groups.Group')),
('name', models.CharField(max_length=80)),
('colour', models.CharField(max_length=6)),
('icon', models.CharField(max_length=32)),
('feedback_icon', models.CharField(max_length=32)),
('has_feedback', models.BooleanField(default=True)),
('has_feedback_weight', models.BooleanField(default=True)),
],
options={
'abstract': False,
},
),
migrations.AddField(
model_name='activity',
name='activity_type',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='activities', to='activities.ActivityType'),
),
migrations.AddField(
model_name='activityseries',
name='activity_type',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='activity_series', to='activities.ActivityType'),
),
migrations.AlterUniqueTogether(
name='activitytype',
unique_together={('group', 'name')},
),
]
|
yunity/foodsaving-backend
|
karrot/activities/migrations/0022_add_activity_types.py
|
Python
|
agpl-3.0
| 1,901
|
#!/usr/bin/env python
# Copyright (C) 2006-2016 Music Technology Group - Universitat Pompeu Fabra
#
# This file is part of Essentia
#
# Essentia is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation (FSF), either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the Affero GNU General Public License
# version 3 along with this program. If not, see http://www.gnu.org/licenses/
from essentia_test import *
class TestIDCT(TestCase):
def testInvalidParam(self):
self.assertConfigureFails(IDCT(), { 'inputSize': 0, 'outputSize': 2 })
self.assertConfigureFails(IDCT(), { 'inputSize': 6, 'outputSize': 0 })
def testRegression(self):
# values from Matlab/Octave
inputArray = [ 0.89442718, -0.60150099, -0.12078822, -0.37174806, 0.82789522]
expected = [ 0, 0, 1, 0, 1 ]
self.assertAlmostEqualVector(IDCT(outputSize=len(expected), inputSize = len(inputArray))(inputArray), expected, 1e-6)
def testLifteringRegression(self):
        # DCT III and liftering computed using the PLP and RASTA matlab toolbox.
        # A big tolerance is necessary due to the smoothing caused by the smaller number of bins in the DCT domain.
inputArray = [ 1.89736652, 0.95370573, 3.39358997, -3.35009956]
expected = [1, 1, 0, 0, 1]
self.assertAlmostEqualVector(IDCT(inputSize=len(inputArray),
outputSize=len(expected),
dctType = 3,
liftering = 22)(inputArray), expected, 1e0)
def testZero(self):
self.assertEqualVector(IDCT(outputSize=10)(zeros(5)), zeros(10))
def testInvalidInput(self):
self.assertComputeFails(IDCT(), []) # = testEmpty
self.assertComputeFails(IDCT(outputSize = 2, inputSize = 1), [ 0, 2, 4 ])
suite = allTests(TestIDCT)
if __name__ == '__main__':
TextTestRunner(verbosity=2).run(suite)
|
carthach/essentia
|
test/src/unittests/standard/test_idct.py
|
Python
|
agpl-3.0
| 2,364
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
def add_criteria(apps, schema_editor):
Criterion = apps.get_model('activities', 'Criterion')
StudentClubYear = apps.get_model('core', 'StudentClubYear')
year_2015_2016 = StudentClubYear.objects.get(start_date__year=2015,
end_date__year=2016)
Criterion.objects.create(year=year_2015_2016,
ar_name="التعاون مع الرئاسة",
code_name="presidency",
instructions="",
category='P')
Criterion.objects.create(year=year_2015_2016,
ar_name="رفع مبكر",
code_name="early_submission",
instructions="",
category='P')
Criterion.objects.create(year=year_2015_2016,
ar_name="تأجيل",
code_name="postponed",
instructions="",
category='P')
Criterion.objects.get(code_name='time', category='P').delete()
def remove_criteria(apps, schema_editor):
Criterion = apps.get_model('activities', 'Criterion')
StudentClubYear = apps.get_model('core', 'StudentClubYear')
year_2015_2016 = StudentClubYear.objects.get(start_date__year=2015,
end_date__year=2016)
Criterion.objects.filter(code_name__in=["presidency",
"early_submission", "postponed"]).delete()
class Migration(migrations.Migration):
dependencies = [
('activities', '0019_remove_alahsa_criteria'),
]
operations = [
migrations.RunPython(
add_criteria,
reverse_code=remove_criteria),
]
|
enjaz/enjaz
|
activities/migrations/0020_modify_citeria.py
|
Python
|
agpl-3.0
| 1,939
|
#===- cindex.py - Python Indexing Library Bindings -----------*- python -*--===#
#
# The LLVM Compiler Infrastructure
#
# This file is distributed under the University of Illinois Open Source
# License. See LICENSE.TXT for details.
#
#===------------------------------------------------------------------------===#
r"""
Clang Indexing Library Bindings
===============================
This module provides an interface to the Clang indexing library. It is a
low-level interface to the indexing library which attempts to match the Clang
API directly while also being "pythonic". Notable differences from the C API
are:
* string results are returned as Python strings, not CXString objects.
* null cursors are translated to None.
* access to child cursors is done via iteration, not visitation.
The major indexing objects are:
Index
The top-level object which manages some global library state.
TranslationUnit
High-level object encapsulating the AST for a single translation unit. These
can be loaded from .ast files or parsed on the fly.
Cursor
Generic object for representing a node in the AST.
SourceRange, SourceLocation, and File
Objects representing information about the input source.
Most object information is exposed using properties, when the underlying API
call is efficient.
"""
# TODO
# ====
#
# o API support for invalid translation units. Currently we can't even get the
# diagnostics on failure because they refer to locations in an object that
# will have been invalidated.
#
# o fix memory management issues (currently client must hold on to index and
# translation unit, or risk crashes).
#
# o expose code completion APIs.
#
# o cleanup ctypes wrapping, would be nice to separate the ctypes details more
# clearly, and hide from the external interface (i.e., help(cindex)).
#
# o implement additional SourceLocation, SourceRange, and File methods.
from ctypes import *
import collections
import sys
from hotdoc.extensions.c.clang import enumerations
# ctypes doesn't implicitly convert c_void_p to the appropriate wrapper
# object. This is a problem, because it means that from_parameter will see an
# integer and pass the wrong value on platforms where int != void*. Work around
# this by marshalling object arguments as void**.
c_object_p = POINTER(c_void_p)
if sys.version_info[0] > 2:
# Python 3 strings are unicode, translate them to/from utf8 for C-interop
# Python 3 replaces xrange with range, we want xrange behaviour
xrange = range
class c_string_p(c_char_p):
def __init__(self, p=None):
if type(p) == str:
p = p.encode("utf8")
super(c_char_p, self).__init__(p)
def __str__(self):
return str(self.value)
@property
def value(self):
if super(c_char_p, self).value is None:
return None
return super(c_char_p, self).value.decode("utf8")
@classmethod
def from_param(cls, param):
return cls(param)
else:
c_string_p = c_char_p
callbacks = {}
### Exception Classes ###
class TranslationUnitLoadError(Exception):
"""Represents an error that occurred when loading a TranslationUnit.
This is raised in the case where a TranslationUnit could not be
instantiated due to failure in the libclang library.
FIXME: Make libclang expose additional error information in this scenario.
"""
pass
class TranslationUnitSaveError(Exception):
"""Represents an error that occurred when saving a TranslationUnit.
Each error has associated with it an enumerated value, accessible under
e.save_error. Consumers can compare the value with one of the ERROR_
constants in this class.
"""
# Indicates that an unknown error occurred. This typically indicates that
# I/O failed during save.
ERROR_UNKNOWN = 1
# Indicates that errors during translation prevented saving. The errors
# should be available via the TranslationUnit's diagnostics.
ERROR_TRANSLATION_ERRORS = 2
# Indicates that the translation unit was somehow invalid.
ERROR_INVALID_TU = 3
def __init__(self, enumeration, message):
assert isinstance(enumeration, int)
if enumeration < 1 or enumeration > 3:
raise Exception("Encountered undefined TranslationUnit save error "
"constant: %d. Please file a bug to have this "
"value supported." % enumeration)
self.save_error = enumeration
Exception.__init__(self, 'Error %d: %s' % (enumeration, message))
### Structures and Utility Classes ###
class CachedProperty(object):
"""Decorator that lazy-loads the value of a property.
The first time the property is accessed, the original property function is
executed. The value it returns is set as the new value of that instance's
property, replacing the original method.
"""
def __init__(self, wrapped):
self.wrapped = wrapped
try:
self.__doc__ = wrapped.__doc__
except:
pass
def __get__(self, instance, instance_type=None):
if instance is None:
return self
value = self.wrapped(instance)
setattr(instance, self.wrapped.__name__, value)
return value
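# Hedged illustration (not part of cindex.py): after the first access the
# computed value replaces the descriptor on the instance, so the body runs once.
#   class Example(object):
#       @CachedProperty
#       def answer(self):
#           print("computed")
#           return 42
#   e = Example()
#   e.answer; e.answer   # "computed" is printed only once; both reads give 42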
class _CXString(Structure):
"""Helper for transforming CXString results."""
_fields_ = [("spelling", c_string_p), ("free", c_int)]
def __del__(self):
return
conf.lib.clang_disposeString(self)
@staticmethod
def from_result(res, fn, args):
assert isinstance(res, _CXString)
return conf.lib.clang_getCString(res)
class SourceLocation(Structure):
"""
A SourceLocation represents a particular location within a source file.
"""
_fields_ = [("ptr_data", c_void_p * 2), ("int_data", c_uint)]
_data = None
def _get_instantiation(self):
if self._data is None:
f, l, c, o = c_object_p(), c_uint(), c_uint(), c_uint()
conf.lib.clang_getInstantiationLocation(self, byref(f), byref(l),
byref(c), byref(o))
if f:
f = File(f)
else:
f = None
self._data = (f, int(l.value), int(c.value), int(o.value))
return self._data
@staticmethod
def from_position(tu, file, line, column):
"""
Retrieve the source location associated with a given file/line/column in
a particular translation unit.
"""
return conf.lib.clang_getLocation(tu, file, line, column)
@staticmethod
def from_offset(tu, file, offset):
"""Retrieve a SourceLocation from a given character offset.
tu -- TranslationUnit file belongs to
file -- File instance to obtain offset from
offset -- Integer character offset within file
"""
return conf.lib.clang_getLocationForOffset(tu, file, offset)
@property
def file(self):
"""Get the file represented by this source location."""
return self._get_instantiation()[0]
@property
def line(self):
"""Get the line represented by this source location."""
return self._get_instantiation()[1]
@property
def column(self):
"""Get the column represented by this source location."""
return self._get_instantiation()[2]
@property
def offset(self):
"""Get the file offset represented by this source location."""
return self._get_instantiation()[3]
def __eq__(self, other):
return conf.lib.clang_equalLocations(self, other)
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
if self.file:
filename = self.file.name
else:
filename = None
return "<SourceLocation file %r, line %r, column %r>" % (
filename, self.line, self.column)
class SourceRange(Structure):
"""
A SourceRange describes a range of source locations within the source
code.
"""
_fields_ = [
("ptr_data", c_void_p * 2),
("begin_int_data", c_uint),
("end_int_data", c_uint)]
# FIXME: Eliminate this and make normal constructor? Requires hiding ctypes
# object.
@staticmethod
def from_locations(start, end):
return conf.lib.clang_getRange(start, end)
@property
def start(self):
"""
Return a SourceLocation representing the first character within a
source range.
"""
return conf.lib.clang_getRangeStart(self)
@property
def end(self):
"""
Return a SourceLocation representing the last character within a
source range.
"""
return conf.lib.clang_getRangeEnd(self)
def __eq__(self, other):
return conf.lib.clang_equalRanges(self, other)
def __ne__(self, other):
return not self.__eq__(other)
def __contains__(self, other):
"""Useful to detect the Token/Lexer bug"""
if not isinstance(other, SourceLocation):
return False
if other.file is None and self.start.file is None:
pass
elif ( self.start.file.name != other.file.name or
other.file.name != self.end.file.name):
            # not in the same file
return False
# same file, in between lines
if self.start.line < other.line < self.end.line:
return True
elif self.start.line == other.line:
# same file first line
if self.start.column <= other.column:
return True
elif other.line == self.end.line:
# same file last line
if other.column <= self.end.column:
return True
return False
def __repr__(self):
return "<SourceRange start %r, end %r>" % (self.start, self.end)
class Diagnostic(object):
"""
A Diagnostic is a single instance of a Clang diagnostic. It includes the
diagnostic severity, the message, the location the diagnostic occurred, as
well as additional source ranges and associated fix-it hints.
"""
Ignored = 0
Note = 1
Warning = 2
Error = 3
Fatal = 4
DisplaySourceLocation = 0x01
DisplayColumn = 0x02
DisplaySourceRanges = 0x04
DisplayOption = 0x08
DisplayCategoryId = 0x10
DisplayCategoryName = 0x20
_FormatOptionsMask = 0x3f
def __init__(self, ptr):
self.ptr = ptr
def __del__(self):
conf.lib.clang_disposeDiagnostic(self)
@property
def severity(self):
return conf.lib.clang_getDiagnosticSeverity(self)
@property
def location(self):
return conf.lib.clang_getDiagnosticLocation(self)
@property
def spelling(self):
return str(conf.lib.clang_getDiagnosticSpelling(self))
@property
def ranges(self):
class RangeIterator:
def __init__(self, diag):
self.diag = diag
def __len__(self):
return int(conf.lib.clang_getDiagnosticNumRanges(self.diag))
def __getitem__(self, key):
if (key >= len(self)):
raise IndexError
return conf.lib.clang_getDiagnosticRange(self.diag, key)
return RangeIterator(self)
@property
def fixits(self):
class FixItIterator:
def __init__(self, diag):
self.diag = diag
def __len__(self):
return int(conf.lib.clang_getDiagnosticNumFixIts(self.diag))
def __getitem__(self, key):
range = SourceRange()
value = str(conf.lib.clang_getDiagnosticFixIt(self.diag, key,
byref(range)))
if len(value) == 0:
raise IndexError
return FixIt(range, value)
return FixItIterator(self)
@property
def children(self):
class ChildDiagnosticsIterator:
def __init__(self, diag):
self.diag_set = conf.lib.clang_getChildDiagnostics(diag)
def __len__(self):
return int(conf.lib.clang_getNumDiagnosticsInSet(self.diag_set))
def __getitem__(self, key):
diag = conf.lib.clang_getDiagnosticInSet(self.diag_set, key)
if not diag:
raise IndexError
return Diagnostic(diag)
return ChildDiagnosticsIterator(self)
@property
def category_number(self):
"""The category number for this diagnostic or 0 if unavailable."""
return conf.lib.clang_getDiagnosticCategory(self)
@property
def category_name(self):
"""The string name of the category for this diagnostic."""
return str(conf.lib.clang_getDiagnosticCategoryText(self))
@property
def option(self):
"""The command-line option that enables this diagnostic."""
return str(conf.lib.clang_getDiagnosticOption(self, None))
@property
def disable_option(self):
"""The command-line option that disables this diagnostic."""
disable = _CXString()
conf.lib.clang_getDiagnosticOption(self, byref(disable))
return str(conf.lib.clang_getCString(disable))
def format(self, options=None):
"""
Format this diagnostic for display. The options argument takes
Diagnostic.Display* flags, which can be combined using bitwise OR. If
the options argument is not provided, the default display options will
be used.
"""
if options is None:
options = conf.lib.clang_defaultDiagnosticDisplayOptions()
if options & ~Diagnostic._FormatOptionsMask:
raise ValueError('Invalid format options')
formatted = conf.lib.clang_formatDiagnostic(self, options)
return str(conf.lib.clang_getCString(formatted))
def __repr__(self):
return "<Diagnostic severity %r, location %r, spelling %r>" % (
self.severity, self.location, self.spelling)
def __str__(self):
return self.format()
def from_param(self):
return self.ptr
class FixIt(object):
"""
A FixIt represents a transformation to be applied to the source to
"fix-it". The fix-it shouldbe applied by replacing the given source range
with the given value.
"""
def __init__(self, range, value):
self.range = range
self.value = value
def __repr__(self):
return "<FixIt range %r, value %r>" % (self.range, self.value)
class TokenGroup(object):
"""Helper class to facilitate token management.
Tokens are allocated from libclang in chunks. They must be disposed of as a
collective group.
One purpose of this class is for instances to represent groups of allocated
tokens. Each token in a group contains a reference back to an instance of
this class. When all tokens from a group are garbage collected, it allows
this class to be garbage collected. When this class is garbage collected,
it calls the libclang destructor which invalidates all tokens in the group.
You should not instantiate this class outside of this module.
"""
def __init__(self, tu, memory, count):
self._tu = tu
self._memory = memory
self._count = count
def __del__(self):
conf.lib.clang_disposeTokens(self._tu, self._memory, self._count)
@staticmethod
def get_tokens(tu, extent):
"""Helper method to return all tokens in an extent.
This functionality is needed multiple places in this module. We define
it here because it seems like a logical place.
"""
tokens_memory = POINTER(Token)()
tokens_count = c_uint()
conf.lib.clang_tokenize(tu, extent, byref(tokens_memory),
byref(tokens_count))
count = int(tokens_count.value)
# If we get no tokens, no memory was allocated. Be sure not to return
# anything and potentially call a destructor on nothing.
if count < 1:
return
tokens_array = cast(tokens_memory, POINTER(Token * count)).contents
token_group = TokenGroup(tu, tokens_memory, tokens_count)
for i in xrange(0, count):
token = Token()
token.int_data = tokens_array[i].int_data
token.ptr_data = tokens_array[i].ptr_data
token._tu = tu
token._group = token_group
yield token
class TokenKind(object):
"""Describes a specific type of a Token."""
_value_map = {} # int -> TokenKind
def __init__(self, value, name):
"""Create a new TokenKind instance from a numeric value and a name."""
self.value = value
self.name = name
def __repr__(self):
return 'TokenKind.%s' % (self.name,)
@staticmethod
def from_value(value):
"""Obtain a registered TokenKind instance from its value."""
result = TokenKind._value_map.get(value, None)
if result is None:
raise ValueError('Unknown TokenKind: %d' % value)
return result
@staticmethod
def register(value, name):
"""Register a new TokenKind enumeration.
This should only be called at module load time by code within this
package.
"""
if value in TokenKind._value_map:
raise ValueError('TokenKind already registered: %d' % value)
kind = TokenKind(value, name)
TokenKind._value_map[value] = kind
setattr(TokenKind, name, kind)
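# Hedged illustration: registration normally happens at module load time from
# the enumerations table imported above, conceptually e.g.
#   TokenKind.register(0, 'PUNCTUATION')
# after which TokenKind.PUNCTUATION and TokenKind.from_value(0) both resolve to
# the same instance.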
### Cursor Kinds ###
class BaseEnumeration(object):
"""
Common base class for named enumerations held in sync with Index.h values.
Subclasses must define their own _kinds and _name_map members, as:
_kinds = []
_name_map = None
These values hold the per-subclass instances and value-to-name mappings,
respectively.
"""
def __init__(self, value):
if value >= len(self.__class__._kinds):
self.__class__._kinds += [None] * (value - len(self.__class__._kinds) + 1)
if self.__class__._kinds[value] is not None:
raise ValueError('{0} value {1} already loaded'.format(
str(self.__class__), value))
self.value = value
self.__class__._kinds[value] = self
self.__class__._name_map = None
def from_param(self):
return self.value
@property
def name(self):
"""Get the enumeration name of this cursor kind."""
if self._name_map is None:
self._name_map = {}
for key, value in self.__class__.__dict__.items():
if isinstance(value, self.__class__):
self._name_map[value] = key
return str(self._name_map[self])
@classmethod
def from_id(cls, id):
if id >= len(cls._kinds) or cls._kinds[id] is None:
raise ValueError('Unknown template argument kind %d' % id)
return cls._kinds[id]
def __repr__(self):
return '%s.%s' % (self.__class__, self.name,)
class CursorKind(BaseEnumeration):
"""
A CursorKind describes the kind of entity that a cursor points to.
"""
# The required BaseEnumeration declarations.
_kinds = []
_name_map = None
@staticmethod
def get_all_kinds():
"""Return all CursorKind enumeration instances."""
return [x for x in CursorKind._kinds if x]
def is_declaration(self):
"""Test if this is a declaration kind."""
return conf.lib.clang_isDeclaration(self)
def is_reference(self):
"""Test if this is a reference kind."""
return conf.lib.clang_isReference(self)
def is_expression(self):
"""Test if this is an expression kind."""
return conf.lib.clang_isExpression(self)
def is_statement(self):
"""Test if this is a statement kind."""
return conf.lib.clang_isStatement(self)
def is_attribute(self):
"""Test if this is an attribute kind."""
return conf.lib.clang_isAttribute(self)
def is_invalid(self):
"""Test if this is an invalid kind."""
return conf.lib.clang_isInvalid(self)
def is_translation_unit(self):
"""Test if this is a translation unit kind."""
return conf.lib.clang_isTranslationUnit(self)
def is_preprocessing(self):
"""Test if this is a preprocessing kind."""
return conf.lib.clang_isPreprocessing(self)
def is_unexposed(self):
"""Test if this is an unexposed kind."""
return conf.lib.clang_isUnexposed(self)
def __repr__(self):
return 'CursorKind.%s' % (self.name,)
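    # Usage sketch (illustrative only): 'c' is assumed to be a Cursor obtained
    # from a parsed translation unit.
    #
    #   if c.kind.is_declaration():
    #       print('declaration: %s' % c.spelling)
    #   if c.kind == CursorKind.FUNCTION_DECL:
    #       print('function: %s' % c.displayname)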
###
# Declaration Kinds
# A declaration whose specific kind is not exposed via this interface.
#
# Unexposed declarations have the same operations as any other kind of
# declaration; one can extract their location information, spelling, find their
# definitions, etc. However, the specific kind of the declaration is not
# reported.
CursorKind.UNEXPOSED_DECL = CursorKind(1)
# A C or C++ struct.
CursorKind.STRUCT_DECL = CursorKind(2)
# A C or C++ union.
CursorKind.UNION_DECL = CursorKind(3)
# A C++ class.
CursorKind.CLASS_DECL = CursorKind(4)
# An enumeration.
CursorKind.ENUM_DECL = CursorKind(5)
# A field (in C) or non-static data member (in C++) in a struct, union, or C++
# class.
CursorKind.FIELD_DECL = CursorKind(6)
# An enumerator constant.
CursorKind.ENUM_CONSTANT_DECL = CursorKind(7)
# A function.
CursorKind.FUNCTION_DECL = CursorKind(8)
# A variable.
CursorKind.VAR_DECL = CursorKind(9)
# A function or method parameter.
CursorKind.PARM_DECL = CursorKind(10)
# An Objective-C @interface.
CursorKind.OBJC_INTERFACE_DECL = CursorKind(11)
# An Objective-C @interface for a category.
CursorKind.OBJC_CATEGORY_DECL = CursorKind(12)
# An Objective-C @protocol declaration.
CursorKind.OBJC_PROTOCOL_DECL = CursorKind(13)
# An Objective-C @property declaration.
CursorKind.OBJC_PROPERTY_DECL = CursorKind(14)
# An Objective-C instance variable.
CursorKind.OBJC_IVAR_DECL = CursorKind(15)
# An Objective-C instance method.
CursorKind.OBJC_INSTANCE_METHOD_DECL = CursorKind(16)
# An Objective-C class method.
CursorKind.OBJC_CLASS_METHOD_DECL = CursorKind(17)
# An Objective-C @implementation.
CursorKind.OBJC_IMPLEMENTATION_DECL = CursorKind(18)
# An Objective-C @implementation for a category.
CursorKind.OBJC_CATEGORY_IMPL_DECL = CursorKind(19)
# A typedef.
CursorKind.TYPEDEF_DECL = CursorKind(20)
# A C++ class method.
CursorKind.CXX_METHOD = CursorKind(21)
# A C++ namespace.
CursorKind.NAMESPACE = CursorKind(22)
# A linkage specification, e.g. 'extern "C"'.
CursorKind.LINKAGE_SPEC = CursorKind(23)
# A C++ constructor.
CursorKind.CONSTRUCTOR = CursorKind(24)
# A C++ destructor.
CursorKind.DESTRUCTOR = CursorKind(25)
# A C++ conversion function.
CursorKind.CONVERSION_FUNCTION = CursorKind(26)
# A C++ template type parameter
CursorKind.TEMPLATE_TYPE_PARAMETER = CursorKind(27)
# A C++ non-type template parameter.
CursorKind.TEMPLATE_NON_TYPE_PARAMETER = CursorKind(28)
# A C++ template template parameter.
CursorKind.TEMPLATE_TEMPLATE_PARAMETER = CursorKind(29)
# A C++ function template.
CursorKind.FUNCTION_TEMPLATE = CursorKind(30)
# A C++ class template.
CursorKind.CLASS_TEMPLATE = CursorKind(31)
# A C++ class template partial specialization.
CursorKind.CLASS_TEMPLATE_PARTIAL_SPECIALIZATION = CursorKind(32)
# A C++ namespace alias declaration.
CursorKind.NAMESPACE_ALIAS = CursorKind(33)
# A C++ using directive
CursorKind.USING_DIRECTIVE = CursorKind(34)
# A C++ using declaration
CursorKind.USING_DECLARATION = CursorKind(35)
# A Type alias decl.
CursorKind.TYPE_ALIAS_DECL = CursorKind(36)
# An Objective-C synthesize decl
CursorKind.OBJC_SYNTHESIZE_DECL = CursorKind(37)
# An Objective-C dynamic decl
CursorKind.OBJC_DYNAMIC_DECL = CursorKind(38)
# A C++ access specifier decl.
CursorKind.CXX_ACCESS_SPEC_DECL = CursorKind(39)
###
# Reference Kinds
CursorKind.OBJC_SUPER_CLASS_REF = CursorKind(40)
CursorKind.OBJC_PROTOCOL_REF = CursorKind(41)
CursorKind.OBJC_CLASS_REF = CursorKind(42)
# A reference to a type declaration.
#
# A type reference occurs anywhere where a type is named but not
# declared. For example, given:
# typedef unsigned size_type;
# size_type size;
#
# The typedef is a declaration of size_type (CXCursor_TypedefDecl),
# while the type of the variable "size" is referenced. The cursor
# referenced by the type of size is the typedef for size_type.
CursorKind.TYPE_REF = CursorKind(43)
CursorKind.CXX_BASE_SPECIFIER = CursorKind(44)
# A reference to a class template, function template, template
# template parameter, or class template partial specialization.
CursorKind.TEMPLATE_REF = CursorKind(45)
# A reference to a namespace or namespace alias.
CursorKind.NAMESPACE_REF = CursorKind(46)
# A reference to a member of a struct, union, or class that occurs in
# some non-expression context, e.g., a designated initializer.
CursorKind.MEMBER_REF = CursorKind(47)
# A reference to a labeled statement.
CursorKind.LABEL_REF = CursorKind(48)
# A reference to a set of overloaded functions or function templates
# that has not yet been resolved to a specific function or function template.
CursorKind.OVERLOADED_DECL_REF = CursorKind(49)
# A reference to a variable that occurs in some non-expression
# context, e.g., a C++ lambda capture list.
CursorKind.VARIABLE_REF = CursorKind(50)
###
# Invalid/Error Kinds
CursorKind.INVALID_FILE = CursorKind(70)
CursorKind.NO_DECL_FOUND = CursorKind(71)
CursorKind.NOT_IMPLEMENTED = CursorKind(72)
CursorKind.INVALID_CODE = CursorKind(73)
###
# Expression Kinds
# An expression whose specific kind is not exposed via this interface.
#
# Unexposed expressions have the same operations as any other kind of
# expression; one can extract their location information, spelling, children,
# etc. However, the specific kind of the expression is not reported.
CursorKind.UNEXPOSED_EXPR = CursorKind(100)
# An expression that refers to some value declaration, such as a function,
# variable, or enumerator.
CursorKind.DECL_REF_EXPR = CursorKind(101)
# An expression that refers to a member of a struct, union, class, Objective-C
# class, etc.
CursorKind.MEMBER_REF_EXPR = CursorKind(102)
# An expression that calls a function.
CursorKind.CALL_EXPR = CursorKind(103)
# An expression that sends a message to an Objective-C object or class.
CursorKind.OBJC_MESSAGE_EXPR = CursorKind(104)
# An expression that represents a block literal.
CursorKind.BLOCK_EXPR = CursorKind(105)
# An integer literal.
CursorKind.INTEGER_LITERAL = CursorKind(106)
# A floating point number literal.
CursorKind.FLOATING_LITERAL = CursorKind(107)
# An imaginary number literal.
CursorKind.IMAGINARY_LITERAL = CursorKind(108)
# A string literal.
CursorKind.STRING_LITERAL = CursorKind(109)
# A character literal.
CursorKind.CHARACTER_LITERAL = CursorKind(110)
# A parenthesized expression, e.g. "(1)".
#
# This AST node is only formed if full location information is requested.
CursorKind.PAREN_EXPR = CursorKind(111)
# This represents a unary expression (except sizeof and alignof).
CursorKind.UNARY_OPERATOR = CursorKind(112)
# [C99 6.5.2.1] Array Subscripting.
CursorKind.ARRAY_SUBSCRIPT_EXPR = CursorKind(113)
# A builtin binary operation expression such as "x + y" or
# "x <= y".
CursorKind.BINARY_OPERATOR = CursorKind(114)
# Compound assignment such as "+=".
CursorKind.COMPOUND_ASSIGNMENT_OPERATOR = CursorKind(115)
# The ?: ternary operator.
CursorKind.CONDITIONAL_OPERATOR = CursorKind(116)
# An explicit cast in C (C99 6.5.4) or a C-style cast in C++
# (C++ [expr.cast]), which uses the syntax (Type)expr.
#
# For example: (int)f.
CursorKind.CSTYLE_CAST_EXPR = CursorKind(117)
# [C99 6.5.2.5]
CursorKind.COMPOUND_LITERAL_EXPR = CursorKind(118)
# Describes a C or C++ initializer list.
CursorKind.INIT_LIST_EXPR = CursorKind(119)
# The GNU address of label extension, representing &&label.
CursorKind.ADDR_LABEL_EXPR = CursorKind(120)
# This is the GNU Statement Expression extension: ({int X=4; X;})
CursorKind.StmtExpr = CursorKind(121)
# Represents a C11 generic selection.
CursorKind.GENERIC_SELECTION_EXPR = CursorKind(122)
# Implements the GNU __null extension, which is a name for a null
# pointer constant that has integral type (e.g., int or long) and is the same
# size and alignment as a pointer.
#
# The __null extension is typically only used by system headers, which define
# NULL as __null in C++ rather than using 0 (which is an integer that may not
# match the size of a pointer).
CursorKind.GNU_NULL_EXPR = CursorKind(123)
# C++'s static_cast<> expression.
CursorKind.CXX_STATIC_CAST_EXPR = CursorKind(124)
# C++'s dynamic_cast<> expression.
CursorKind.CXX_DYNAMIC_CAST_EXPR = CursorKind(125)
# C++'s reinterpret_cast<> expression.
CursorKind.CXX_REINTERPRET_CAST_EXPR = CursorKind(126)
# C++'s const_cast<> expression.
CursorKind.CXX_CONST_CAST_EXPR = CursorKind(127)
# Represents an explicit C++ type conversion that uses "functional"
# notation (C++ [expr.type.conv]).
#
# Example:
# \code
# x = int(0.5);
# \endcode
CursorKind.CXX_FUNCTIONAL_CAST_EXPR = CursorKind(128)
# A C++ typeid expression (C++ [expr.typeid]).
CursorKind.CXX_TYPEID_EXPR = CursorKind(129)
# [C++ 2.13.5] C++ Boolean Literal.
CursorKind.CXX_BOOL_LITERAL_EXPR = CursorKind(130)
# [C++0x 2.14.7] C++ Pointer Literal.
CursorKind.CXX_NULL_PTR_LITERAL_EXPR = CursorKind(131)
# Represents the "this" expression in C++
CursorKind.CXX_THIS_EXPR = CursorKind(132)
# [C++ 15] C++ Throw Expression.
#
# This handles 'throw' and 'throw' assignment-expression. When
# assignment-expression isn't present, Op will be null.
CursorKind.CXX_THROW_EXPR = CursorKind(133)
# A new expression for memory allocation and constructor calls, e.g:
# "new CXXNewExpr(foo)".
CursorKind.CXX_NEW_EXPR = CursorKind(134)
# A delete expression for memory deallocation and destructor calls,
# e.g. "delete[] pArray".
CursorKind.CXX_DELETE_EXPR = CursorKind(135)
# Represents a unary expression.
CursorKind.CXX_UNARY_EXPR = CursorKind(136)
# ObjCStringLiteral, used for Objective-C string literals i.e. "foo".
CursorKind.OBJC_STRING_LITERAL = CursorKind(137)
# ObjCEncodeExpr, used for @encode in Objective-C.
CursorKind.OBJC_ENCODE_EXPR = CursorKind(138)
# ObjCSelectorExpr, used for @selector in Objective-C.
CursorKind.OBJC_SELECTOR_EXPR = CursorKind(139)
# Objective-C's protocol expression.
CursorKind.OBJC_PROTOCOL_EXPR = CursorKind(140)
# An Objective-C "bridged" cast expression, which casts between
# Objective-C pointers and C pointers, transferring ownership in the process.
#
# \code
# NSString *str = (__bridge_transfer NSString *)CFCreateString();
# \endcode
CursorKind.OBJC_BRIDGE_CAST_EXPR = CursorKind(141)
# Represents a C++0x pack expansion that produces a sequence of
# expressions.
#
# A pack expansion expression contains a pattern (which itself is an
# expression) followed by an ellipsis. For example:
CursorKind.PACK_EXPANSION_EXPR = CursorKind(142)
# Represents an expression that computes the length of a parameter
# pack.
CursorKind.SIZE_OF_PACK_EXPR = CursorKind(143)
# Represents a C++ lambda expression that produces a local function
# object.
#
# \code
# void abssort(float *x, unsigned N) {
# std::sort(x, x + N,
# [](float a, float b) {
# return std::abs(a) < std::abs(b);
# });
# }
# \endcode
CursorKind.LAMBDA_EXPR = CursorKind(144)
# Objective-C Boolean Literal.
CursorKind.OBJ_BOOL_LITERAL_EXPR = CursorKind(145)
# Represents the "self" expression in a ObjC method.
CursorKind.OBJ_SELF_EXPR = CursorKind(146)
# OpenMP 4.0 [2.4, Array Section].
CursorKind.OMP_ARRAY_SECTION_EXPR = CursorKind(147)
# Represents an @available(...) check.
CursorKind.OBJC_AVAILABILITY_CHECK_EXPR = CursorKind(148)
# A statement whose specific kind is not exposed via this interface.
#
# Unexposed statements have the same operations as any other kind of statement;
# one can extract their location information, spelling, children, etc. However,
# the specific kind of the statement is not reported.
CursorKind.UNEXPOSED_STMT = CursorKind(200)
# A labelled statement in a function.
CursorKind.LABEL_STMT = CursorKind(201)
# A compound statement
CursorKind.COMPOUND_STMT = CursorKind(202)
# A case statement.
CursorKind.CASE_STMT = CursorKind(203)
# A default statement.
CursorKind.DEFAULT_STMT = CursorKind(204)
# An if statement.
CursorKind.IF_STMT = CursorKind(205)
# A switch statement.
CursorKind.SWITCH_STMT = CursorKind(206)
# A while statement.
CursorKind.WHILE_STMT = CursorKind(207)
# A do statement.
CursorKind.DO_STMT = CursorKind(208)
# A for statement.
CursorKind.FOR_STMT = CursorKind(209)
# A goto statement.
CursorKind.GOTO_STMT = CursorKind(210)
# An indirect goto statement.
CursorKind.INDIRECT_GOTO_STMT = CursorKind(211)
# A continue statement.
CursorKind.CONTINUE_STMT = CursorKind(212)
# A break statement.
CursorKind.BREAK_STMT = CursorKind(213)
# A return statement.
CursorKind.RETURN_STMT = CursorKind(214)
# A GNU-style inline assembler statement.
CursorKind.ASM_STMT = CursorKind(215)
# Objective-C's overall @try-@catch-@finally statement.
CursorKind.OBJC_AT_TRY_STMT = CursorKind(216)
# Objective-C's @catch statement.
CursorKind.OBJC_AT_CATCH_STMT = CursorKind(217)
# Objective-C's @finally statement.
CursorKind.OBJC_AT_FINALLY_STMT = CursorKind(218)
# Objective-C's @throw statement.
CursorKind.OBJC_AT_THROW_STMT = CursorKind(219)
# Objective-C's @synchronized statement.
CursorKind.OBJC_AT_SYNCHRONIZED_STMT = CursorKind(220)
# Objective-C's autorelease pool statement.
CursorKind.OBJC_AUTORELEASE_POOL_STMT = CursorKind(221)
# Objective-C's for collection statement.
CursorKind.OBJC_FOR_COLLECTION_STMT = CursorKind(222)
# C++'s catch statement.
CursorKind.CXX_CATCH_STMT = CursorKind(223)
# C++'s try statement.
CursorKind.CXX_TRY_STMT = CursorKind(224)
# C++'s for (* : *) statement.
CursorKind.CXX_FOR_RANGE_STMT = CursorKind(225)
# Windows Structured Exception Handling's try statement.
CursorKind.SEH_TRY_STMT = CursorKind(226)
# Windows Structured Exception Handling's except statement.
CursorKind.SEH_EXCEPT_STMT = CursorKind(227)
# Windows Structured Exception Handling's finally statement.
CursorKind.SEH_FINALLY_STMT = CursorKind(228)
# A MS inline assembly statement extension.
CursorKind.MS_ASM_STMT = CursorKind(229)
# The null statement.
CursorKind.NULL_STMT = CursorKind(230)
# Adaptor class for mixing declarations with statements and expressions.
CursorKind.DECL_STMT = CursorKind(231)
# OpenMP parallel directive.
CursorKind.OMP_PARALLEL_DIRECTIVE = CursorKind(232)
# OpenMP SIMD directive.
CursorKind.OMP_SIMD_DIRECTIVE = CursorKind(233)
# OpenMP for directive.
CursorKind.OMP_FOR_DIRECTIVE = CursorKind(234)
# OpenMP sections directive.
CursorKind.OMP_SECTIONS_DIRECTIVE = CursorKind(235)
# OpenMP section directive.
CursorKind.OMP_SECTION_DIRECTIVE = CursorKind(236)
# OpenMP single directive.
CursorKind.OMP_SINGLE_DIRECTIVE = CursorKind(237)
# OpenMP parallel for directive.
CursorKind.OMP_PARALLEL_FOR_DIRECTIVE = CursorKind(238)
# OpenMP parallel sections directive.
CursorKind.OMP_PARALLEL_SECTIONS_DIRECTIVE = CursorKind(239)
# OpenMP task directive.
CursorKind.OMP_TASK_DIRECTIVE = CursorKind(240)
# OpenMP master directive.
CursorKind.OMP_MASTER_DIRECTIVE = CursorKind(241)
# OpenMP critical directive.
CursorKind.OMP_CRITICAL_DIRECTIVE = CursorKind(242)
# OpenMP taskyield directive.
CursorKind.OMP_TASKYIELD_DIRECTIVE = CursorKind(243)
# OpenMP barrier directive.
CursorKind.OMP_BARRIER_DIRECTIVE = CursorKind(244)
# OpenMP taskwait directive.
CursorKind.OMP_TASKWAIT_DIRECTIVE = CursorKind(245)
# OpenMP flush directive.
CursorKind.OMP_FLUSH_DIRECTIVE = CursorKind(246)
# Windows Structured Exception Handling's leave statement.
CursorKind.SEH_LEAVE_STMT = CursorKind(247)
# OpenMP ordered directive.
CursorKind.OMP_ORDERED_DIRECTIVE = CursorKind(248)
# OpenMP atomic directive.
CursorKind.OMP_ATOMIC_DIRECTIVE = CursorKind(249)
# OpenMP for SIMD directive.
CursorKind.OMP_FOR_SIMD_DIRECTIVE = CursorKind(250)
# OpenMP parallel for SIMD directive.
CursorKind.OMP_PARALLELFORSIMD_DIRECTIVE = CursorKind(251)
# OpenMP target directive.
CursorKind.OMP_TARGET_DIRECTIVE = CursorKind(252)
# OpenMP teams directive.
CursorKind.OMP_TEAMS_DIRECTIVE = CursorKind(253)
# OpenMP taskgroup directive.
CursorKind.OMP_TASKGROUP_DIRECTIVE = CursorKind(254)
# OpenMP cancellation point directive.
CursorKind.OMP_CANCELLATION_POINT_DIRECTIVE = CursorKind(255)
# OpenMP cancel directive.
CursorKind.OMP_CANCEL_DIRECTIVE = CursorKind(256)
# OpenMP target data directive.
CursorKind.OMP_TARGET_DATA_DIRECTIVE = CursorKind(257)
# OpenMP taskloop directive.
CursorKind.OMP_TASK_LOOP_DIRECTIVE = CursorKind(258)
# OpenMP taskloop simd directive.
CursorKind.OMP_TASK_LOOP_SIMD_DIRECTIVE = CursorKind(259)
# OpenMP distribute directive.
CursorKind.OMP_DISTRIBUTE_DIRECTIVE = CursorKind(260)
# OpenMP target enter data directive.
CursorKind.OMP_TARGET_ENTER_DATA_DIRECTIVE = CursorKind(261)
# OpenMP target exit data directive.
CursorKind.OMP_TARGET_EXIT_DATA_DIRECTIVE = CursorKind(262)
# OpenMP target parallel directive.
CursorKind.OMP_TARGET_PARALLEL_DIRECTIVE = CursorKind(263)
# OpenMP target parallel for directive.
CursorKind.OMP_TARGET_PARALLELFOR_DIRECTIVE = CursorKind(264)
# OpenMP target update directive.
CursorKind.OMP_TARGET_UPDATE_DIRECTIVE = CursorKind(265)
# OpenMP distribute parallel for directive.
CursorKind.OMP_DISTRIBUTE_PARALLELFOR_DIRECTIVE = CursorKind(266)
# OpenMP distribute parallel for simd directive.
CursorKind.OMP_DISTRIBUTE_PARALLEL_FOR_SIMD_DIRECTIVE = CursorKind(267)
# OpenMP distribute simd directive.
CursorKind.OMP_DISTRIBUTE_SIMD_DIRECTIVE = CursorKind(268)
# OpenMP target parallel for simd directive.
CursorKind.OMP_TARGET_PARALLEL_FOR_SIMD_DIRECTIVE = CursorKind(269)
# OpenMP target simd directive.
CursorKind.OMP_TARGET_SIMD_DIRECTIVE = CursorKind(270)
# OpenMP teams distribute directive.
CursorKind.OMP_TEAMS_DISTRIBUTE_DIRECTIVE = CursorKind(271)
###
# Other Kinds
# Cursor that represents the translation unit itself.
#
# The translation unit cursor exists primarily to act as the root cursor for
# traversing the contents of a translation unit.
CursorKind.TRANSLATION_UNIT = CursorKind(300)
###
# Attributes
# An attribute whose specific kind is not exposed via this interface.
CursorKind.UNEXPOSED_ATTR = CursorKind(400)
CursorKind.IB_ACTION_ATTR = CursorKind(401)
CursorKind.IB_OUTLET_ATTR = CursorKind(402)
CursorKind.IB_OUTLET_COLLECTION_ATTR = CursorKind(403)
CursorKind.CXX_FINAL_ATTR = CursorKind(404)
CursorKind.CXX_OVERRIDE_ATTR = CursorKind(405)
CursorKind.ANNOTATE_ATTR = CursorKind(406)
CursorKind.ASM_LABEL_ATTR = CursorKind(407)
CursorKind.PACKED_ATTR = CursorKind(408)
CursorKind.PURE_ATTR = CursorKind(409)
CursorKind.CONST_ATTR = CursorKind(410)
CursorKind.NODUPLICATE_ATTR = CursorKind(411)
CursorKind.CUDACONSTANT_ATTR = CursorKind(412)
CursorKind.CUDADEVICE_ATTR = CursorKind(413)
CursorKind.CUDAGLOBAL_ATTR = CursorKind(414)
CursorKind.CUDAHOST_ATTR = CursorKind(415)
CursorKind.CUDASHARED_ATTR = CursorKind(416)
CursorKind.VISIBILITY_ATTR = CursorKind(417)
CursorKind.DLLEXPORT_ATTR = CursorKind(418)
CursorKind.DLLIMPORT_ATTR = CursorKind(419)
###
# Preprocessing
CursorKind.PREPROCESSING_DIRECTIVE = CursorKind(500)
CursorKind.MACRO_DEFINITION = CursorKind(501)
CursorKind.MACRO_INSTANTIATION = CursorKind(502)
CursorKind.INCLUSION_DIRECTIVE = CursorKind(503)
###
# Extra declaration
# A module import declaration.
CursorKind.MODULE_IMPORT_DECL = CursorKind(600)
# A type alias template declaration
CursorKind.TYPE_ALIAS_TEMPLATE_DECL = CursorKind(601)
# A static_assert or _Static_assert node
CursorKind.STATIC_ASSERT = CursorKind(602)
# A friend declaration
CursorKind.FRIEND_DECL = CursorKind(603)
# A code completion overload candidate.
CursorKind.OVERLOAD_CANDIDATE = CursorKind(700)
### Template Argument Kinds ###
class TemplateArgumentKind(BaseEnumeration):
"""
A TemplateArgumentKind describes the kind of entity that a template argument
represents.
"""
# The required BaseEnumeration declarations.
_kinds = []
_name_map = None
TemplateArgumentKind.NULL = TemplateArgumentKind(0)
TemplateArgumentKind.TYPE = TemplateArgumentKind(1)
TemplateArgumentKind.DECLARATION = TemplateArgumentKind(2)
TemplateArgumentKind.NULLPTR = TemplateArgumentKind(3)
TemplateArgumentKind.INTEGRAL = TemplateArgumentKind(4)
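# Usage sketch (illustrative only): template arguments of a suitable cursor
# (e.g. a class template specialization) can be inspected with the Cursor
# methods defined below; 'c' is assumed to be such a cursor.
#
#   for i in xrange(c.get_num_template_arguments()):
#       kind = c.get_template_argument_kind(i)
#       if kind == TemplateArgumentKind.TYPE:
#           print(c.get_template_argument_type(i).spelling)
#       elif kind == TemplateArgumentKind.INTEGRAL:
#           print(c.get_template_argument_value(i))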
### Cursors ###
class Cursor(Structure):
"""
The Cursor class represents a reference to an element within the AST. It
acts as a kind of iterator.
"""
_fields_ = [("_kind_id", c_int), ("xdata", c_int), ("data", c_void_p * 3)]
@staticmethod
def from_location(tu, location):
# We store a reference to the TU in the instance so the TU won't get
# collected before the cursor.
cursor = conf.lib.clang_getCursor(tu, location)
cursor._tu = tu
return cursor
def __eq__(self, other):
return conf.lib.clang_equalCursors(self, other)
def __ne__(self, other):
return not self.__eq__(other)
def is_definition(self):
"""
Returns true if the declaration pointed at by the cursor is also a
definition of that entity.
"""
return conf.lib.clang_isCursorDefinition(self)
def is_const_method(self):
"""Returns True if the cursor refers to a C++ member function or member
function template that is declared 'const'.
"""
return conf.lib.clang_CXXMethod_isConst(self)
def is_converting_constructor(self):
"""Returns True if the cursor refers to a C++ converting constructor.
"""
return conf.lib.clang_CXXConstructor_isConvertingConstructor(self)
def is_copy_constructor(self):
"""Returns True if the cursor refers to a C++ copy constructor.
"""
return conf.lib.clang_CXXConstructor_isCopyConstructor(self)
def is_default_constructor(self):
"""Returns True if the cursor refers to a C++ default constructor.
"""
return conf.lib.clang_CXXConstructor_isDefaultConstructor(self)
def is_move_constructor(self):
"""Returns True if the cursor refers to a C++ move constructor.
"""
return conf.lib.clang_CXXConstructor_isMoveConstructor(self)
def is_default_method(self):
"""Returns True if the cursor refers to a C++ member function or member
function template that is declared '= default'.
"""
return conf.lib.clang_CXXMethod_isDefaulted(self)
def is_mutable_field(self):
"""Returns True if the cursor refers to a C++ field that is declared
'mutable'.
"""
return conf.lib.clang_CXXField_isMutable(self)
def is_pure_virtual_method(self):
"""Returns True if the cursor refers to a C++ member function or member
function template that is declared pure virtual.
"""
return conf.lib.clang_CXXMethod_isPureVirtual(self)
def is_static_method(self):
"""Returns True if the cursor refers to a C++ member function or member
function template that is declared 'static'.
"""
return conf.lib.clang_CXXMethod_isStatic(self)
def is_virtual_method(self):
"""Returns True if the cursor refers to a C++ member function or member
function template that is declared 'virtual'.
"""
return conf.lib.clang_CXXMethod_isVirtual(self)
def get_definition(self):
"""
If the cursor is a reference to a declaration or a declaration of
some entity, return a cursor that points to the definition of that
entity.
"""
# TODO: Should probably check that this is either a reference or
# declaration prior to issuing the lookup.
return conf.lib.clang_getCursorDefinition(self)
def get_usr(self):
"""Return the Unified Symbol Resultion (USR) for the entity referenced
by the given cursor (or None).
A Unified Symbol Resolution (USR) is a string that identifies a
particular entity (function, class, variable, etc.) within a
program. USRs can be compared across translation units to determine,
e.g., when references in one translation refer to an entity defined in
another translation unit."""
return conf.lib.clang_getCursorUSR(self)
@property
def kind(self):
"""Return the kind of this cursor."""
return CursorKind.from_id(self._kind_id)
@property
def spelling(self):
"""Return the spelling of the entity pointed at by the cursor."""
if not hasattr(self, '_spelling'):
self._spelling = str(conf.lib.clang_getCursorSpelling(self))
return str(self._spelling)
@property
def displayname(self):
"""
Return the display name for the entity referenced by this cursor.
The display name contains extra information that helps identify the
cursor, such as the parameters of a function or template or the
arguments of a class template specialization.
"""
if not hasattr(self, '_displayname'):
self._displayname = str(conf.lib.clang_getCursorDisplayName(self))
return self._displayname
@property
def mangled_name(self):
"""Return the mangled name for the entity referenced by this cursor."""
if not hasattr(self, '_mangled_name'):
self._mangled_name = str(conf.lib.clang_Cursor_getMangling(self))
return self._mangled_name
@property
def location(self):
"""
Return the source location (the starting character) of the entity
pointed at by the cursor.
"""
if not hasattr(self, '_loc'):
self._loc = conf.lib.clang_getCursorLocation(self)
return self._loc
@property
def extent(self):
"""
Return the source range (the range of text) occupied by the entity
pointed at by the cursor.
"""
if not hasattr(self, '_extent'):
self._extent = conf.lib.clang_getCursorExtent(self)
return self._extent
@property
def storage_class(self):
"""
Retrieves the storage class (if any) of the entity pointed at by the
cursor.
"""
if not hasattr(self, '_storage_class'):
self._storage_class = conf.lib.clang_Cursor_getStorageClass(self)
return StorageClass.from_id(self._storage_class)
@property
def access_specifier(self):
"""
Retrieves the access specifier (if any) of the entity pointed at by the
cursor.
"""
if not hasattr(self, '_access_specifier'):
self._access_specifier = conf.lib.clang_getCXXAccessSpecifier(self)
return AccessSpecifier.from_id(self._access_specifier)
@property
def type(self):
"""
Retrieve the Type (if any) of the entity pointed at by the cursor.
"""
if not hasattr(self, '_type'):
self._type = conf.lib.clang_getCursorType(self)
return self._type
@property
def canonical(self):
"""Return the canonical Cursor corresponding to this Cursor.
The canonical cursor is the cursor which is representative for the
underlying entity. For example, if you have multiple forward
declarations for the same class, the canonical cursor for the forward
declarations will be identical.
"""
if not hasattr(self, '_canonical'):
self._canonical = conf.lib.clang_getCanonicalCursor(self)
return self._canonical
@property
def result_type(self):
"""Retrieve the Type of the result for this Cursor."""
if not hasattr(self, '_result_type'):
self._result_type = conf.lib.clang_getResultType(self.type)
return self._result_type
@property
def underlying_typedef_type(self):
"""Return the underlying type of a typedef declaration.
Returns a Type for the typedef this cursor is a declaration for. If
the current cursor is not a typedef, this raises.
"""
if not hasattr(self, '_underlying_type'):
assert self.kind.is_declaration()
self._underlying_type = \
conf.lib.clang_getTypedefDeclUnderlyingType(self)
return self._underlying_type
@property
def enum_type(self):
"""Return the integer type of an enum declaration.
Returns a Type corresponding to an integer. If the cursor is not for an
enum, this raises.
"""
if not hasattr(self, '_enum_type'):
assert self.kind == CursorKind.ENUM_DECL
self._enum_type = conf.lib.clang_getEnumDeclIntegerType(self)
return self._enum_type
@property
def enum_value(self):
"""Return the value of an enum constant."""
if not hasattr(self, '_enum_value'):
assert self.kind == CursorKind.ENUM_CONSTANT_DECL
# Figure out the underlying type of the enum to know if it
# is a signed or unsigned quantity.
underlying_type = self.type
if underlying_type.kind == TypeKind.ENUM:
underlying_type = underlying_type.get_declaration().enum_type
if underlying_type.kind in (TypeKind.CHAR_U,
TypeKind.UCHAR,
TypeKind.CHAR16,
TypeKind.CHAR32,
TypeKind.USHORT,
TypeKind.UINT,
TypeKind.ULONG,
TypeKind.ULONGLONG,
TypeKind.UINT128):
self._enum_value = \
conf.lib.clang_getEnumConstantDeclUnsignedValue(self)
else:
self._enum_value = conf.lib.clang_getEnumConstantDeclValue(self)
return self._enum_value
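    # Usage sketch (illustrative only): enumerate the constants of an enum
    # declaration; 'enum_cursor' is assumed to be a Cursor of kind
    # CursorKind.ENUM_DECL.
    #
    #   for child in enum_cursor.get_children():
    #       if child.kind == CursorKind.ENUM_CONSTANT_DECL:
    #           print('%s = %d' % (child.spelling, child.enum_value))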
@property
def objc_type_encoding(self):
"""Return the Objective-C type encoding as a str."""
if not hasattr(self, '_objc_type_encoding'):
self._objc_type_encoding = \
conf.lib.clang_getDeclObjCTypeEncoding(self)
return str(self._objc_type_encoding)
@property
def hash(self):
"""Returns a hash of the cursor as an int."""
if not hasattr(self, '_hash'):
self._hash = conf.lib.clang_hashCursor(self)
return self._hash
@property
def semantic_parent(self):
"""Return the semantic parent for this cursor."""
if not hasattr(self, '_semantic_parent'):
self._semantic_parent = conf.lib.clang_getCursorSemanticParent(self)
return self._semantic_parent
@property
def lexical_parent(self):
"""Return the lexical parent for this cursor."""
if not hasattr(self, '_lexical_parent'):
self._lexical_parent = conf.lib.clang_getCursorLexicalParent(self)
return self._lexical_parent
@property
def translation_unit(self):
"""Returns the TranslationUnit to which this Cursor belongs."""
# If this triggers an AttributeError, the instance was not properly
# created.
return self._tu
@property
def referenced(self):
"""
For a cursor that is a reference, returns a cursor
representing the entity that it references.
"""
if not hasattr(self, '_referenced'):
self._referenced = conf.lib.clang_getCursorReferenced(self)
return self._referenced
@property
def brief_comment(self):
"""Returns the brief comment text associated with that Cursor"""
r = conf.lib.clang_Cursor_getBriefCommentText(self)
if not r:
return None
return str(r)
@property
def raw_comment(self):
"""Returns the raw comment text associated with that Cursor"""
r = conf.lib.clang_Cursor_getRawCommentText(self)
if not r:
return None
return str(r)
def get_arguments(self):
"""Return an iterator for accessing the arguments of this cursor."""
num_args = conf.lib.clang_Cursor_getNumArguments(self)
for i in xrange(0, num_args):
yield conf.lib.clang_Cursor_getArgument(self, i)
def get_num_template_arguments(self):
"""Returns the number of template args associated with this cursor."""
return conf.lib.clang_Cursor_getNumTemplateArguments(self)
def get_template_argument_kind(self, num):
"""Returns the TemplateArgumentKind for the indicated template
argument."""
return conf.lib.clang_Cursor_getTemplateArgumentKind(self, num)
def get_template_argument_type(self, num):
"""Returns the CXType for the indicated template argument."""
return conf.lib.clang_Cursor_getTemplateArgumentType(self, num)
def get_template_argument_value(self, num):
"""Returns the value of the indicated arg as a signed 64b integer."""
return conf.lib.clang_Cursor_getTemplateArgumentValue(self, num)
def get_template_argument_unsigned_value(self, num):
"""Returns the value of the indicated arg as an unsigned 64b integer."""
return conf.lib.clang_Cursor_getTemplateArgumentUnsignedValue(self, num)
def get_children(self):
"""Return an iterator for accessing the children of this cursor."""
# FIXME: Expose iteration from CIndex, PR6125.
def visitor(child, parent, children):
# FIXME: Document this assertion in API.
# FIXME: There should just be an isNull method.
assert child != conf.lib.clang_getNullCursor()
# Create reference to TU so it isn't GC'd before Cursor.
child._tu = self._tu
children.append(child)
return 1 # continue
children = []
conf.lib.clang_visitChildren(self, callbacks['cursor_visit'](visitor),
children)
return iter(children)
def walk_preorder(self):
"""Depth-first preorder walk over the cursor and its descendants.
Yields cursors.
"""
yield self
for child in self.get_children():
for descendant in child.walk_preorder():
yield descendant
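    # Usage sketch (illustrative only): collect every function declaration in
    # a translation unit; 'tu' is assumed to be a parsed TranslationUnit.
    #
    #   functions = [c for c in tu.cursor.walk_preorder()
    #                if c.kind == CursorKind.FUNCTION_DECL]
    #   for f in functions:
    #       print('%s at line %d' % (f.spelling, f.location.line))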
def get_tokens(self):
"""Obtain Token instances formulating that compose this Cursor.
This is a generator for Token instances. It returns all tokens which
occupy the extent this cursor occupies.
"""
return TokenGroup.get_tokens(self._tu, self.extent)
def get_field_offsetof(self):
"""Returns the offsetof the FIELD_DECL pointed by this Cursor."""
return conf.lib.clang_Cursor_getOffsetOfField(self)
def is_anonymous(self):
"""
Check if the record is anonymous.
"""
if self.kind == CursorKind.FIELD_DECL:
return self.type.get_declaration().is_anonymous()
return conf.lib.clang_Cursor_isAnonymous(self)
def is_bitfield(self):
"""
Check if the field is a bitfield.
"""
return conf.lib.clang_Cursor_isBitField(self)
def get_bitfield_width(self):
"""
Retrieve the width of a bitfield.
"""
return conf.lib.clang_getFieldDeclBitWidth(self)
@staticmethod
def from_result(res, fn, args):
assert isinstance(res, Cursor)
# FIXME: There should just be an isNull method.
if res == conf.lib.clang_getNullCursor():
return None
# Store a reference to the TU in the Python object so it won't get GC'd
# before the Cursor.
tu = None
for arg in args:
if isinstance(arg, TranslationUnit):
tu = arg
break
if hasattr(arg, 'translation_unit'):
tu = arg.translation_unit
break
assert tu is not None
res._tu = tu
return res
@staticmethod
def from_cursor_result(res, fn, args):
assert isinstance(res, Cursor)
if res == conf.lib.clang_getNullCursor():
return None
res._tu = args[0]._tu
return res
class StorageClass(object):
"""
Describes the storage class of a declaration
"""
# The unique kind objects, index by id.
_kinds = []
_name_map = None
def __init__(self, value):
if value >= len(StorageClass._kinds):
StorageClass._kinds += [None] * (value - len(StorageClass._kinds) + 1)
if StorageClass._kinds[value] is not None:
raise ValueError('StorageClass already loaded')
self.value = value
StorageClass._kinds[value] = self
StorageClass._name_map = None
def from_param(self):
return self.value
@property
def name(self):
"""Get the enumeration name of this storage class."""
if self._name_map is None:
self._name_map = {}
for key,value in StorageClass.__dict__.items():
if isinstance(value,StorageClass):
self._name_map[value] = key
return self._name_map[self]
@staticmethod
def from_id(id):
if id >= len(StorageClass._kinds) or not StorageClass._kinds[id]:
raise ValueError('Unknown storage class %d' % id)
return StorageClass._kinds[id]
def __repr__(self):
return 'StorageClass.%s' % (self.name,)
StorageClass.INVALID = StorageClass(0)
StorageClass.NONE = StorageClass(1)
StorageClass.EXTERN = StorageClass(2)
StorageClass.STATIC = StorageClass(3)
StorageClass.PRIVATEEXTERN = StorageClass(4)
StorageClass.OPENCLWORKGROUPLOCAL = StorageClass(5)
StorageClass.AUTO = StorageClass(6)
StorageClass.REGISTER = StorageClass(7)
### C++ access specifiers ###
class AccessSpecifier(BaseEnumeration):
"""
Describes the access of a C++ class member
"""
# The unique kind objects, index by id.
_kinds = []
_name_map = None
def from_param(self):
return self.value
def __repr__(self):
return 'AccessSpecifier.%s' % (self.name,)
AccessSpecifier.INVALID = AccessSpecifier(0)
AccessSpecifier.PUBLIC = AccessSpecifier(1)
AccessSpecifier.PROTECTED = AccessSpecifier(2)
AccessSpecifier.PRIVATE = AccessSpecifier(3)
AccessSpecifier.NONE = AccessSpecifier(4)
### Type Kinds ###
class TypeKind(BaseEnumeration):
"""
Describes the kind of type.
"""
# The unique kind objects, indexed by id.
_kinds = []
_name_map = None
@property
def spelling(self):
"""Retrieve the spelling of this TypeKind."""
return str(conf.lib.clang_getTypeKindSpelling(self.value))
def __repr__(self):
return 'TypeKind.%s' % (self.name,)
TypeKind.INVALID = TypeKind(0)
TypeKind.UNEXPOSED = TypeKind(1)
TypeKind.VOID = TypeKind(2)
TypeKind.BOOL = TypeKind(3)
TypeKind.CHAR_U = TypeKind(4)
TypeKind.UCHAR = TypeKind(5)
TypeKind.CHAR16 = TypeKind(6)
TypeKind.CHAR32 = TypeKind(7)
TypeKind.USHORT = TypeKind(8)
TypeKind.UINT = TypeKind(9)
TypeKind.ULONG = TypeKind(10)
TypeKind.ULONGLONG = TypeKind(11)
TypeKind.UINT128 = TypeKind(12)
TypeKind.CHAR_S = TypeKind(13)
TypeKind.SCHAR = TypeKind(14)
TypeKind.WCHAR = TypeKind(15)
TypeKind.SHORT = TypeKind(16)
TypeKind.INT = TypeKind(17)
TypeKind.LONG = TypeKind(18)
TypeKind.LONGLONG = TypeKind(19)
TypeKind.INT128 = TypeKind(20)
TypeKind.FLOAT = TypeKind(21)
TypeKind.DOUBLE = TypeKind(22)
TypeKind.LONGDOUBLE = TypeKind(23)
TypeKind.NULLPTR = TypeKind(24)
TypeKind.OVERLOAD = TypeKind(25)
TypeKind.DEPENDENT = TypeKind(26)
TypeKind.OBJCID = TypeKind(27)
TypeKind.OBJCCLASS = TypeKind(28)
TypeKind.OBJCSEL = TypeKind(29)
TypeKind.FLOAT128 = TypeKind(30)
TypeKind.COMPLEX = TypeKind(100)
TypeKind.POINTER = TypeKind(101)
TypeKind.BLOCKPOINTER = TypeKind(102)
TypeKind.LVALUEREFERENCE = TypeKind(103)
TypeKind.RVALUEREFERENCE = TypeKind(104)
TypeKind.RECORD = TypeKind(105)
TypeKind.ENUM = TypeKind(106)
TypeKind.TYPEDEF = TypeKind(107)
TypeKind.OBJCINTERFACE = TypeKind(108)
TypeKind.OBJCOBJECTPOINTER = TypeKind(109)
TypeKind.FUNCTIONNOPROTO = TypeKind(110)
TypeKind.FUNCTIONPROTO = TypeKind(111)
TypeKind.CONSTANTARRAY = TypeKind(112)
TypeKind.VECTOR = TypeKind(113)
TypeKind.INCOMPLETEARRAY = TypeKind(114)
TypeKind.VARIABLEARRAY = TypeKind(115)
TypeKind.DEPENDENTSIZEDARRAY = TypeKind(116)
TypeKind.MEMBERPOINTER = TypeKind(117)
TypeKind.AUTO = TypeKind(118)
TypeKind.ELABORATED = TypeKind(119)
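# Usage sketch (illustrative only): a Type's kind can be compared against
# these constants; 'c' is assumed to be a Cursor.
#
#   t = c.type
#   if t.kind == TypeKind.POINTER:
#       print('pointer to %s' % t.get_pointee().spelling)
#   elif t.kind == TypeKind.CONSTANTARRAY:
#       print('array of %d x %s' % (t.get_array_size(),
#                                   t.get_array_element_type().spelling))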
class RefQualifierKind(BaseEnumeration):
"""Describes a specific ref-qualifier of a type."""
# The unique kind objects, indexed by id.
_kinds = []
_name_map = None
def from_param(self):
return self.value
def __repr__(self):
return 'RefQualifierKind.%s' % (self.name,)
RefQualifierKind.NONE = RefQualifierKind(0)
RefQualifierKind.LVALUE = RefQualifierKind(1)
RefQualifierKind.RVALUE = RefQualifierKind(2)
class Type(Structure):
"""
The type of an element in the abstract syntax tree.
"""
_fields_ = [("_kind_id", c_int), ("data", c_void_p * 2)]
@property
def kind(self):
"""Return the kind of this type."""
return TypeKind.from_id(self._kind_id)
def argument_types(self):
"""Retrieve a container for the non-variadic arguments for this type.
The returned object is iterable and indexable. Each item in the
container is a Type instance.
"""
class ArgumentsIterator(collections.Sequence):
def __init__(self, parent):
self.parent = parent
self.length = None
def __len__(self):
if self.length is None:
self.length = conf.lib.clang_getNumArgTypes(self.parent)
return self.length
def __getitem__(self, key):
# FIXME Support slice objects.
if not isinstance(key, int):
raise TypeError("Must supply a non-negative int.")
if key < 0:
raise IndexError("Only non-negative indexes are accepted.")
if key >= len(self):
raise IndexError("Index greater than container length: "
"%d > %d" % ( key, len(self) ))
result = conf.lib.clang_getArgType(self.parent, key)
if result.kind == TypeKind.INVALID:
raise IndexError("Argument could not be retrieved.")
return result
assert self.kind == TypeKind.FUNCTIONPROTO
return ArgumentsIterator(self)
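    # Usage sketch (illustrative only): list the parameter types of a function
    # prototype; 'fn_type' is assumed to be a Type of kind
    # TypeKind.FUNCTIONPROTO (for example, a FUNCTION_DECL cursor's .type).
    #
    #   for arg in fn_type.argument_types():
    #       print(arg.spelling)
    #   print('returns %s' % fn_type.get_result().spelling)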
@property
def element_type(self):
"""Retrieve the Type of elements within this Type.
If accessed on a type that is not an array, complex, or vector type, an
exception will be raised.
"""
result = conf.lib.clang_getElementType(self)
if result.kind == TypeKind.INVALID:
raise Exception('Element type not available on this type.')
return result
@property
def element_count(self):
"""Retrieve the number of elements in this type.
Returns an int.
If the Type is not an array or vector, this raises.
"""
result = conf.lib.clang_getNumElements(self)
if result < 0:
raise Exception('Type does not have elements.')
return result
@property
def translation_unit(self):
"""The TranslationUnit to which this Type is associated."""
# If this triggers an AttributeError, the instance was not properly
# instantiated.
return self._tu
@staticmethod
def from_result(res, fn, args):
assert isinstance(res, Type)
tu = None
for arg in args:
if hasattr(arg, 'translation_unit'):
tu = arg.translation_unit
break
assert tu is not None
res._tu = tu
return res
def get_canonical(self):
"""
Return the canonical type for a Type.
Clang's type system explicitly models typedefs and all the
ways a specific type can be represented. The canonical type
is the underlying type with all the "sugar" removed. For
example, if 'T' is a typedef for 'int', the canonical type for
'T' would be 'int'.
"""
return conf.lib.clang_getCanonicalType(self)
def is_const_qualified(self):
"""Determine whether a Type has the "const" qualifier set.
This does not look through typedefs that may have added "const"
at a different level.
"""
return conf.lib.clang_isConstQualifiedType(self)
def is_volatile_qualified(self):
"""Determine whether a Type has the "volatile" qualifier set.
This does not look through typedefs that may have added "volatile"
at a different level.
"""
return conf.lib.clang_isVolatileQualifiedType(self)
def is_restrict_qualified(self):
"""Determine whether a Type has the "restrict" qualifier set.
This does not look through typedefs that may have added "restrict" at
a different level.
"""
return conf.lib.clang_isRestrictQualifiedType(self)
def is_function_variadic(self):
"""Determine whether this function Type is a variadic function type."""
assert self.kind == TypeKind.FUNCTIONPROTO
return conf.lib.clang_isFunctionTypeVariadic(self)
def is_pod(self):
"""Determine whether this Type represents plain old data (POD)."""
return conf.lib.clang_isPODType(self)
def get_pointee(self):
"""
For pointer types, returns the type of the pointee.
"""
return conf.lib.clang_getPointeeType(self)
def get_declaration(self):
"""
Return the cursor for the declaration of the given type.
"""
return conf.lib.clang_getTypeDeclaration(self)
def get_result(self):
"""
Retrieve the result type associated with a function type.
"""
return conf.lib.clang_getResultType(self)
def get_array_element_type(self):
"""
Retrieve the type of the elements of the array type.
"""
return conf.lib.clang_getArrayElementType(self)
def get_array_size(self):
"""
Retrieve the size of the constant array.
"""
return conf.lib.clang_getArraySize(self)
def get_class_type(self):
"""
Retrieve the class type of the member pointer type.
"""
return conf.lib.clang_Type_getClassType(self)
def get_named_type(self):
"""
Retrieve the type named by the qualified-id.
"""
return conf.lib.clang_Type_getNamedType(self)
def get_align(self):
"""
Retrieve the alignment of the record.
"""
return conf.lib.clang_Type_getAlignOf(self)
def get_size(self):
"""
Retrieve the size of the record.
"""
return conf.lib.clang_Type_getSizeOf(self)
def get_offset(self, fieldname):
"""
Retrieve the offset of a field in the record.
"""
return conf.lib.clang_Type_getOffsetOf(self, fieldname)
def get_ref_qualifier(self):
"""
Retrieve the ref-qualifier of the type.
"""
return RefQualifierKind.from_id(
conf.lib.clang_Type_getCXXRefQualifier(self))
def get_fields(self):
"""Return an iterator for accessing the fields of this type."""
def visitor(field, children):
assert field != conf.lib.clang_getNullCursor()
# Create reference to TU so it isn't GC'd before Cursor.
field._tu = self._tu
fields.append(field)
return 1 # continue
fields = []
conf.lib.clang_Type_visitFields(self,
callbacks['fields_visit'](visitor), fields)
return iter(fields)
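    # Usage sketch (illustrative only): walk the fields of a record type;
    # 'record_type' is assumed to be a Type of kind TypeKind.RECORD. libclang
    # reports field offsets in bits.
    #
    #   for field in record_type.get_fields():
    #       print('%s at bit offset %d' % (field.spelling,
    #                                      record_type.get_offset(field.spelling)))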
@property
def spelling(self):
"""Retrieve the spelling of this Type."""
return str(conf.lib.clang_getTypeSpelling(self))
def __eq__(self, other):
if type(other) != type(self):
return False
return conf.lib.clang_equalTypes(self, other)
def __ne__(self, other):
return not self.__eq__(other)
## CIndex Objects ##
# CIndex objects (derived from ClangObject) are essentially lightweight
# wrappers attached to some underlying object, which is exposed via CIndex as
# a void*.
class ClangObject(object):
"""
A helper for Clang objects. This class helps act as an intermediary for
the ctypes library and the Clang CIndex library.
"""
def __init__(self, obj):
assert isinstance(obj, c_object_p) and obj
self.obj = self._as_parameter_ = obj
def from_param(self):
return self._as_parameter_
class _CXUnsavedFile(Structure):
"""Helper for passing unsaved file arguments."""
_fields_ = [("name", c_string_p), ("contents", c_string_p), ('length', c_ulong)]
# Function calls through the Python interface are rather slow. Fortunately,
# for most symbols, we do not need to perform a function call. Their spelling
# never changes and is consequently provided by this spelling cache.
SpellingCache = {
# 0: CompletionChunk.Kind("Optional"),
# 1: CompletionChunk.Kind("TypedText"),
# 2: CompletionChunk.Kind("Text"),
# 3: CompletionChunk.Kind("Placeholder"),
# 4: CompletionChunk.Kind("Informative"),
# 5 : CompletionChunk.Kind("CurrentParameter"),
6: '(', # CompletionChunk.Kind("LeftParen"),
7: ')', # CompletionChunk.Kind("RightParen"),
8: '[', # CompletionChunk.Kind("LeftBracket"),
9: ']', # CompletionChunk.Kind("RightBracket"),
10: '{', # CompletionChunk.Kind("LeftBrace"),
11: '}', # CompletionChunk.Kind("RightBrace"),
12: '<', # CompletionChunk.Kind("LeftAngle"),
13: '>', # CompletionChunk.Kind("RightAngle"),
14: ', ', # CompletionChunk.Kind("Comma"),
# 15: CompletionChunk.Kind("ResultType"),
16: ':', # CompletionChunk.Kind("Colon"),
17: ';', # CompletionChunk.Kind("SemiColon"),
18: '=', # CompletionChunk.Kind("Equal"),
19: ' ', # CompletionChunk.Kind("HorizontalSpace"),
# 20: CompletionChunk.Kind("VerticalSpace")
}
class CompletionChunk:
class Kind:
def __init__(self, name):
self.name = name
def __str__(self):
return self.name
def __repr__(self):
return "<ChunkKind: %s>" % self
def __init__(self, completionString, key):
self.cs = completionString
self.key = key
self.__kindNumberCache = -1
def __repr__(self):
return "{'" + str(self.spelling) + "', " + str(self.kind) + "}"
@CachedProperty
def spelling(self):
if self.__kindNumber in SpellingCache:
return SpellingCache[self.__kindNumber]
return conf.lib.clang_getCompletionChunkText(self.cs, self.key).spelling
# We do not use @CachedProperty here, as the manual implementation is
# apparently still significantly faster. Please profile carefully if you
# would like to add CachedProperty back.
@property
def __kindNumber(self):
if self.__kindNumberCache == -1:
self.__kindNumberCache = \
conf.lib.clang_getCompletionChunkKind(self.cs, self.key)
return self.__kindNumberCache
@CachedProperty
def kind(self):
return completionChunkKindMap[self.__kindNumber]
@CachedProperty
def string(self):
res = conf.lib.clang_getCompletionChunkCompletionString(self.cs,
self.key)
        if res:
            return CompletionString(res)
        else:
            return None
def isKindOptional(self):
return self.__kindNumber == 0
def isKindTypedText(self):
return self.__kindNumber == 1
def isKindPlaceHolder(self):
return self.__kindNumber == 3
def isKindInformative(self):
return self.__kindNumber == 4
def isKindResultType(self):
return self.__kindNumber == 15
completionChunkKindMap = {
0: CompletionChunk.Kind("Optional"),
1: CompletionChunk.Kind("TypedText"),
2: CompletionChunk.Kind("Text"),
3: CompletionChunk.Kind("Placeholder"),
4: CompletionChunk.Kind("Informative"),
5: CompletionChunk.Kind("CurrentParameter"),
6: CompletionChunk.Kind("LeftParen"),
7: CompletionChunk.Kind("RightParen"),
8: CompletionChunk.Kind("LeftBracket"),
9: CompletionChunk.Kind("RightBracket"),
10: CompletionChunk.Kind("LeftBrace"),
11: CompletionChunk.Kind("RightBrace"),
12: CompletionChunk.Kind("LeftAngle"),
13: CompletionChunk.Kind("RightAngle"),
14: CompletionChunk.Kind("Comma"),
15: CompletionChunk.Kind("ResultType"),
16: CompletionChunk.Kind("Colon"),
17: CompletionChunk.Kind("SemiColon"),
18: CompletionChunk.Kind("Equal"),
19: CompletionChunk.Kind("HorizontalSpace"),
20: CompletionChunk.Kind("VerticalSpace")}
class CompletionString(ClangObject):
class Availability:
def __init__(self, name):
self.name = name
def __str__(self):
return self.name
def __repr__(self):
return "<Availability: %s>" % self
def __len__(self):
return self.num_chunks
@CachedProperty
def num_chunks(self):
return conf.lib.clang_getNumCompletionChunks(self.obj)
def __getitem__(self, key):
if self.num_chunks <= key:
raise IndexError
return CompletionChunk(self.obj, key)
@property
def priority(self):
return conf.lib.clang_getCompletionPriority(self.obj)
@property
def availability(self):
res = conf.lib.clang_getCompletionAvailability(self.obj)
return availabilityKinds[res]
@property
def briefComment(self):
if conf.function_exists("clang_getCompletionBriefComment"):
return conf.lib.clang_getCompletionBriefComment(self.obj)
return _CXString()
def __repr__(self):
return " | ".join([str(a) for a in self]) \
+ " || Priority: " + str(self.priority) \
+ " || Availability: " + str(self.availability) \
+ " || Brief comment: " + str(self.briefComment.spelling)
availabilityKinds = {
0: CompletionChunk.Kind("Available"),
1: CompletionChunk.Kind("Deprecated"),
2: CompletionChunk.Kind("NotAvailable"),
3: CompletionChunk.Kind("NotAccessible")}
class CodeCompletionResult(Structure):
_fields_ = [('cursorKind', c_int), ('completionString', c_object_p)]
def __repr__(self):
return str(CompletionString(self.completionString))
@property
def kind(self):
return CursorKind.from_id(self.cursorKind)
@property
def string(self):
return CompletionString(self.completionString)
class CCRStructure(Structure):
_fields_ = [('results', POINTER(CodeCompletionResult)),
('numResults', c_int)]
def __len__(self):
return self.numResults
def __getitem__(self, key):
if len(self) <= key:
raise IndexError
return self.results[key]
class CodeCompletionResults(ClangObject):
def __init__(self, ptr):
assert isinstance(ptr, POINTER(CCRStructure)) and ptr
self.ptr = self._as_parameter_ = ptr
def from_param(self):
return self._as_parameter_
def __del__(self):
conf.lib.clang_disposeCodeCompleteResults(self)
@property
def results(self):
return self.ptr.contents
@property
def diagnostics(self):
class DiagnosticsItr:
def __init__(self, ccr):
self.ccr= ccr
def __len__(self):
return int(\
conf.lib.clang_codeCompleteGetNumDiagnostics(self.ccr))
def __getitem__(self, key):
return conf.lib.clang_codeCompleteGetDiagnostic(self.ccr, key)
return DiagnosticsItr(self)
class Index(ClangObject):
"""
The Index type provides the primary interface to the Clang CIndex library,
primarily by providing an interface for reading and parsing translation
units.
"""
@staticmethod
def create(excludeDecls=False):
"""
Create a new Index.
Parameters:
excludeDecls -- Exclude local declarations from translation units.
"""
return Index(conf.lib.clang_createIndex(excludeDecls, 0))
def __del__(self):
conf.lib.clang_disposeIndex(self)
def read(self, path):
"""Load a TranslationUnit from the given AST file."""
return TranslationUnit.from_ast_file(path, self)
def parse(self, path, args=None, unsaved_files=None, options = 0):
"""Load the translation unit from the given source code file by running
clang and generating the AST before loading. Additional command line
parameters can be passed to clang via the args parameter.
        In-memory contents for files can be provided by passing a list of
        pairs as unsaved_files; the first item of each pair should be the
        filename to be mapped and the second should be the contents to be
        substituted for that file. The contents may be passed as strings or
        file objects.
If an error was encountered during parsing, a TranslationUnitLoadError
will be raised.
"""
return TranslationUnit.from_source(path, args, unsaved_files, options,
self)
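    # Usage sketch (illustrative only): create an Index and parse a file from
    # disk; 'sample.c' and the include path are placeholder values.
    #
    #   index = Index.create()
    #   tu = index.parse('sample.c', args=['-I/path/to/includes'])
    #   print(tu.spelling)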
class TranslationUnit(ClangObject):
"""Represents a source code translation unit.
This is one of the main types in the API. Any time you wish to interact
with Clang's representation of a source file, you typically start with a
translation unit.
"""
# Default parsing mode.
PARSE_NONE = 0
# Instruct the parser to create a detailed processing record containing
# metadata not normally retained.
PARSE_DETAILED_PROCESSING_RECORD = 1
# Indicates that the translation unit is incomplete. This is typically used
# when parsing headers.
PARSE_INCOMPLETE = 2
# Instruct the parser to create a pre-compiled preamble for the translation
# unit. This caches the preamble (included files at top of source file).
# This is useful if the translation unit will be reparsed and you don't
# want to incur the overhead of reparsing the preamble.
PARSE_PRECOMPILED_PREAMBLE = 4
# Cache code completion information on parse. This adds time to parsing but
# speeds up code completion.
PARSE_CACHE_COMPLETION_RESULTS = 8
# Flags with values 16 and 32 are deprecated and intentionally omitted.
# Do not parse function bodies. This is useful if you only care about
# searching for declarations/definitions.
PARSE_SKIP_FUNCTION_BODIES = 64
# Used to indicate that brief documentation comments should be included
# into the set of code completions returned from this translation unit.
PARSE_INCLUDE_BRIEF_COMMENTS_IN_CODE_COMPLETION = 128
@classmethod
def from_source(cls, filename, args=None, unsaved_files=None, options=0,
index=None):
"""Create a TranslationUnit by parsing source.
This is capable of processing source code both from files on the
filesystem as well as in-memory contents.
Command-line arguments that would be passed to clang are specified as
a list via args. These can be used to specify include paths, warnings,
etc. e.g. ["-Wall", "-I/path/to/include"].
In-memory file content can be provided via unsaved_files. This is an
iterable of 2-tuples. The first element is the str filename. The
second element defines the content. Content can be provided as str
source code or as file objects (anything with a read() method). If
a file object is being used, content will be read until EOF and the
read cursor will not be reset to its original position.
options is a bitwise or of TranslationUnit.PARSE_XXX flags which will
control parsing behavior.
index is an Index instance to utilize. If not provided, a new Index
will be created for this TranslationUnit.
To parse source from the filesystem, the filename of the file to parse
is specified by the filename argument. Or, filename could be None and
the args list would contain the filename(s) to parse.
To parse source from an in-memory buffer, set filename to the virtual
filename you wish to associate with this source (e.g. "test.c"). The
contents of that file are then provided in unsaved_files.
If an error occurs, a TranslationUnitLoadError is raised.
Please note that a TranslationUnit with parser errors may be returned.
It is the caller's responsibility to check tu.diagnostics for errors.
Also note that Clang infers the source language from the extension of
the input filename. If you pass in source code containing a C++ class
declaration with the filename "test.c" parsing will fail.
"""
if args is None:
args = []
if unsaved_files is None:
unsaved_files = []
if index is None:
index = Index.create()
args_array = None
if len(args) > 0:
args_array = (c_string_p * len(args))()
for i,a in enumerate(args):
args_array[i] = c_string_p(a)
unsaved_array = None
if len(unsaved_files) > 0:
unsaved_array = (_CXUnsavedFile * len(unsaved_files))()
for i, (name, contents) in enumerate(unsaved_files):
if hasattr(contents, "read"):
contents = contents.read()
unsaved_array[i].name = c_string_p(name)
unsaved_array[i].contents = c_string_p(contents)
unsaved_array[i].length = len(contents)
ptr = conf.lib.clang_parseTranslationUnit(index, filename, args_array,
len(args), unsaved_array,
len(unsaved_files), options)
if not ptr:
raise TranslationUnitLoadError("Error parsing translation unit.")
return cls(ptr, index=index)
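    # Usage sketch (illustrative only): parse source held entirely in memory.
    # The virtual filename 'hello.c' and its contents are placeholder values.
    #
    #   src = 'int main(void) { return 0; }'
    #   tu = TranslationUnit.from_source('hello.c',
    #                                    unsaved_files=[('hello.c', src)])
    #   print(tu.cursor.kind)   # CursorKind.TRANSLATION_UNIT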
@classmethod
def from_ast_file(cls, filename, index=None):
"""Create a TranslationUnit instance from a saved AST file.
A previously-saved AST file (provided with -emit-ast or
TranslationUnit.save()) is loaded from the filename specified.
If the file cannot be loaded, a TranslationUnitLoadError will be
raised.
index is optional and is the Index instance to use. If not provided,
a default Index will be created.
"""
if index is None:
index = Index.create()
ptr = conf.lib.clang_createTranslationUnit(index, filename)
if not ptr:
raise TranslationUnitLoadError(filename)
return cls(ptr=ptr, index=index)
def __init__(self, ptr, index):
"""Create a TranslationUnit instance.
TranslationUnits should be created using one of the from_* @classmethod
functions above. __init__ is only called internally.
"""
assert isinstance(index, Index)
self.index = index
ClangObject.__init__(self, ptr)
def __del__(self):
conf.lib.clang_disposeTranslationUnit(self)
@property
def cursor(self):
"""Retrieve the cursor that represents the given translation unit."""
return conf.lib.clang_getTranslationUnitCursor(self)
@property
def spelling(self):
"""Get the original translation unit source file name."""
return str(conf.lib.clang_getTranslationUnitSpelling(self))
def get_includes(self):
"""
Return an iterable sequence of FileInclusion objects that describe the
sequence of inclusions in a translation unit. The first object in
this sequence is always the input file. Note that this method will not
recursively iterate over header files included through precompiled
headers.
"""
def visitor(fobj, lptr, depth, includes):
if depth > 0:
loc = lptr.contents
includes.append(FileInclusion(loc.file, File(fobj), loc, depth))
# Automatically adapt CIndex/ctype pointers to python objects
includes = []
conf.lib.clang_getInclusions(self,
callbacks['translation_unit_includes'](visitor), includes)
return iter(includes)
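    # Illustrative usage (not part of the original bindings): list every
    # inclusion of a hypothetical, already-parsed translation unit `tu`:
    #
    #   for inc in tu.get_includes():
    #       print(inc.depth, inc.include.name, inc.location.line)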
def get_file(self, filename):
"""Obtain a File from this translation unit."""
return File.from_name(self, filename)
def get_location(self, filename, position):
"""Obtain a SourceLocation for a file in this translation unit.
The position can be specified by passing:
- Integer file offset. Initial file offset is 0.
- 2-tuple of (line number, column number). Initial file position is
(0, 0)
"""
f = self.get_file(filename)
if isinstance(position, int):
return SourceLocation.from_offset(self, f, position)
return SourceLocation.from_position(self, f, position[0], position[1])
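    # Illustrative usage (not part of the original bindings), assuming a
    # hypothetical translation unit `tu` parsed from "sample.c":
    #
    #   loc_by_offset = tu.get_location("sample.c", 10)        # byte offset
    #   loc_by_position = tu.get_location("sample.c", (2, 1))  # (line, column)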
def get_extent(self, filename, locations):
"""Obtain a SourceRange from this translation unit.
The bounds of the SourceRange must ultimately be defined by a start and
end SourceLocation. For the locations argument, you can pass:
- 2 SourceLocation instances in a 2-tuple or list.
- 2 int file offsets via a 2-tuple or list.
- 2 2-tuple or lists of (line, column) pairs in a 2-tuple or list.
e.g.
get_extent('foo.c', (5, 10))
get_extent('foo.c', ((1, 1), (1, 15)))
"""
f = self.get_file(filename)
if len(locations) < 2:
raise Exception('Must pass object with at least 2 elements')
start_location, end_location = locations
if hasattr(start_location, '__len__'):
start_location = SourceLocation.from_position(self, f,
start_location[0], start_location[1])
elif isinstance(start_location, int):
start_location = SourceLocation.from_offset(self, f,
start_location)
if hasattr(end_location, '__len__'):
end_location = SourceLocation.from_position(self, f,
end_location[0], end_location[1])
elif isinstance(end_location, int):
end_location = SourceLocation.from_offset(self, f, end_location)
assert isinstance(start_location, SourceLocation)
assert isinstance(end_location, SourceLocation)
return SourceRange.from_locations(start_location, end_location)
@property
def diagnostics(self):
"""
Return an iterable (and indexable) object containing the diagnostics.
"""
class DiagIterator:
def __init__(self, tu):
self.tu = tu
def __len__(self):
return int(conf.lib.clang_getNumDiagnostics(self.tu))
def __getitem__(self, key):
diag = conf.lib.clang_getDiagnostic(self.tu, key)
if not diag:
raise IndexError
return Diagnostic(diag)
return DiagIterator(self)
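    # Illustrative usage (not part of the original bindings): report warnings
    # and errors from a hypothetical translation unit `tu`:
    #
    #   for diag in tu.diagnostics:
    #       if diag.severity >= Diagnostic.Warning:
    #           print(diag.severity, diag.spelling, diag.location)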
def reparse(self, unsaved_files=None, options=0):
"""
Reparse an already parsed translation unit.
In-memory contents for files can be provided by passing a list of pairs
as unsaved_files, the first items should be the filenames to be mapped
and the second should be the contents to be substituted for the
file. The contents may be passed as strings or file objects.
"""
if unsaved_files is None:
unsaved_files = []
unsaved_files_array = 0
if len(unsaved_files):
unsaved_files_array = (_CXUnsavedFile * len(unsaved_files))()
for i,(name,value) in enumerate(unsaved_files):
if not isinstance(value, str):
# FIXME: It would be great to support an efficient version
# of this, one day.
value = value.read()
if not isinstance(value, str):
raise TypeError('Unexpected unsaved file contents.')
                unsaved_files_array[i].name = c_string_p(name)
                unsaved_files_array[i].contents = c_string_p(value)
unsaved_files_array[i].length = len(value)
ptr = conf.lib.clang_reparseTranslationUnit(self, len(unsaved_files),
unsaved_files_array, options)
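    # Illustrative usage (not part of the original bindings): re-parse with
    # edited in-memory contents for a hypothetical "sample.c":
    #
    #   tu.reparse(unsaved_files=[("sample.c", "int x;\nint main(void) { return x; }")])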
def save(self, filename):
"""Saves the TranslationUnit to a file.
This is equivalent to passing -emit-ast to the clang frontend. The
saved file can be loaded back into a TranslationUnit. Or, if it
corresponds to a header, it can be used as a pre-compiled header file.
If an error occurs while saving, a TranslationUnitSaveError is raised.
If the error was TranslationUnitSaveError.ERROR_INVALID_TU, this means
the constructed TranslationUnit was not valid at time of save. In this
case, the reason(s) why should be available via
TranslationUnit.diagnostics().
filename -- The path to save the translation unit to.
"""
options = conf.lib.clang_defaultSaveOptions(self)
result = int(conf.lib.clang_saveTranslationUnit(self, filename,
options))
if result != 0:
raise TranslationUnitSaveError(result,
'Error saving TranslationUnit.')
def codeComplete(self, path, line, column, unsaved_files=None,
include_macros=False, include_code_patterns=False,
include_brief_comments=False):
"""
Code complete in this translation unit.
In-memory contents for files can be provided by passing a list of pairs
as unsaved_files, the first items should be the filenames to be mapped
and the second should be the contents to be substituted for the
file. The contents may be passed as strings or file objects.
"""
options = 0
if include_macros:
options += 1
if include_code_patterns:
options += 2
if include_brief_comments:
options += 4
if unsaved_files is None:
unsaved_files = []
unsaved_files_array = 0
if len(unsaved_files):
unsaved_files_array = (_CXUnsavedFile * len(unsaved_files))()
for i,(name,value) in enumerate(unsaved_files):
if not isinstance(value, str):
# FIXME: It would be great to support an efficient version
# of this, one day.
value = value.read()
if not isinstance(value, str):
raise TypeError('Unexpected unsaved file contents.')
unsaved_files_array[i].name = c_string_p(name)
unsaved_files_array[i].contents = c_string_p(value)
unsaved_files_array[i].length = len(value)
ptr = conf.lib.clang_codeCompleteAt(self, path, line, column,
unsaved_files_array, len(unsaved_files), options)
if ptr:
return CodeCompletionResults(ptr)
return None
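    # Illustrative usage (not part of the original bindings): complete at a
    # hypothetical line/column in "sample.c"; `edited_source` is a placeholder
    # for the current buffer contents:
    #
    #   results = tu.codeComplete("sample.c", 3, 5,
    #                             unsaved_files=[("sample.c", edited_source)])
    #   if results is not None:
    #       print(len(results.results), "completions,",
    #             len(results.diagnostics), "diagnostics")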
def get_tokens(self, locations=None, extent=None):
"""Obtain tokens in this translation unit.
This is a generator for Token instances. The caller specifies a range
of source code to obtain tokens for. The range can be specified as a
2-tuple of SourceLocation or as a SourceRange. If both are defined,
behavior is undefined.
"""
if locations is not None:
extent = SourceRange(start=locations[0], end=locations[1])
return TokenGroup.get_tokens(self, extent)
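    # Illustrative usage (not part of the original bindings): dump every token
    # covered by the whole translation unit `tu`:
    #
    #   for tok in tu.get_tokens(extent=tu.cursor.extent):
    #       print(tok.kind, tok.spelling)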
class File(ClangObject):
"""
The File class represents a particular source file that is part of a
translation unit.
"""
@staticmethod
def from_name(translation_unit, file_name):
"""Retrieve a file handle within the given translation unit."""
return File(conf.lib.clang_getFile(translation_unit, file_name))
@property
def name(self):
"""Return the complete file and path name of the file."""
return str(conf.lib.clang_getCString(conf.lib.clang_getFileName(self)))
@property
def time(self):
"""Return the last modification time of the file."""
return conf.lib.clang_getFileTime(self)
def __str__(self):
return str(self.name)
def __repr__(self):
return "<File: %s>" % (self.name)
@staticmethod
def from_cursor_result(res, fn, args):
assert isinstance(res, File)
# Copy a reference to the TranslationUnit to prevent premature GC.
res._tu = args[0]._tu
return res
class FileInclusion(object):
"""
The FileInclusion class represents the inclusion of one source file by
another via a '#include' directive or as the input file for the translation
unit. This class provides information about the included file, the including
file, the location of the '#include' directive and the depth of the included
file in the stack. Note that the input file has depth 0.
"""
def __init__(self, src, tgt, loc, depth):
self.source = src
self.include = tgt
self.location = loc
self.depth = depth
@property
def is_input_file(self):
"""True if the included file is the input file."""
return self.depth == 0
class CompilationDatabaseError(Exception):
"""Represents an error that occurred when working with a CompilationDatabase
Each error is associated to an enumerated value, accessible under
e.cdb_error. Consumers can compare the value with one of the ERROR_
constants in this class.
"""
# An unknown error occurred
ERROR_UNKNOWN = 0
# The database could not be loaded
ERROR_CANNOTLOADDATABASE = 1
def __init__(self, enumeration, message):
assert isinstance(enumeration, int)
if enumeration > 1:
raise Exception("Encountered undefined CompilationDatabase error "
"constant: %d. Please file a bug to have this "
"value supported." % enumeration)
self.cdb_error = enumeration
Exception.__init__(self, 'Error %d: %s' % (enumeration, message))
class CompileCommand(object):
"""Represents the compile command used to build a file"""
def __init__(self, cmd, ccmds):
self.cmd = cmd
# Keep a reference to the originating CompileCommands
# to prevent garbage collection
self.ccmds = ccmds
@property
def directory(self):
"""Get the working directory for this CompileCommand"""
return str(conf.lib.clang_CompileCommand_getDirectory(self.cmd))
@property
def filename(self):
"""Get the working filename for this CompileCommand"""
return str(conf.lib.clang_CompileCommand_getFilename(self.cmd))
@property
def arguments(self):
"""
Get an iterable object providing each argument in the
command line for the compiler invocation as a _CXString.
Invariant : the first argument is the compiler executable
"""
length = conf.lib.clang_CompileCommand_getNumArgs(self.cmd)
for i in xrange(length):
yield str(conf.lib.clang_CompileCommand_getArg(self.cmd, i))
class CompileCommands(object):
"""
CompileCommands is an iterable object containing all CompileCommand
that can be used for building a specific file.
"""
def __init__(self, ccmds):
self.ccmds = ccmds
def __del__(self):
conf.lib.clang_CompileCommands_dispose(self.ccmds)
def __len__(self):
return int(conf.lib.clang_CompileCommands_getSize(self.ccmds))
def __getitem__(self, i):
cc = conf.lib.clang_CompileCommands_getCommand(self.ccmds, i)
if not cc:
raise IndexError
return CompileCommand(cc, self)
@staticmethod
def from_result(res, fn, args):
if not res:
return None
return CompileCommands(res)
class CompilationDatabase(ClangObject):
"""
The CompilationDatabase is a wrapper class around
clang::tooling::CompilationDatabase
It enables querying how a specific source file can be built.
"""
def __del__(self):
conf.lib.clang_CompilationDatabase_dispose(self)
@staticmethod
def from_result(res, fn, args):
if not res:
raise CompilationDatabaseError(0,
"CompilationDatabase loading failed")
return CompilationDatabase(res)
@staticmethod
def fromDirectory(buildDir):
"""Builds a CompilationDatabase from the database found in buildDir"""
errorCode = c_uint()
try:
cdb = conf.lib.clang_CompilationDatabase_fromDirectory(buildDir,
byref(errorCode))
except CompilationDatabaseError as e:
raise CompilationDatabaseError(int(errorCode.value),
"CompilationDatabase loading failed")
return cdb
def getCompileCommands(self, filename):
"""
Get an iterable object providing all the CompileCommands available to
build filename. Returns None if filename is not found in the database.
"""
return conf.lib.clang_CompilationDatabase_getCompileCommands(self,
filename)
def getAllCompileCommands(self):
"""
Get an iterable object providing all the CompileCommands available from
the database.
"""
return conf.lib.clang_CompilationDatabase_getAllCompileCommands(self)
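    # Illustrative usage (not part of the original bindings); the build
    # directory and source path are hypothetical:
    #
    #   cdb = CompilationDatabase.fromDirectory("/path/to/build")
    #   cmds = cdb.getCompileCommands("/path/to/project/main.c")
    #   if cmds is not None:
    #       for cmd in cmds:
    #           print(cmd.directory, list(cmd.arguments))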
class Token(Structure):
"""Represents a single token from the preprocessor.
Tokens are effectively segments of source code. Source code is first parsed
into tokens before being converted into the AST and Cursors.
Tokens are obtained from parsed TranslationUnit instances. You currently
can't create tokens manually.
"""
_fields_ = [
('int_data', c_uint * 4),
('ptr_data', c_void_p)
]
@property
def spelling(self):
"""The spelling of this token.
This is the textual representation of the token in source.
"""
return str(conf.lib.clang_getTokenSpelling(self._tu, self))
@property
def kind(self):
"""Obtain the TokenKind of the current token."""
return TokenKind.from_value(conf.lib.clang_getTokenKind(self))
@property
def location(self):
"""The SourceLocation this Token occurs at."""
return conf.lib.clang_getTokenLocation(self._tu, self)
@property
def extent(self):
"""The SourceRange this Token occupies."""
return conf.lib.clang_getTokenExtent(self._tu, self)
@property
def cursor(self):
"""The Cursor this Token corresponds to."""
cursor = Cursor()
conf.lib.clang_annotateTokens(self._tu, byref(self), 1, byref(cursor))
return cursor
# Now comes the plumbing to hook up the C library.
# Register callback types in common container.
callbacks['translation_unit_includes'] = CFUNCTYPE(None, c_object_p,
POINTER(SourceLocation), c_uint, py_object)
callbacks['cursor_visit'] = CFUNCTYPE(c_int, Cursor, Cursor, py_object)
callbacks['fields_visit'] = CFUNCTYPE(c_int, Cursor, py_object)
# Functions strictly alphabetical order.
functionList = [
("clang_annotateTokens",
[TranslationUnit, POINTER(Token), c_uint, POINTER(Cursor)]),
("clang_CompilationDatabase_dispose",
[c_object_p]),
("clang_CompilationDatabase_fromDirectory",
[c_string_p, POINTER(c_uint)],
c_object_p,
CompilationDatabase.from_result),
("clang_CompilationDatabase_getAllCompileCommands",
[c_object_p],
c_object_p,
CompileCommands.from_result),
("clang_CompilationDatabase_getCompileCommands",
[c_object_p, c_string_p],
c_object_p,
CompileCommands.from_result),
("clang_CompileCommands_dispose",
[c_object_p]),
("clang_CompileCommands_getCommand",
[c_object_p, c_uint],
c_object_p),
("clang_CompileCommands_getSize",
[c_object_p],
c_uint),
("clang_CompileCommand_getArg",
[c_object_p, c_uint],
_CXString,
_CXString.from_result),
("clang_CompileCommand_getDirectory",
[c_object_p],
_CXString,
_CXString.from_result),
("clang_CompileCommand_getFilename",
[c_object_p],
_CXString,
_CXString.from_result),
("clang_CompileCommand_getNumArgs",
[c_object_p],
c_uint),
("clang_codeCompleteAt",
[TranslationUnit, c_string_p, c_int, c_int, c_void_p, c_int, c_int],
POINTER(CCRStructure)),
("clang_codeCompleteGetDiagnostic",
[CodeCompletionResults, c_int],
Diagnostic),
("clang_codeCompleteGetNumDiagnostics",
[CodeCompletionResults],
c_int),
("clang_createIndex",
[c_int, c_int],
c_object_p),
("clang_createTranslationUnit",
[Index, c_string_p],
c_object_p),
("clang_CXXConstructor_isConvertingConstructor",
[Cursor],
bool),
("clang_CXXConstructor_isCopyConstructor",
[Cursor],
bool),
("clang_CXXConstructor_isDefaultConstructor",
[Cursor],
bool),
("clang_CXXConstructor_isMoveConstructor",
[Cursor],
bool),
("clang_CXXField_isMutable",
[Cursor],
bool),
("clang_CXXMethod_isConst",
[Cursor],
bool),
("clang_CXXMethod_isDefaulted",
[Cursor],
bool),
("clang_CXXMethod_isPureVirtual",
[Cursor],
bool),
("clang_CXXMethod_isStatic",
[Cursor],
bool),
("clang_CXXMethod_isVirtual",
[Cursor],
bool),
("clang_defaultDiagnosticDisplayOptions",
[],
c_uint),
("clang_defaultSaveOptions",
[TranslationUnit],
c_uint),
("clang_disposeCodeCompleteResults",
[CodeCompletionResults]),
# ("clang_disposeCXTUResourceUsage",
# [CXTUResourceUsage]),
("clang_disposeDiagnostic",
[Diagnostic]),
("clang_disposeIndex",
[Index]),
("clang_disposeString",
[_CXString]),
("clang_disposeTokens",
[TranslationUnit, POINTER(Token), c_uint]),
("clang_disposeTranslationUnit",
[TranslationUnit]),
("clang_equalCursors",
[Cursor, Cursor],
bool),
("clang_equalLocations",
[SourceLocation, SourceLocation],
bool),
("clang_equalRanges",
[SourceRange, SourceRange],
bool),
("clang_equalTypes",
[Type, Type],
bool),
("clang_formatDiagnostic",
[Diagnostic, c_uint],
_CXString),
("clang_getArgType",
[Type, c_uint],
Type,
Type.from_result),
("clang_getArrayElementType",
[Type],
Type,
Type.from_result),
("clang_getArraySize",
[Type],
c_longlong),
("clang_getFieldDeclBitWidth",
[Cursor],
c_int),
("clang_getCanonicalCursor",
[Cursor],
Cursor,
Cursor.from_cursor_result),
("clang_getCanonicalType",
[Type],
Type,
Type.from_result),
("clang_getChildDiagnostics",
[Diagnostic],
c_object_p),
("clang_getCompletionAvailability",
[c_void_p],
c_int),
("clang_getCompletionBriefComment",
[c_void_p],
_CXString),
("clang_getCompletionChunkCompletionString",
[c_void_p, c_int],
c_object_p),
("clang_getCompletionChunkKind",
[c_void_p, c_int],
c_int),
("clang_getCompletionChunkText",
[c_void_p, c_int],
_CXString),
("clang_getCompletionPriority",
[c_void_p],
c_int),
("clang_getCString",
[_CXString],
c_string_p),
("clang_getCursor",
[TranslationUnit, SourceLocation],
Cursor),
("clang_getCursorDefinition",
[Cursor],
Cursor,
Cursor.from_result),
("clang_getCursorDisplayName",
[Cursor],
_CXString,
_CXString.from_result),
("clang_getCursorExtent",
[Cursor],
SourceRange),
("clang_getCursorLexicalParent",
[Cursor],
Cursor,
Cursor.from_cursor_result),
("clang_getCursorLocation",
[Cursor],
SourceLocation),
("clang_getCursorReferenced",
[Cursor],
Cursor,
Cursor.from_result),
("clang_getCursorReferenceNameRange",
[Cursor, c_uint, c_uint],
SourceRange),
("clang_getCursorSemanticParent",
[Cursor],
Cursor,
Cursor.from_cursor_result),
("clang_getCursorSpelling",
[Cursor],
_CXString,
_CXString.from_result),
("clang_getCursorType",
[Cursor],
Type,
Type.from_result),
("clang_getCursorUSR",
[Cursor],
_CXString,
_CXString.from_result),
("clang_Cursor_getMangling",
[Cursor],
_CXString,
_CXString.from_result),
# ("clang_getCXTUResourceUsage",
# [TranslationUnit],
# CXTUResourceUsage),
("clang_getCXXAccessSpecifier",
[Cursor],
c_uint),
("clang_getDeclObjCTypeEncoding",
[Cursor],
_CXString,
_CXString.from_result),
("clang_getDiagnostic",
[c_object_p, c_uint],
c_object_p),
("clang_getDiagnosticCategory",
[Diagnostic],
c_uint),
("clang_getDiagnosticCategoryText",
[Diagnostic],
_CXString,
_CXString.from_result),
("clang_getDiagnosticFixIt",
[Diagnostic, c_uint, POINTER(SourceRange)],
_CXString,
_CXString.from_result),
("clang_getDiagnosticInSet",
[c_object_p, c_uint],
c_object_p),
("clang_getDiagnosticLocation",
[Diagnostic],
SourceLocation),
("clang_getDiagnosticNumFixIts",
[Diagnostic],
c_uint),
("clang_getDiagnosticNumRanges",
[Diagnostic],
c_uint),
("clang_getDiagnosticOption",
[Diagnostic, POINTER(_CXString)],
_CXString,
_CXString.from_result),
("clang_getDiagnosticRange",
[Diagnostic, c_uint],
SourceRange),
("clang_getDiagnosticSeverity",
[Diagnostic],
c_int),
("clang_getDiagnosticSpelling",
[Diagnostic],
_CXString,
_CXString.from_result),
("clang_getElementType",
[Type],
Type,
Type.from_result),
("clang_getEnumConstantDeclUnsignedValue",
[Cursor],
c_ulonglong),
("clang_getEnumConstantDeclValue",
[Cursor],
c_longlong),
("clang_getEnumDeclIntegerType",
[Cursor],
Type,
Type.from_result),
("clang_getFile",
[TranslationUnit, c_string_p],
c_object_p),
("clang_getFileName",
[File],
_CXString), # TODO go through _CXString.from_result?
("clang_getFileTime",
[File],
c_uint),
("clang_getIBOutletCollectionType",
[Cursor],
Type,
Type.from_result),
("clang_getIncludedFile",
[Cursor],
File,
File.from_cursor_result),
("clang_getInclusions",
[TranslationUnit, callbacks['translation_unit_includes'], py_object]),
("clang_getInstantiationLocation",
[SourceLocation, POINTER(c_object_p), POINTER(c_uint), POINTER(c_uint),
POINTER(c_uint)]),
("clang_getLocation",
[TranslationUnit, File, c_uint, c_uint],
SourceLocation),
("clang_getLocationForOffset",
[TranslationUnit, File, c_uint],
SourceLocation),
("clang_getNullCursor",
None,
Cursor),
("clang_getNumArgTypes",
[Type],
c_uint),
("clang_getNumCompletionChunks",
[c_void_p],
c_int),
("clang_getNumDiagnostics",
[c_object_p],
c_uint),
("clang_getNumDiagnosticsInSet",
[c_object_p],
c_uint),
("clang_getNumElements",
[Type],
c_longlong),
("clang_getNumOverloadedDecls",
[Cursor],
c_uint),
("clang_getOverloadedDecl",
[Cursor, c_uint],
Cursor,
Cursor.from_cursor_result),
("clang_getPointeeType",
[Type],
Type,
Type.from_result),
("clang_getRange",
[SourceLocation, SourceLocation],
SourceRange),
("clang_getRangeEnd",
[SourceRange],
SourceLocation),
("clang_getRangeStart",
[SourceRange],
SourceLocation),
("clang_getResultType",
[Type],
Type,
Type.from_result),
("clang_getSpecializedCursorTemplate",
[Cursor],
Cursor,
Cursor.from_cursor_result),
("clang_getTemplateCursorKind",
[Cursor],
c_uint),
("clang_getTokenExtent",
[TranslationUnit, Token],
SourceRange),
("clang_getTokenKind",
[Token],
c_uint),
("clang_getTokenLocation",
[TranslationUnit, Token],
SourceLocation),
("clang_getTokenSpelling",
[TranslationUnit, Token],
_CXString,
_CXString.from_result),
("clang_getTranslationUnitCursor",
[TranslationUnit],
Cursor,
Cursor.from_result),
("clang_getTranslationUnitSpelling",
[TranslationUnit],
_CXString,
_CXString.from_result),
("clang_getTUResourceUsageName",
[c_uint],
c_string_p),
("clang_getTypeDeclaration",
[Type],
Cursor,
Cursor.from_result),
("clang_getTypedefDeclUnderlyingType",
[Cursor],
Type,
Type.from_result),
("clang_getTypeKindSpelling",
[c_uint],
_CXString,
_CXString.from_result),
("clang_getTypeSpelling",
[Type],
_CXString,
_CXString.from_result),
("clang_hashCursor",
[Cursor],
c_uint),
("clang_isAttribute",
[CursorKind],
bool),
("clang_isConstQualifiedType",
[Type],
bool),
("clang_isCursorDefinition",
[Cursor],
bool),
("clang_isDeclaration",
[CursorKind],
bool),
("clang_isExpression",
[CursorKind],
bool),
("clang_isFileMultipleIncludeGuarded",
[TranslationUnit, File],
bool),
("clang_isFunctionTypeVariadic",
[Type],
bool),
("clang_isInvalid",
[CursorKind],
bool),
("clang_isPODType",
[Type],
bool),
("clang_isPreprocessing",
[CursorKind],
bool),
("clang_isReference",
[CursorKind],
bool),
("clang_isRestrictQualifiedType",
[Type],
bool),
("clang_isStatement",
[CursorKind],
bool),
("clang_isTranslationUnit",
[CursorKind],
bool),
("clang_isUnexposed",
[CursorKind],
bool),
("clang_isVirtualBase",
[Cursor],
bool),
("clang_isVolatileQualifiedType",
[Type],
bool),
("clang_parseTranslationUnit",
[Index, c_string_p, c_void_p, c_int, c_void_p, c_int, c_int],
c_object_p),
("clang_reparseTranslationUnit",
[TranslationUnit, c_int, c_void_p, c_int],
c_int),
("clang_saveTranslationUnit",
[TranslationUnit, c_string_p, c_uint],
c_int),
("clang_tokenize",
[TranslationUnit, SourceRange, POINTER(POINTER(Token)), POINTER(c_uint)]),
("clang_visitChildren",
[Cursor, callbacks['cursor_visit'], py_object],
c_uint),
("clang_Cursor_getNumArguments",
[Cursor],
c_int),
("clang_Cursor_getArgument",
[Cursor, c_uint],
Cursor,
Cursor.from_result),
("clang_Cursor_getNumTemplateArguments",
[Cursor],
c_int),
("clang_Cursor_getTemplateArgumentKind",
[Cursor, c_uint],
TemplateArgumentKind.from_id),
("clang_Cursor_getTemplateArgumentType",
[Cursor, c_uint],
Type,
Type.from_result),
("clang_Cursor_getTemplateArgumentValue",
[Cursor, c_uint],
c_longlong),
("clang_Cursor_getTemplateArgumentUnsignedValue",
[Cursor, c_uint],
c_ulonglong),
("clang_Cursor_isAnonymous",
[Cursor],
bool),
("clang_Cursor_isBitField",
[Cursor],
bool),
("clang_Cursor_getBriefCommentText",
[Cursor],
_CXString,
_CXString.from_result),
("clang_Cursor_getRawCommentText",
[Cursor],
_CXString,
_CXString.from_result),
("clang_Cursor_getOffsetOfField",
[Cursor],
c_longlong),
("clang_Type_getAlignOf",
[Type],
c_longlong),
("clang_Type_getClassType",
[Type],
Type,
Type.from_result),
("clang_Type_getOffsetOf",
[Type, c_string_p],
c_longlong),
("clang_Type_getSizeOf",
[Type],
c_longlong),
("clang_Type_getCXXRefQualifier",
[Type],
c_uint),
("clang_Type_getNamedType",
[Type],
Type,
Type.from_result),
("clang_Type_visitFields",
[Type, callbacks['fields_visit'], py_object],
c_uint),
]
class LibclangError(Exception):
def __init__(self, message):
self.m = message
def __str__(self):
return self.m
def register_function(lib, item, ignore_errors):
# A function may not exist, if these bindings are used with an older or
# incompatible version of libclang.so.
try:
func = getattr(lib, item[0])
except AttributeError as e:
msg = str(e) + ". Please ensure that your python bindings are "\
"compatible with your libclang.so version."
if ignore_errors:
return
raise LibclangError(msg)
if len(item) >= 2:
func.argtypes = item[1]
if len(item) >= 3:
func.restype = item[2]
if len(item) == 4:
func.errcheck = item[3]
def register_functions(lib, ignore_errors):
"""Register function prototypes with a libclang library instance.
This must be called as part of library instantiation so Python knows how
to call out to the shared library.
"""
def register(item):
return register_function(lib, item, ignore_errors)
for f in functionList:
register(f)
class Config:
library_path = None
library_file = None
compatibility_check = False
loaded = False
@staticmethod
def set_library_path(path):
"""Set the path in which to search for libclang"""
if Config.loaded:
raise Exception("library path must be set before before using " \
"any other functionalities in libclang.")
Config.library_path = path
@staticmethod
def set_library_file(filename):
"""Set the exact location of libclang"""
if Config.loaded:
raise Exception("library file must be set before before using " \
"any other functionalities in libclang.")
Config.library_file = filename
@staticmethod
def set_compatibility_check(check_status):
""" Perform compatibility check when loading libclang
The python bindings are only tested and evaluated with the version of
libclang they are provided with. To ensure correct behavior a (limited)
compatibility check is performed when loading the bindings. This check
        will raise an exception as soon as it fails.
In case these bindings are used with an older version of libclang, parts
that have been stable between releases may still work. Users of the
python bindings can disable the compatibility check. This will cause
the python bindings to load, even though they are written for a newer
version of libclang. Failures now arise if unsupported or incompatible
        features are accessed. Users must then verify for themselves that the
        features they rely on are available and compatible across libclang
        versions.
"""
if Config.loaded:
raise Exception("compatibility_check must be set before before " \
"using any other functionalities in libclang.")
Config.compatibility_check = check_status
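    # Illustrative configuration (not part of the original bindings); the
    # library path is hypothetical and must be set before conf.lib is first
    # accessed:
    #
    #   Config.set_library_path("/usr/lib/llvm/lib")
    #   Config.set_compatibility_check(False)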
@CachedProperty
def lib(self):
lib = self.get_cindex_library()
register_functions(lib, not Config.compatibility_check)
Config.loaded = True
return lib
def get_filename(self):
if Config.library_file:
return Config.library_file
import platform
name = platform.system()
if name == 'Darwin':
file = 'libclang.dylib'
elif name == 'Windows':
file = 'libclang.dll'
else:
file = 'libclang.so'
if Config.library_path:
file = Config.library_path + '/' + file
return file
def get_cindex_library(self):
try:
library = cdll.LoadLibrary(self.get_filename())
except OSError as e:
msg = str(e) + ". To provide a path to libclang use " \
"Config.set_library_path() or " \
"Config.set_library_file()."
raise LibclangError(msg)
return library
def function_exists(self, name):
try:
getattr(self.lib, name)
except AttributeError:
return False
return True
def register_enumerations():
for name, value in enumerations.TokenKinds:
TokenKind.register(value, name)
conf = Config()
register_enumerations()
__all__ = [
'Config',
'CodeCompletionResults',
'CompilationDatabase',
'CompileCommands',
'CompileCommand',
'CursorKind',
'Cursor',
'Diagnostic',
'File',
'FixIt',
'Index',
'SourceLocation',
'SourceRange',
'TokenKind',
'Token',
'TranslationUnitLoadError',
'TranslationUnit',
'TypeKind',
'Type',
]
|
thiblahute/hotdoc
|
hotdoc/extensions/c/clang/cindex.py
|
Python
|
lgpl-2.1
| 117,967
|
"""
shapefile.py
Provides read and write support for ESRI Shapefiles.
author: jlawhead<at>geospatialpython.com
date: 20110927
version: 1.1.4
Compatible with Python versions 2.4-3.x
"""
from struct import pack, unpack, calcsize, error
import os
import sys
import time
import array
#
# Constants for shape types
NULL = 0
POINT = 1
POLYLINE = 3
POLYGON = 5
MULTIPOINT = 8
POINTZ = 11
POLYLINEZ = 13
POLYGONZ = 15
MULTIPOINTZ = 18
POINTM = 21
POLYLINEM = 23
POLYGONM = 25
MULTIPOINTM = 28
MULTIPATCH = 31
PYTHON3 = sys.version_info[0] == 3
def b(v):
if PYTHON3:
if isinstance(v, str):
# For python 3 encode str to bytes.
return v.encode('utf-8')
elif isinstance(v, bytes):
# Already bytes.
return v
else:
# Error.
raise Exception('Unknown input type')
else:
# For python 2 assume str passed in and return str.
return v
def u(v):
if PYTHON3:
if isinstance(v, bytes):
# For python 3 decode bytes to str.
return v.decode('utf-8')
elif isinstance(v, str):
# Already str.
return v
else:
# Error.
raise Exception('Unknown input type')
else:
# For python 2 assume str passed in and return str.
return v
def is_string(v):
if PYTHON3:
return isinstance(v, str)
else:
return isinstance(v, basestring)
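# Illustrative behaviour of the helpers above (not part of the original
# module): on Python 3, b() encodes str to bytes and u() decodes bytes back
# to str; on Python 2 both return the value unchanged.
#
#   b("shape")    # -> b"shape" on Python 3, "shape" on Python 2
#   u(b("shape")) # -> "shape" on either version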
class _Array(array.array):
"""Converts python tuples to lits of the appropritate type.
Used to unpack different shapefile header parts."""
def __repr__(self):
return str(self.tolist())
class _Shape:
def __init__(self, shapeType=None):
"""Stores the geometry of the different shape types
specified in the Shapefile spec. Shape types are
usually point, polyline, or polygons. Every shape type
except the "Null" type contains points at some level for
example verticies in a polygon. If a shape type has
multiple shapes containing points within a single
geometry record then those shapes are called parts. Parts
are designated by their starting index in geometry record's
list of shapes."""
self.shapeType = shapeType
self.points = []
class _ShapeRecord:
"""A shape object of any type."""
def __init__(self, shape=None, record=None):
self.shape = shape
self.record = record
class ShapefileException(Exception):
"""An exception to handle shapefile specific problems."""
pass
class Reader:
"""Reads the three files of a shapefile as a unit or
separately. If one of the three files (.shp, .shx,
.dbf) is missing no exception is thrown until you try
to call a method that depends on that particular file.
The .shx index file is used if available for efficiency
but is not required to read the geometry from the .shp
file. The "shapefile" argument in the constructor is the
name of the file you want to open.
You can instantiate a Reader without specifying a shapefile
and then specify one later with the load() method.
Only the shapefile headers are read upon loading. Content
within each file is only accessed when required and as
efficiently as possible. Shapefiles are usually not large
but they can be.
"""
def __init__(self, *args, **kwargs):
self.shp = None
self.shx = None
self.dbf = None
self.shapeName = "Not specified"
self._offsets = []
self.shpLength = None
self.numRecords = None
self.fields = []
self.__dbfHdrLength = 0
# See if a shapefile name was passed as an argument
if len(args) > 0:
if type(args[0]) is type("stringTest"):
self.load(args[0])
return
if "shp" in kwargs.keys():
if hasattr(kwargs["shp"], "read"):
self.shp = kwargs["shp"]
if hasattr(self.shp, "seek"):
self.shp.seek(0)
if "shx" in kwargs.keys():
if hasattr(kwargs["shx"], "read"):
self.shx = kwargs["shx"]
if hasattr(self.shx, "seek"):
self.shx.seek(0)
if "dbf" in kwargs.keys():
if hasattr(kwargs["dbf"], "read"):
self.dbf = kwargs["dbf"]
if hasattr(self.dbf, "seek"):
self.dbf.seek(0)
if self.shp or self.dbf:
self.load()
else:
raise ShapefileException("Shapefile Reader requires a shapefile or file-like object.")
def load(self, shapefile=None):
"""Opens a shapefile from a filename or file-like
object. Normally this method would be called by the
constructor with the file object or file name as an
argument."""
if shapefile:
(shapeName, ext) = os.path.splitext(shapefile)
self.shapeName = shapeName
try:
self.shp = open("%s.shp" % shapeName, "rb")
except IOError:
raise ShapefileException("Unable to open %s.shp" % shapeName)
try:
self.shx = open("%s.shx" % shapeName, "rb")
except IOError:
raise ShapefileException("Unable to open %s.shx" % shapeName)
try:
self.dbf = open("%s.dbf" % shapeName, "rb")
except IOError:
raise ShapefileException("Unable to open %s.dbf" % shapeName)
if self.shp:
self.__shpHeader()
if self.dbf:
self.__dbfHeader()
def __getFileObj(self, f):
"""Checks to see if the requested shapefile file object is
available. If not a ShapefileException is raised."""
if not f:
raise ShapefileException("Shapefile Reader requires a shapefile or file-like object.")
if self.shp and self.shpLength is None:
self.load()
if self.dbf and len(self.fields) == 0:
self.load()
return f
def __restrictIndex(self, i):
"""Provides list-like handling of a record index with a clearer
error message if the index is out of bounds."""
if self.numRecords:
rmax = self.numRecords - 1
if abs(i) > rmax:
raise IndexError("Shape or Record index out of range.")
if i < 0: i = range(self.numRecords)[i]
return i
def __shpHeader(self):
"""Reads the header information from a .shp or .shx file."""
if not self.shp:
raise ShapefileException("Shapefile Reader requires a shapefile or file-like object. (no shp file found")
shp = self.shp
# File length (16-bit word * 2 = bytes)
shp.seek(24)
self.shpLength = unpack(">i", shp.read(4))[0] * 2
# Shape type
shp.seek(32)
self.shapeType= unpack("<i", shp.read(4))[0]
# The shapefile's bounding box (lower left, upper right)
self.bbox = _Array('d', unpack("<4d", shp.read(32)))
# Elevation
self.elevation = _Array('d', unpack("<2d", shp.read(16)))
# Measure
self.measure = _Array('d', unpack("<2d", shp.read(16)))
def __shape(self):
"""Returns the header info and geometry for a single shape."""
f = self.__getFileObj(self.shp)
record = _Shape()
nParts = nPoints = zmin = zmax = mmin = mmax = None
(recNum, recLength) = unpack(">2i", f.read(8))
shapeType = unpack("<i", f.read(4))[0]
record.shapeType = shapeType
# For Null shapes create an empty points list for consistency
if shapeType == 0:
record.points = []
# All shape types capable of having a bounding box
elif shapeType in (3,5,8,13,15,18,23,25,28,31):
record.bbox = _Array('d', unpack("<4d", f.read(32)))
# Shape types with parts
if shapeType in (3,5,13,15,23,25,31):
nParts = unpack("<i", f.read(4))[0]
# Shape types with points
if shapeType in (3,5,8,13,15,23,25,31):
nPoints = unpack("<i", f.read(4))[0]
# Read parts
if nParts:
record.parts = _Array('i', unpack("<%si" % nParts, f.read(nParts * 4)))
# Read part types for Multipatch - 31
if shapeType == 31:
record.partTypes = _Array('i', unpack("<%si" % nParts, f.read(nParts * 4)))
# Read points - produces a list of [x,y] values
if nPoints:
record.points = [_Array('d', unpack("<2d", f.read(16))) for p in range(nPoints)]
# Read z extremes and values
if shapeType in (13,15,18,31):
(zmin, zmax) = unpack("<2d", f.read(16))
record.z = _Array('d', unpack("<%sd" % nPoints, f.read(nPoints * 8)))
# Read m extremes and values
if shapeType in (13,15,18,23,25,28,31):
(mmin, mmax) = unpack("<2d", f.read(16))
# Measure values less than -10e38 are nodata values according to the spec
record.m = []
for m in _Array('d', unpack("%sd" % nPoints, f.read(nPoints * 8))):
if m > -10e38:
record.m.append(m)
else:
record.m.append(None)
# Read a single point
if shapeType in (1,11,21):
record.points = [_Array('d', unpack("<2d", f.read(16)))]
# Read a single Z value
if shapeType == 11:
record.z = unpack("<d", f.read(8))
# Read a single M value
if shapeType in (11,21):
record.m = unpack("<d", f.read(8))
return record
def __shapeIndex(self, i=None):
"""Returns the offset in a .shp file for a shape based on information
in the .shx index file."""
shx = self.shx
if not shx:
return None
if not self._offsets:
# File length (16-bit word * 2 = bytes) - header length
shx.seek(24)
shxRecordLength = (unpack(">i", shx.read(4))[0] * 2) - 100
numRecords = shxRecordLength // 8
# Jump to the first record.
shx.seek(100)
for r in range(numRecords):
# Offsets are 16-bit words just like the file length
self._offsets.append(unpack(">i", shx.read(4))[0] * 2)
shx.seek(shx.tell() + 4)
        if i is not None:
return self._offsets[i]
def shape(self, i=0):
"""Returns a shape object for a shape in the the geometry
record file."""
shp = self.__getFileObj(self.shp)
i = self.__restrictIndex(i)
offset = self.__shapeIndex(i)
if not offset:
# Shx index not available so use the full list.
shapes = self.shapes()
return shapes[i]
shp.seek(offset)
return self.__shape()
def shapes(self):
"""Returns all shapes in a shapefile."""
shp = self.__getFileObj(self.shp)
shp.seek(100)
shapes = []
while shp.tell() < self.shpLength:
shapes.append(self.__shape())
return shapes
def __dbfHeaderLength(self):
"""Retrieves the header length of a dbf file header."""
if not self.__dbfHdrLength:
if not self.dbf:
raise ShapefileException("Shapefile Reader requires a shapefile or file-like object. (no dbf file found)")
dbf = self.dbf
(self.numRecords, self.__dbfHdrLength) = \
unpack("<xxxxLH22x", dbf.read(32))
return self.__dbfHdrLength
def __dbfHeader(self):
"""Reads a dbf header. Xbase-related code borrows heavily from ActiveState Python Cookbook Recipe 362715 by Raymond Hettinger"""
if not self.dbf:
raise ShapefileException("Shapefile Reader requires a shapefile or file-like object. (no dbf file found)")
dbf = self.dbf
headerLength = self.__dbfHeaderLength()
numFields = (headerLength - 33) // 32
for field in range(numFields):
fieldDesc = list(unpack("<11sc4xBB14x", dbf.read(32)))
name = 0
idx = 0
if b("\x00") in fieldDesc[name]:
idx = fieldDesc[name].index(b("\x00"))
else:
idx = len(fieldDesc[name]) - 1
fieldDesc[name] = fieldDesc[name][:idx]
fieldDesc[name] = u(fieldDesc[name])
fieldDesc[name] = fieldDesc[name].lstrip()
fieldDesc[1] = u(fieldDesc[1])
self.fields.append(fieldDesc)
terminator = dbf.read(1)
assert terminator == b("\r")
self.fields.insert(0, ('DeletionFlag', 'C', 1, 0))
def __recordFmt(self):
"""Calculates the size of a .shp geometry record."""
if not self.numRecords:
self.__dbfHeader()
fmt = ''.join(['%ds' % fieldinfo[2] for fieldinfo in self.fields])
fmtSize = calcsize(fmt)
return (fmt, fmtSize)
def __record(self):
"""Reads and returns a dbf record row as a list of values."""
f = self.__getFileObj(self.dbf)
recFmt = self.__recordFmt()
recordContents = unpack(recFmt[0], f.read(recFmt[1]))
if recordContents[0] != b(' '):
# deleted record
return None
record = []
for (name, typ, size, deci), value in zip(self.fields,
recordContents):
if name == 'DeletionFlag':
continue
elif not value.strip():
record.append(value)
continue
elif typ == "N":
value = value.replace(b('\0'), b('')).strip()
if value == b(''):
value = 0
elif deci:
value = float(value)
else:
value = int(value)
            elif typ == "D":
try:
y, m, d = int(value[:4]), int(value[4:6]), int(value[6:8])
value = [y, m, d]
except:
value = value.strip()
            elif typ == "L":
value = (value in b('YyTt') and b('T')) or \
(value in b('NnFf') and b('F')) or b('?')
else:
value = u(value)
value = value.strip()
record.append(value)
return record
def record(self, i=0):
"""Returns a specific dbf record based on the supplied index."""
f = self.__getFileObj(self.dbf)
if not self.numRecords:
self.__dbfHeader()
i = self.__restrictIndex(i)
recSize = self.__recordFmt()[1]
f.seek(0)
f.seek(self.__dbfHeaderLength() + (i * recSize))
return self.__record()
def records(self):
"""Returns all records in a dbf file."""
if not self.numRecords:
self.__dbfHeader()
records = []
f = self.__getFileObj(self.dbf)
f.seek(self.__dbfHeaderLength())
for i in range(self.numRecords):
r = self.__record()
if r:
records.append(r)
return records
def shapeRecord(self, i=0):
"""Returns a combination geometry and attribute record for the
supplied record index."""
i = self.__restrictIndex(i)
return _ShapeRecord(shape=self.shape(i),
record=self.record(i))
def shapeRecords(self):
"""Returns a list of combination geometry/attribute records for
all records in a shapefile."""
        return [_ShapeRecord(shape=rec[0], record=rec[1]) \
                for rec in zip(self.shapes(), self.records())]
class Writer:
"""Provides write support for ESRI Shapefiles."""
def __init__(self, shapeType=None):
self._shapes = []
self.fields = []
self.records = []
self.shapeType = shapeType
self.shp = None
self.shx = None
self.dbf = None
# Geometry record offsets and lengths for writing shx file.
self._offsets = []
self._lengths = []
# Use deletion flags in dbf? Default is false (0).
self.deletionFlag = 0
def __getFileObj(self, f):
"""Safety handler to verify file-like objects"""
if not f:
raise ShapefileException("No file-like object available.")
elif hasattr(f, "write"):
return f
else:
pth = os.path.split(f)[0]
if pth and not os.path.exists(pth):
os.makedirs(pth)
return open(f, "wb")
def __shpFileLength(self):
"""Calculates the file length of the shp file."""
# Start with header length
size = 100
# Calculate size of all shapes
for s in self._shapes:
# Add in record header and shape type fields
size += 12
# nParts and nPoints do not apply to all shapes
#if self.shapeType not in (0,1):
# nParts = len(s.parts)
# nPoints = len(s.points)
if hasattr(s,'parts'):
nParts = len(s.parts)
if hasattr(s,'points'):
nPoints = len(s.points)
# All shape types capable of having a bounding box
if self.shapeType in (3,5,8,13,15,18,23,25,28,31):
size += 32
# Shape types with parts
if self.shapeType in (3,5,13,15,23,25,31):
# Parts count
size += 4
# Parts index array
size += nParts * 4
# Shape types with points
if self.shapeType in (3,5,8,13,15,23,25,31):
# Points count
size += 4
# Points array
size += 16 * nPoints
# Calc size of part types for Multipatch (31)
if self.shapeType == 31:
size += nParts * 4
# Calc z extremes and values
if self.shapeType in (13,15,18,31):
# z extremes
size += 16
# z array
size += 8 * nPoints
# Calc m extremes and values
if self.shapeType in (23,25,31):
# m extremes
size += 16
# m array
size += 8 * nPoints
# Calc a single point
if self.shapeType in (1,11,21):
size += 16
# Calc a single Z value
if self.shapeType == 11:
size += 8
# Calc a single M value
if self.shapeType in (11,21):
size += 8
# Calculate size as 16-bit words
size //= 2
return size
def __bbox(self, shapes, shapeTypes=[]):
x = []
y = []
for s in shapes:
shapeType = self.shapeType
if shapeTypes:
shapeType = shapeTypes[shapes.index(s)]
px, py = list(zip(*s.points))[:2]
x.extend(px)
y.extend(py)
return [min(x), min(y), max(x), max(y)]
def __zbox(self, shapes, shapeTypes=[]):
z = []
for s in shapes:
try:
for p in s.points:
z.append(p[2])
except IndexError:
pass
if not z: z.append(0)
return [min(z), max(z)]
def __mbox(self, shapes, shapeTypes=[]):
m = [0]
for s in shapes:
try:
for p in s.points:
m.append(p[3])
except IndexError:
pass
return [min(m), max(m)]
def bbox(self):
"""Returns the current bounding box for the shapefile which is
the lower-left and upper-right corners. It does not contain the
elevation or measure extremes."""
return self.__bbox(self._shapes)
def zbox(self):
"""Returns the current z extremes for the shapefile."""
return self.__zbox(self._shapes)
def mbox(self):
"""Returns the current m extremes for the shapefile."""
return self.__mbox(self._shapes)
def __shapefileHeader(self, fileObj, headerType='shp'):
"""Writes the specified header type to the specified file-like object.
Several of the shapefile formats are so similar that a single generic
method to read or write them is warranted."""
f = self.__getFileObj(fileObj)
f.seek(0)
# File code, Unused bytes
f.write(pack(">6i", 9994,0,0,0,0,0))
# File length (Bytes / 2 = 16-bit words)
if headerType == 'shp':
f.write(pack(">i", self.__shpFileLength()))
elif headerType == 'shx':
f.write(pack('>i', ((100 + (len(self._shapes) * 8)) // 2)))
# Version, Shape type
f.write(pack("<2i", 1000, self.shapeType))
# The shapefile's bounding box (lower left, upper right)
if self.shapeType != 0:
try:
f.write(pack("<4d", *self.bbox()))
except error:
raise ShapefileException("Failed to write shapefile bounding box. Floats required.")
else:
f.write(pack("<4d", 0,0,0,0))
# Elevation
z = self.zbox()
# Measure
m = self.mbox()
try:
f.write(pack("<4d", z[0], z[1], m[0], m[1]))
except error:
raise ShapefileException("Failed to write shapefile elevation and measure values. Floats required.")
def __dbfHeader(self):
"""Writes the dbf header and field descriptors."""
f = self.__getFileObj(self.dbf)
f.seek(0)
version = 3
year, month, day = time.localtime()[:3]
year -= 1900
# Remove deletion flag placeholder from fields
for field in self.fields:
if field[0].startswith("Deletion"):
self.fields.remove(field)
numRecs = len(self.records)
numFields = len(self.fields)
headerLength = numFields * 32 + 33
recordLength = sum([int(field[2]) for field in self.fields]) + 1
header = pack('<BBBBLHH20x', version, year, month, day, numRecs,
headerLength, recordLength)
f.write(header)
# Field descriptors
for field in self.fields:
name, fieldType, size, decimal = field
name = b(name)
name = name.replace(b(' '), b('_'))
name = name.ljust(11).replace(b(' '), b('\x00'))
fieldType = b(fieldType)
size = int(size)
fld = pack('<11sc4xBB14x', name, fieldType, size, decimal)
f.write(fld)
# Terminator
f.write(b('\r'))
def __shpRecords(self):
"""Write the shp records"""
f = self.__getFileObj(self.shp)
f.seek(100)
recNum = 1
for s in self._shapes:
self._offsets.append(f.tell())
# Record number, Content length place holder
f.write(pack(">2i", recNum, 0))
recNum += 1
start = f.tell()
# Shape Type
f.write(pack("<i", s.shapeType))
# All shape types capable of having a bounding box
if s.shapeType in (3,5,8,13,15,18,23,25,28,31):
try:
f.write(pack("<4d", *self.__bbox([s])))
except error:
raise ShapefileException("Falied to write bounding box for record %s. Expected floats." % recNum)
# Shape types with parts
if s.shapeType in (3,5,13,15,23,25,31):
# Number of parts
f.write(pack("<i", len(s.parts)))
# Shape types with multiple points per record
if s.shapeType in (3,5,8,13,15,23,25,31):
# Number of points
f.write(pack("<i", len(s.points)))
# Write part indexes
if s.shapeType in (3,5,13,15,23,25,31):
for p in s.parts:
f.write(pack("<i", p))
# Part types for Multipatch (31)
if s.shapeType == 31:
for pt in s.partTypes:
f.write(pack("<i", pt))
# Write points for multiple-point records
if s.shapeType in (3,5,8,13,15,23,25,31):
try:
[f.write(pack("<2d", *p[:2])) for p in s.points]
except error:
raise ShapefileException("Failed to write points for record %s. Expected floats." % recNum)
# Write z extremes and values
if s.shapeType in (13,15,18,31):
try:
f.write(pack("<2d", *self.__zbox([s])))
except error:
raise ShapefileException("Failed to write elevation extremes for record %s. Expected floats." % recNum)
try:
[f.write(pack("<d", p[2])) for p in s.points]
except error:
raise ShapefileException("Failed to write elevation values for record %s. Expected floats." % recNum)
# Write m extremes and values
if s.shapeType in (23,25,31):
try:
f.write(pack("<2d", *self.__mbox([s])))
except error:
raise ShapefileException("Failed to write measure extremes for record %s. Expected floats" % recNum)
try:
[f.write(pack("<d", p[3])) for p in s.points]
except error:
raise ShapefileException("Failed to write measure values for record %s. Expected floats" % recNum)
# Write a single point
if s.shapeType in (1,11,21):
try:
f.write(pack("<2d", s.points[0][0], s.points[0][1]))
except error:
raise ShapefileException("Failed to write point for record %s. Expected floats." % recNum)
# Write a single Z value
if s.shapeType == 11:
try:
f.write(pack("<1d", s.points[0][2]))
except error:
raise ShapefileException("Failed to write elevation value for record %s. Expected floats." % recNum)
# Write a single M value
if s.shapeType in (11,21):
try:
f.write(pack("<1d", s.points[0][3]))
except error:
raise ShapefileException("Failed to write measure value for record %s. Expected floats." % recNum)
# Finalize record length as 16-bit words
finish = f.tell()
length = (finish - start) // 2
self._lengths.append(length)
# start - 4 bytes is the content length field
f.seek(start-4)
f.write(pack(">i", length))
f.seek(finish)
def __shxRecords(self):
"""Writes the shx records."""
f = self.__getFileObj(self.shx)
f.seek(100)
for i in range(len(self._shapes)):
f.write(pack(">i", self._offsets[i] // 2))
f.write(pack(">i", self._lengths[i]))
def __dbfRecords(self):
"""Writes the dbf records."""
f = self.__getFileObj(self.dbf)
for record in self.records:
if not self.fields[0][0].startswith("Deletion"):
f.write(b(' ')) # deletion flag
for (fieldName, fieldType, size, dec), value in zip(self.fields, record):
fieldType = fieldType.upper()
size = int(size)
if fieldType.upper() == "N":
value = str(value).rjust(size)
elif fieldType == 'L':
value = str(value)[0].upper()
else:
value = str(value)[:size].ljust(size)
assert len(value) == size
value = b(value)
f.write(value)
def null(self):
"""Creates a null shape."""
self._shapes.append(_Shape(NULL))
def point(self, x, y, z=0, m=0):
"""Creates a point shape."""
pointShape = _Shape(self.shapeType)
pointShape.points.append([x, y, z, m])
self._shapes.append(pointShape)
def line(self, parts=[], shapeType=POLYLINE):
"""Creates a line shape. This method is just a convienience method
which wraps 'poly()'.
"""
self.poly(parts, shapeType, [])
def poly(self, parts=[], shapeType=POLYGON, partTypes=[]):
"""Creates a shape that has multiple collections of points (parts)
including lines, polygons, and even multipoint shapes. If no shape type
is specified it defaults to 'polygon'. If no part types are specified
(which they normally won't be) then all parts default to the shape type.
"""
polyShape = _Shape(shapeType)
polyShape.parts = []
polyShape.points = []
for part in parts:
polyShape.parts.append(len(polyShape.points))
for point in part:
# Ensure point is list
if not isinstance(point, list):
point = list(point)
# Make sure point has z and m values
while len(point) < 4:
point.append(0)
polyShape.points.append(point)
        if polyShape.shapeType == 31:
            if not partTypes:
                # Build a new list instead of appending to the shared default argument.
                partTypes = [polyShape.shapeType for part in parts]
            polyShape.partTypes = partTypes
self._shapes.append(polyShape)
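    # Illustrative usage (not part of the original module): a single-part
    # polygon whose ring is closed by repeating the first point; the
    # coordinates are made up:
    #
    #   w = Writer(POLYGON)
    #   w.poly(parts=[[[1.0, 1.0], [1.0, 5.0], [5.0, 5.0], [5.0, 1.0], [1.0, 1.0]]])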
def field(self, name, fieldType="C", size="50", decimal=0):
"""Adds a dbf field descriptor to the shapefile."""
self.fields.append((name, fieldType, size, decimal))
def record(self, *recordList, **recordDict):
"""Creates a dbf attribute record. You can submit either a sequence of
field values or keyword arguments of field names and values. Before
adding records you must add fields for the record values using the
fields() method. If the record values exceed the number of fields the
extra ones won't be added. In the case of using keyword arguments to specify
field/value pairs only fields matching the already registered fields
will be added."""
record = []
fieldCount = len(self.fields)
# Compensate for deletion flag
if self.fields[0][0].startswith("Deletion"): fieldCount -= 1
if recordList:
[record.append(recordList[i]) for i in range(fieldCount)]
elif recordDict:
for field in self.fields:
if field[0] in recordDict:
val = recordDict[field[0]]
if val:
record.append(val)
else:
record.append("")
if record:
self.records.append(record)
def shape(self, i):
return self._shapes[i]
def shapes(self):
"""Return the current list of shapes."""
return self._shapes
def saveShp(self, target):
"""Save an shp file."""
if not hasattr(target, "write"):
target = os.path.splitext(target)[0] + '.shp'
if not self.shapeType:
self.shapeType = self._shapes[0].shapeType
self.shp = self.__getFileObj(target)
self.__shapefileHeader(self.shp, headerType='shp')
self.__shpRecords()
def saveShx(self, target):
"""Save an shx file."""
if not hasattr(target, "write"):
target = os.path.splitext(target)[0] + '.shx'
if not self.shapeType:
self.shapeType = self._shapes[0].shapeType
self.shx = self.__getFileObj(target)
self.__shapefileHeader(self.shx, headerType='shx')
self.__shxRecords()
def saveDbf(self, target):
"""Save a dbf file."""
if not hasattr(target, "write"):
target = os.path.splitext(target)[0] + '.dbf'
self.dbf = self.__getFileObj(target)
self.__dbfHeader()
self.__dbfRecords()
def save(self, target=None, shp=None, shx=None, dbf=None):
"""Save the shapefile data to three files or
three file-like objects. SHP and DBF files can also
be written exclusively using saveShp, saveShx, and saveDbf respectively."""
# TODO: Create a unique filename for target if None.
if shp:
self.saveShp(shp)
if shx:
self.saveShx(shx)
if dbf:
self.saveDbf(dbf)
elif target:
self.saveShp(target)
self.shp.close()
self.saveShx(target)
self.shx.close()
self.saveDbf(target)
self.dbf.close()
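    # Illustrative usage (not part of the original module): write a one-point
    # shapefile to the hypothetical base name "spots":
    #
    #   w = Writer(POINT)
    #   w.field("NAME", "C", "40")
    #   w.point(10.0, 20.0)
    #   w.record("first spot")
    #   w.save("spots")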
class Editor(Writer):
def __init__(self, shapefile=None, shapeType=POINT, autoBalance=1):
self.autoBalance = autoBalance
if not shapefile:
Writer.__init__(self, shapeType)
elif is_string(shapefile):
base = os.path.splitext(shapefile)[0]
if os.path.isfile("%s.shp" % base):
r = Reader(base)
Writer.__init__(self, r.shapeType)
self._shapes = r.shapes()
self.fields = r.fields
self.records = r.records()
def select(self, expr):
"""Select one or more shapes (to be implemented)"""
# TODO: Implement expressions to select shapes.
pass
def delete(self, shape=None, part=None, point=None):
"""Deletes the specified part of any shape by specifying a shape
number, part number, or point number."""
# shape, part, point
if shape and part and point:
del self._shapes[shape][part][point]
# shape, part
elif shape and part and not point:
del self._shapes[shape][part]
# shape
elif shape and not part and not point:
del self._shapes[shape]
# point
elif not shape and not part and point:
for s in self._shapes:
if s.shapeType == 1:
del self._shapes[point]
else:
for part in s.parts:
del s[part][point]
# part, point
elif not shape and part and point:
for s in self._shapes:
del s[part][point]
# part
elif not shape and part and not point:
for s in self._shapes:
del s[part]
def point(self, x=None, y=None, z=None, m=None, shape=None, part=None, point=None, addr=None):
"""Creates/updates a point shape. The arguments allows
you to update a specific point by shape, part, point of any
shape type."""
# shape, part, point
if shape and part and point:
try: self._shapes[shape]
except IndexError: self._shapes.append([])
try: self._shapes[shape][part]
except IndexError: self._shapes[shape].append([])
try: self._shapes[shape][part][point]
except IndexError: self._shapes[shape][part].append([])
p = self._shapes[shape][part][point]
if x: p[0] = x
if y: p[1] = y
if z: p[2] = z
if m: p[3] = m
self._shapes[shape][part][point] = p
# shape, part
elif shape and part and not point:
try: self._shapes[shape]
except IndexError: self._shapes.append([])
try: self._shapes[shape][part]
except IndexError: self._shapes[shape].append([])
points = self._shapes[shape][part]
for i in range(len(points)):
p = points[i]
if x: p[0] = x
if y: p[1] = y
if z: p[2] = z
if m: p[3] = m
self._shapes[shape][part][i] = p
# shape
elif shape and not part and not point:
try: self._shapes[shape]
except IndexError: self._shapes.append([])
# point
# part
if addr:
shape, part, point = addr
self._shapes[shape][part][point] = [x, y, z, m]
else:
Writer.point(self, x, y, z, m)
if self.autoBalance:
self.balance()
def validate(self):
"""An optional method to try and validate the shapefile
as much as possible before writing it (not implemented)."""
#TODO: Implement validation method
pass
def balance(self):
"""Adds a corresponding empty attribute or null geometry record depending
on which type of record was created to make sure all three files
are in synch."""
if len(self.records) > len(self._shapes):
self.null()
elif len(self.records) < len(self._shapes):
self.record()
def __fieldNorm(self, fieldName):
"""Normalizes a dbf field name to fit within the spec and the
expectations of certain ESRI software."""
if len(fieldName) > 11: fieldName = fieldName[:11]
fieldName = fieldName.upper()
        fieldName = fieldName.replace(' ', '_')
        return fieldName
# Begin Testing
def test():
import doctest
doctest.NORMALIZE_WHITESPACE = 1
doctest.testfile("README.txt", verbose=1)
if __name__ == "__main__":
"""
Doctests are contained in the module 'pyshp_usage.py'. This library was developed
using Python 2.3. Python 2.4 and above have some excellent improvements in the built-in
testing libraries but for now unit testing is done using what's available in
2.3.
"""
test()
|
janhui/test_engine
|
dev/plugins/define_boundary_ids/shapefile.py
|
Python
|
lgpl-2.1
| 38,004
|
# -*- coding: utf-8 -*-
# Resource object code
#
# Created: Thu Jul 25 00:08:39 2013
# by: The Resource Compiler for PyQt (Qt v4.8.4)
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore
qt_resource_data = "\
\x00\x00\x05\x3e\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x20\x00\x00\x00\x20\x08\x06\x00\x00\x01\x04\x7d\x4a\x62\
\x00\x00\x00\x09\x70\x48\x59\x73\x00\x00\x00\x7e\x00\x00\x00\x7e\
\x01\x6a\xf1\x2e\x6d\x00\x00\x00\x19\x74\x45\x58\x74\x53\x6f\x66\
\x74\x77\x61\x72\x65\x00\x41\x64\x6f\x62\x65\x20\x49\x6d\x61\x67\
\x65\x52\x65\x61\x64\x79\x71\xc9\x65\x3c\x00\x00\x04\xcb\x49\x44\
\x41\x54\x78\xda\x62\x60\xc0\x06\xd2\x0f\x3a\xff\xff\x5f\xc1\xff\
\x5f\xa7\xfa\xe2\x7f\x26\xac\x2a\xfe\x7f\x90\xff\x0f\xc2\x1d\xeb\
\x9a\xff\x03\x04\x10\x0e\xfd\x40\x45\x30\x55\x18\x66\x30\x82\x44\
\x19\x04\x1e\x32\xe4\xcd\x9f\xc9\x60\x2a\x7b\x99\x01\x20\x80\x18\
\x61\xda\x40\xf4\x0c\xfb\xbd\x0c\xba\xd5\x17\xc1\x2a\xcb\x1c\x67\
\x33\xc4\x9a\x6c\x66\x60\x41\x37\xe2\xf2\x5f\x3b\x06\x86\x0a\x01\
\x06\xdd\xee\x4d\x0c\x9f\x7f\xf2\x60\x2a\xd0\x65\x3e\xc4\xc0\xd0\
\xcd\xc0\x10\x63\xbc\x89\x21\xcb\x7a\x39\xaa\xbb\xc1\x18\xea\x42\
\x90\x2f\x41\x34\x40\x00\x31\x10\x02\x60\x57\x9b\x1d\x32\x60\x38\
\x65\x77\x01\x6c\x2f\xdc\x2d\xa5\x7e\x20\x6a\x02\xd8\x78\xd3\x4d\
\xfe\x28\xc6\xaf\x3f\x92\x08\xb3\x62\x3f\xd6\xc0\x0c\xd0\xd9\x07\
\x67\x63\x55\x70\xfa\x91\x0e\x9c\x8d\xe2\xcd\xbc\x90\x7e\x86\xfd\
\xdd\x4e\xc8\x6e\x80\x98\x60\xc4\xff\x05\xcc\x99\xb4\xbb\x08\x2e\
\x01\x73\x30\x48\xc1\x87\x54\xf9\x17\xd0\x50\x62\x03\x6a\xfd\x05\
\x57\x04\x04\x09\x00\x01\xc4\x08\x0d\xa8\x78\x20\xb5\x80\x81\x34\
\xf0\x81\x51\xe0\xa1\x20\xcc\x80\xff\x19\x17\x55\x18\xce\x7d\xe4\
\x01\x3b\x67\x86\xfe\x1d\x06\x50\x8c\x83\x40\x67\x72\x25\xc3\x0d\
\x53\x6d\x86\x33\x8f\x75\xe1\x3a\xd5\x45\xef\x31\xac\x49\x28\x00\
\x31\x0f\xb0\x10\xb2\xa6\x5c\x74\x1a\x03\x43\x84\x00\x22\xae\x81\
\x7e\xbb\xf9\x5a\x89\x61\xda\xd1\x48\x70\x5c\x33\x31\x90\x09\x78\
\xd9\xbf\x60\x06\x33\x36\xf0\xf4\xa3\x18\xc3\xb3\x47\x6a\x0c\xd3\
\x8e\x45\xc2\xbd\x61\x02\x4c\xc8\xa0\xb4\x0a\x4e\x2b\xb0\x30\x20\
\x14\xa1\xa6\x72\x57\xc0\x7c\x75\xb1\xfb\x0c\xd6\x93\x97\x33\xac\
\x8e\xcf\x67\xd0\x10\xbb\x8f\x08\x83\xd9\x0f\x25\xc0\xb8\x50\xe9\
\x29\x43\xa4\xcc\x6b\x70\x20\x9a\xda\x40\xa3\x6f\x8b\x04\x98\x5a\
\x7c\xd6\x8f\xe1\xe6\x2b\x45\x70\x5a\x07\x6a\x46\xb8\x00\xea\x8a\
\x7e\x20\x65\x80\xe1\x8c\x23\x3f\x18\x18\x6c\x38\xb0\x39\x30\x01\
\x18\x8d\x0f\x01\x02\x88\x81\x52\x00\x0b\x83\xf7\x40\x4a\x80\x44\
\xbd\x20\x17\x2c\x64\x84\x3a\xbd\x00\x94\xe9\x40\x00\x94\xf1\x40\
\x60\x9a\xaf\x27\xc3\x74\xbb\x4c\x14\x1d\x52\x7c\x2f\x19\x76\xa6\
\xa7\x22\x6c\x17\x78\xc8\xc8\x84\xd5\xdf\x40\x90\x75\x78\x06\xc3\
\xd1\x1e\x1b\x70\x94\x81\x30\x08\x3c\xfb\x24\x8e\x92\xab\x09\xa6\
\x03\xbe\x9f\x9f\x19\xe6\x47\x54\xa3\xa4\x42\x10\xb0\x9a\xb4\x8c\
\xe1\x58\x5e\x14\xee\x0c\x8d\x0b\x34\x7b\x4e\x00\xd3\xa0\xf2\x0c\
\x6f\x89\x80\x0b\x98\xca\x5e\xc1\x10\x23\xc9\x80\xd3\x8f\x75\x28\
\x33\xa0\x76\x7b\x01\x3c\x36\x88\x0a\x44\x58\x7e\xb8\x01\x4c\xbe\
\x5d\xfb\x11\xd1\x87\x1c\x95\x38\x0d\xf8\xc4\xce\xcb\x60\x5d\x72\
\x84\x81\x61\x25\xaa\xf8\xd1\xdc\x48\x8c\x92\x7b\x3f\x90\x76\xc0\
\x9a\x8d\x81\x18\x5c\x02\x01\x33\xce\xe7\x9f\xdc\xc0\x32\xe0\x2b\
\x43\xe8\xc2\x09\x60\x36\x28\x1a\x41\x09\x09\x25\x37\x4a\xb2\xff\
\x62\xf0\x91\x78\xc7\xc0\x60\xfb\x8c\x41\xfa\xf2\x43\x06\xe9\x0f\
\xf2\x70\x03\x41\x59\x18\x1b\x60\x42\x36\xa0\xe9\x96\x1c\x84\x13\
\x05\x8d\xe7\xe8\x57\x0c\x0c\xcb\x20\x25\x4f\xa6\xd5\x72\x30\x46\
\x2e\xd3\x61\x5e\x00\x59\xf3\xe0\xd9\x0f\x36\x06\x5e\x96\xbf\x60\
\x0c\x07\xd0\x82\x95\x01\xea\x92\xc4\x15\xad\xe0\x52\x09\x5a\x98\
\xa0\x94\xca\xf2\x58\x8b\xf5\x37\x40\xc3\x44\x98\xb1\xb9\xfc\x02\
\x50\x73\x21\x88\x01\x10\x60\x8c\x68\x0d\x00\x90\x41\x05\xb8\x32\
\x18\x15\x00\x28\xab\x4f\x40\x2e\x88\x90\x7d\x00\x92\x14\xe8\xbb\
\x2b\xcd\x70\xeb\x0b\x27\x5c\x87\x1a\xcf\x77\x86\x22\xe5\xa7\x88\
\x74\xb5\x4c\x85\x61\x9f\x84\x03\x38\x6d\x61\x03\xd2\x7c\xaf\x18\
\xa4\xf8\x5f\x81\x5b\x20\xa0\x72\x18\x57\xa5\x06\xf2\x24\xc8\x21\
\x30\x07\xc0\x93\x12\xac\x82\x83\x01\x78\x45\x87\x16\x2f\x37\xc4\
\xd5\x19\xf2\x81\x95\xf5\x33\x01\x69\x82\xde\x06\x15\x47\x13\x03\
\xda\x18\xf8\x38\xbe\x22\x0b\x1f\x00\x3a\xc0\x91\x85\xdc\xb0\xd4\
\x78\x79\x93\x61\xe7\x54\x2f\x60\x8b\x89\x1f\xdc\x6a\x82\x67\xa0\
\x1f\xdc\x0c\x4b\x80\x95\xc7\xf4\x63\x88\x0c\x03\x4a\x38\xa0\x64\
\x0c\x4a\x85\xf0\x86\x13\x39\x65\x01\x31\x00\xe4\x4b\x90\x25\xa0\
\xa4\x0e\x2b\x88\x61\x00\xe4\x28\xe4\xaa\x92\x26\x0e\x40\xa9\x15\
\xac\x96\x63\x29\x11\x75\xe9\xe7\x00\x62\x00\x4d\x1d\x30\xed\x58\
\x24\x86\x98\x93\xea\x09\xe2\xeb\x44\x72\x01\x28\x9e\xf3\x37\x54\
\xa1\x54\x7d\xb0\x2a\x11\xd6\xa2\xa1\x8a\x03\x16\x9b\x46\x33\x4c\
\xff\x9d\xc1\xf0\xb9\x9b\x0f\xaf\x3a\x50\x7b\x74\x1e\xb0\x72\x47\
\xcb\x86\xe4\x39\x80\x98\xfc\x0f\x4a\xfd\x4e\x2a\x27\x80\xc1\x7d\
\x92\x41\x1a\x58\x28\xc1\x42\x05\x54\x2d\xa1\x3b\x02\xa3\x20\xc2\
\x0b\x80\xed\x7b\x86\x0e\x60\x21\xf6\xf1\x1f\xb0\x7a\x01\xfa\xda\
\x9b\x0b\x43\xc9\xbe\xdb\xe6\xc0\xe0\xaf\x86\xb7\x81\x63\x8c\x37\
\xa3\x94\x09\x48\x65\x01\xf6\x82\x68\xf9\x13\x51\x86\xfe\x7b\x08\
\xdf\x4d\xd7\xbb\xc3\x60\x2c\x00\xa9\xd2\x18\x7c\x80\xad\xd5\x8f\
\xd0\x96\xf0\x91\xd7\xf0\x5a\x0a\xbd\x22\x86\x01\x50\x1a\x80\x25\
\x3a\x50\x83\x04\x54\x4c\x83\x5a\xb6\xd8\xa2\xe0\x02\x2c\x04\x40\
\x65\x3f\x0c\xf0\x30\xff\x65\x90\xe4\xf8\x85\x50\xfd\x10\x58\xdf\
\x3e\xfa\x03\x09\x01\x50\x48\x08\x3c\x44\xc8\x5d\x02\x3a\x5a\x8e\
\x05\xdc\x70\x87\x35\xde\xf3\xd6\x57\x01\x5b\x00\x13\x11\xdd\x0d\
\x60\x35\x8a\x14\x05\x17\xd0\x9b\xd7\xf1\xe0\xce\x1f\xb1\x8d\xdc\
\xad\xdf\x80\x15\xfe\x6b\x08\xdb\x1b\x58\x79\x2d\x15\xc3\x4c\x2f\
\xc0\x0a\x2b\x1f\xe8\x08\x50\x93\x14\xe4\xf3\x72\xa7\x39\xb0\x8a\
\x08\xd4\x30\xde\xc8\x30\x18\x00\x00\x74\x67\xf6\x10\x90\x35\x83\
\x2d\x00\x00\x00\x00\x49\x45\x4e\x44\xae\x42\x60\x82\
"
qt_resource_name = "\
\x00\x07\
\x07\x3b\xe0\xb3\
\x00\x70\
\x00\x6c\x00\x75\x00\x67\x00\x69\x00\x6e\x00\x73\
\x00\x17\
\x03\xff\x46\x7e\
\x00\x62\
\x00\x6f\x00\x75\x00\x6e\x00\x64\x00\x61\x00\x72\x00\x79\x00\x5f\x00\x69\x00\x64\x00\x65\x00\x6e\x00\x74\x00\x69\x00\x66\x00\x69\
\x00\x63\x00\x61\x00\x74\x00\x69\x00\x6f\x00\x6e\
\x00\x08\
\x0a\x61\x5a\xa7\
\x00\x69\
\x00\x63\x00\x6f\x00\x6e\x00\x2e\x00\x70\x00\x6e\x00\x67\
"
qt_resource_struct = "\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x02\
\x00\x00\x00\x14\x00\x02\x00\x00\x00\x01\x00\x00\x00\x03\
\x00\x00\x00\x48\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
"
def qInitResources():
QtCore.qRegisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)
def qCleanupResources():
QtCore.qUnregisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)
qInitResources()
|
adamcandy/QGIS-Meshing
|
plugins/boundary_identification/resources.py
|
Python
|
lgpl-2.1
| 6,749
|
#! /usr/bin/env python
#/*
# * example_02.c: sample gnome-print code
# *
# * This program is free software; you can redistribute it and/or
# * modify it under the terms of the GNU Library General Public License
# * as published by the Free Software Foundation; either version 2 of
# * the License, or (at your option) any later version.
# *
# * This program is distributed in the hope that it will be useful,
# * but WITHOUT ANY WARRANTY; without even the implied warranty of
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# * GNU Library General Public License for more details.
# *
# * You should have received a copy of the GNU Library General Public
# * License along with this program; if not, write to the Free Software
# * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
# *
# * Authors:
# * Chema Celorio <chema@ximian.com>
# Python conversion:
# Gustavo J. A. M. Carneiro <gustavo@users.sf.net>
# *
# * Copyright (C) 2002 Ximian Inc. and authors
# *
# */
#/*
# * See README
# */
import pygtk; pygtk.require("2.0")
import gnomeprint
import gtk.gdk
import struct
NUMBER_OF_PIXELS=256
def my_print_image_from_pixbuf(gpc, pixbuf):
raw_image = pixbuf.get_pixels()
has_alpha = pixbuf.get_has_alpha()
rowstride = pixbuf.get_rowstride()
height = pixbuf.get_height()
width = pixbuf.get_width()
if has_alpha:
gpc.rgbaimage(raw_image, width, height, rowstride)
else:
gpc.rgbimage(raw_image, width, height, rowstride)
def my_print_image_from_disk(gpc):
# Load the image into a pixbuf
pixbuf = gtk.gdk.pixbuf_new_from_file("sample-image.png")
# Save the graphic context, scale, print the image and restore
gpc.gsave()
gpc.scale(144, 144)
my_print_image_from_pixbuf(gpc, pixbuf)
gpc.grestore()
def my_print_image_from_memory(gpc):
pixels = NUMBER_OF_PIXELS;
# Create the image in memory
color_image = []
for y in xrange(pixels):
for x in xrange(pixels):
color_image.append(struct.pack("BBB",
(x + y) >> 1,
(x + (pixels - 1 - y)) >> 1,
((pixels - 1 - x) + y) >> 1))
# All images in postscript are printed on a 1 x 1 square, since we
# want an image which has a size of 2" by 2" inches, we have to scale
# the CTM (Current Transformation Matrix). Save the graphic state and
# restore it after we are done so that our scaling does not affect the
# drawing calls that follow.
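    # (Arithmetic behind the 144: gnome-print, like PostScript, uses points as
    #  its default unit, 72 points per inch, so a 2" x 2" image needs a scale
    #  factor of 2 * 72 = 144 in each direction.)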
gpc.gsave()
gpc.scale(144, 144)
gpc.rgbimage("".join(color_image), pixels, pixels, pixels * 3)
gpc.grestore()
def my_draw(gpc):
gpc.beginpage("1")
gpc.translate(200, 100)
my_print_image_from_memory(gpc)
gpc.translate(0, 150)
my_print_image_from_disk(gpc)
gpc.showpage()
def my_print():
job = gnomeprint.Job(gnomeprint.config_default())
gpc = job.get_context()
my_draw(gpc)
job.close()
job.print_()
my_print()
print "Done..."
|
dieterv/gnome-python-desktop
|
examples/gnomeprint/example_02.py
|
Python
|
lgpl-2.1
| 2,964
|
#!/usr/bin/env python
##########################################################################
#
# QGIS-meshing plugins.
#
# Copyright (C) 2012-2013 Imperial College London and others.
#
# Please see the AUTHORS file in the main source directory for a
# full list of copyright holders.
#
# Dr Adam S. Candy, adam.candy@imperial.ac.uk
# Applied Modelling and Computation Group
# Department of Earth Science and Engineering
# Imperial College London
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation,
# version 2.1 of the License.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
# USA
#
##########################################################################
import numpy
import argparse
import os
import math
from Scientific.IO import NetCDF
def main():
parser = argparse.ArgumentParser(
prog="gaussian_bump",
description="""Create a Gaussian bump in a netcdf file"""
)
parser.add_argument(
'-v',
'--verbose',
action='store_true',
help="Verbose output: mainly progress reports.",
default=False
)
parser.add_argument(
'-d',
'--domain',
help="Domain size. Defualt is 1000x1000m",
default=1000.0,
type=float
)
parser.add_argument(
'-b',
'--bumpheight',
help="Distance between seabed and top of bump. Default is 100m",
default=100,
type=float
)
parser.add_argument(
'-r',
'--resolution',
help="Resolution of output netcdf file. Default is 10m",
default=10.0,
type=float
)
parser.add_argument(
'--shift',
help="Shift the bump in the 'north-south' direction, wrapping along the top/bottom",
default = 0,
type=float
)
parser.add_argument(
'--spread',
help="Spread of Gaussian",
default = 100.0,
type=float
)
parser.add_argument(
'output_file',
metavar='output_file',
nargs=1,
help='The output netcdf file'
)
args = parser.parse_args()
verbose = args.verbose
output_file = args.output_file[0]
domain_size = args.domain
bump_height = args.bumpheight
resolution = args.resolution
shift = args.shift
spread = args.spread
nPoints = int(domain_size / resolution)
shift = int(shift/resolution)
if (verbose):
print nPoints, shift
# generate regular grid
X, Y = numpy.meshgrid(numpy.linspace(0.0, domain_size, nPoints), numpy.linspace(0.0, domain_size, nPoints))
Z = numpy.zeros((nPoints,nPoints))
#for each point calculate the Gaussian
centre = domain_size/2.0
for i in range(0,len(X)):
for j in range(0,len(X[0])):
r = ((X[i][j]-centre)**2/(2.0*spread**2) + (Y[i][j]-centre)**2/(2.0*spread**2))
Z[i][j] = bump_height * math.exp(-1.0*r)
if (not shift == 0.0):
Z = numpy.roll(Z, shift, 0)
f = NetCDF.NetCDFFile(output_file, 'w')
xDim = f.createDimension("X", nPoints)
yDim = f.createDimension("Y", nPoints)
x = f.createVariable("X","d",("X",))
y = f.createVariable("Y","d",("Y",))
zVar = f.createVariable("Z","d",("X","Y"))
x.assignValue(X[0,0:nPoints])
y.assignValue(Y[0:nPoints,0])
zVar.assignValue(Z)
f.close()
os.system('grdreformat '+output_file+' '+output_file)
os.system('rm -f 1_contour.* 50_contour.*')
os.system('gdal_contour -fl 1.0 NETCDF:"'+output_file+'":z 1_contour.shp')
os.system('gdal_contour -fl 50.0 NETCDF:"'+output_file+'":z 50_contour.shp')
if __name__ == "__main__":
main()
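# Example invocation (a sketch; 'bump.nc' is a made-up output name, all other
# values are the documented defaults): generates a 1000x1000 m domain sampled
# at 10 m resolution with a 100 m high Gaussian bump of spread 100 m centred
# in the domain, then extracts the 1 m and 50 m contours with gdal_contour
# (grdreformat and gdal_contour must be on the PATH):
#
#   python gaussian_bump.py -v bump.nc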
|
adamcandy/qgis-plugins-meshing
|
dev/tests/gaussian_bump.py
|
Python
|
lgpl-2.1
| 4,413
|
# -*- coding: utf-8 -*-
'''
Tests for basic capturing
'''
import os
import os.path
import shutil
import tempfile
import unittest
from pyca import capture, config, db, utils
from tests.tools import should_fail, terminate_fn, reload
class TestPycaCapture(unittest.TestCase):
def setUp(self):
utils.http_request = lambda x, y=False: b'xxx'
self.fd, self.dbfile = tempfile.mkstemp()
self.cadir = tempfile.mkdtemp()
preview = os.path.join(self.cadir, 'preview.png')
open(preview, 'a').close()
config.config()['agent']['database'] = 'sqlite:///' + self.dbfile
config.config()['capture']['command'] = 'touch {{dir}}/{{name}}.mp4'
config.config()['capture']['directory'] = self.cadir
config.config()['capture']['preview'] = [preview]
config.config()['services']['org.opencastproject.capture.admin'] = ['']
# Mock event
db.init()
self.event = db.BaseEvent()
self.event.uid = '123123'
self.event.title = u'äüÄÜß'
self.event.start = utils.timestamp()
self.event.end = self.event.start
self.event.status = db.Status.UPCOMING
data = [{'data': u'äüÄÜß',
'fmttype': 'application/xml',
'x-apple-filename': 'episode.xml'},
{'data': u'äüÄÜß',
'fmttype': 'application/xml',
'x-apple-filename': 'series.xml'},
{'data': u'event.title=äüÄÜß\n' +
u'org.opencastproject.workflow.config.x=123\n' +
u'org.opencastproject.workflow.definition=fast',
'fmttype': 'application/text',
'x-apple-filename': 'org.opencastproject.capture.agent' +
'.properties'}]
self.event.set_data({'attach': data})
def tearDown(self):
os.close(self.fd)
os.remove(self.dbfile)
shutil.rmtree(self.cadir)
reload(capture)
reload(config)
reload(utils)
def test_start_capture(self):
capture.start_capture(self.event)
def test_start_capture_recording_command_failure(self):
config.config()['capture']['command'] = 'false'
with self.assertRaises(RuntimeError):
capture.start_capture(self.event)
def test_start_capture_sigterm(self):
config.config()['capture']['command'] = 'sleep 10'
config.config()['capture']['sigterm_time'] = 0
capture.start_capture(self.event)
def test_start_capture_sigkill(self):
config.config()['capture']['command'] = 'sleep 10'
config.config()['capture']['sigkill_time'] = 0
capture.start_capture(self.event)
def test_safe_start_capture(self):
'''Ensure that safe_start_capture always returns without error to not
disrupt the main loop.
'''
capture.start_capture = should_fail
capture.safe_start_capture(self.event)
def test_run(self):
capture.terminate = terminate_fn(1)
capture.run()
def test_sigterm(self):
with self.assertRaises(BaseException) as e:
capture.sigterm_handler(0, 0)
self.assertEqual(e.exception.code, 0)
self.assertTrue(utils.terminate())
|
opencast/pyCA
|
tests/test_capture.py
|
Python
|
lgpl-3.0
| 3,301
|
#!/usr/bin/env python
# Copyright (C) 2014 Swift Navigation Inc.
# Contact: Gareth McMullin <gareth@swift-nav.com>
#
# This source is subject to the license found in the file 'LICENSE' which must
# be be distributed together with this source. All other rights reserved.
#
# THIS CODE AND INFORMATION IS PROVIDED "AS IS" WITHOUT WARRANTY OF ANY KIND,
# EITHER EXPRESSED OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND/OR FITNESS FOR A PARTICULAR PURPOSE.
import struct
import sys
import serial_link
import threading
from sbp.file_io import *
class FileIO(object):
def __init__(self, link):
self.link = link
def read(self, filename):
"""
Read the contents of a file.
Parameters
----------
filename : str
Name of the file to read.
Returns
-------
out : str
Contents of the file.
"""
chunksize = 255 - 6 - len(filename)
buf = ''
while True:
msg = struct.pack("<IB", len(buf), chunksize) + filename + '\0'
self.link.send(SBP_MSG_FILEIO_READ, msg)
data = self.link.wait(SBP_MSG_FILEIO_READ, timeout=1.0)
if not data:
raise Exception("Timeout waiting for FILEIO_READ reply")
if data[:len(msg)] != msg:
raise Exception("Reply FILEIO_READ doesn't match request")
chunk = data[len(msg):]
buf += chunk
if len(chunk) != chunksize:
return buf
def readdir(self, dirname='.'):
"""
List the files in a directory.
Parameters
----------
dirname : str (optional)
Name of the directory to list. Defaults to the root directory.
Returns
-------
out : [str]
List of file names.
"""
files = []
while True:
msg = struct.pack("<I", len(files)) + dirname + '\0'
self.link.send(SBP_MSG_FILEIO_READ_DIR, msg)
data = self.link.wait(SBP_MSG_FILEIO_READ_DIR, timeout=1.0)
if not data:
raise Exception("Timeout waiting for FILEIO_READ_DIR reply")
if data[:len(msg)] != msg:
raise Exception("Reply FILEIO_READ_DIR doesn't match request")
chunk = data[len(msg):].split('\0')
files += chunk[:-1]
if chunk[-1] == '\xff':
return files
def remove(self, filename):
"""
Delete a file.
Parameters
----------
filename : str
Name of the file to delete.
"""
self.link.send(SBP_MSG_FILEIO_REMOVE, filename + '\0')
def write(self, filename, data, offset=0, trunc=True):
"""
Write to a file.
Parameters
----------
filename : str
Name of the file to write to.
data : str
Data to write
offset : int (optional)
Offset into the file at which to start writing in bytes.
trunc : bool (optional)
            Overwrite the file, i.e. delete any existing file before writing. If
this option is not specified and the existing file is longer than the
current write then the contents of the file beyond the write will
remain. If offset is non-zero then this flag is ignored.
Returns
-------
out : str
Contents of the file.
"""
if trunc and offset == 0:
self.remove(filename)
chunksize = 255 - len(filename) - 5
while data:
chunk = data[:chunksize]
data = data[chunksize:]
header = struct.pack("<I", offset) + filename + '\0'
self.link.send(SBP_MSG_FILEIO_WRITE, header + chunk)
reply = self.link.wait(SBP_MSG_FILEIO_WRITE, timeout=1.0)
if not reply:
raise Exception("Timeout waiting for FILEIO_WRITE reply")
if reply != header:
raise Exception("Reply FILEIO_WRITE doesn't match request")
offset += len(chunk)
def hexdump(data):
"""
Print a hex dump.
Parameters
----------
data : indexable
Data to display dump of, can be anything that supports length and index
operations.
"""
ret = ''
ofs = 0
while data:
chunk = data[:16]
data = data[16:]
s = "%08X " % ofs
s += " ".join("%02X" % ord(c) for c in chunk[:8]) + " "
s += " ".join("%02X" % ord(c) for c in chunk[8:])
s += "".join(" " for i in range(60 - len(s))) + "|"
for c in chunk:
s += c if 32 <= ord(c) < 128 else '.'
s += '|\n'
ofs += 16
ret += s
return ret
def print_dir_listing(files):
"""
Print a directory listing.
Parameters
----------
files : [str]
List of file names in the directory.
"""
for f in files:
print f
def get_args():
"""
Get and parse arguments.
"""
import argparse
parser = argparse.ArgumentParser(description='Swift Nav File I/O Utility.')
parser.add_argument('-r', '--read', nargs=1,
help='read a file')
parser.add_argument('-l', '--list', default=None, nargs=1,
help='list a directory')
parser.add_argument('-d', '--delete', nargs=1,
help='delete a file')
parser.add_argument('-p', '--port',
default=[serial_link.SERIAL_PORT], nargs=1,
help='specify the serial port to use.')
parser.add_argument("-b", "--baud",
default=[serial_link.SERIAL_BAUD], nargs=1,
help="specify the baud rate to use.")
parser.add_argument("-v", "--verbose",
help="print extra debugging information.",
action="store_true")
parser.add_argument("-x", "--hex",
help="output in hex dump format.",
action="store_true")
parser.add_argument("-f", "--ftdi",
help="use pylibftdi instead of pyserial.",
action="store_true")
return parser.parse_args()
def main():
args = get_args()
port = args.port[0]
baud = args.baud[0]
# Driver with context
with serial_link.get_driver(args.ftdi, port, baud) as driver:
# Handler with context
with Handler(driver.read, driver.write, args.verbose) as link:
f = FileIO(link)
try:
if args.read:
data = f.read(args.read[0])
if args.hex:
print hexdump(data)
else:
print data
elif args.delete:
f.remove(args.delete[0])
elif args.list is not None:
print_dir_listing(f.readdir(args.list[0]))
else:
print "No command given, listing root directory:"
print_dir_listing(f.readdir())
except KeyboardInterrupt:
pass
if __name__ == "__main__":
main()
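# Scripted usage sketch (mirrors main() above, but without argparse). It
# assumes a device on the default serial port; 'config.txt' is a made-up
# file name:
#
#   with serial_link.get_driver(False, serial_link.SERIAL_PORT,
#                               serial_link.SERIAL_BAUD) as driver:
#       with Handler(driver.read, driver.write, False) as link:
#           f = FileIO(link)
#           print_dir_listing(f.readdir())     # list the root directory
#           f.write('config.txt', 'hello')     # truncating write
#           print f.read('config.txt')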
|
denniszollo/piksi_tools
|
piksi_tools/fileio.py
|
Python
|
lgpl-3.0
| 6,491
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""File used to unit test the pacifica archive interface."""
import unittest
import os
from stat import ST_MODE
from six import PY2
from pacifica.archiveinterface.archive_utils import bytes_type
from pacifica.archiveinterface.backends.posix.archive import PosixBackendArchive
import pacifica.archiveinterface.config as pa_config
from .common_setup_test import SetupTearDown
class TestPosixBackendArchive(unittest.TestCase, SetupTearDown):
"""Test the Posix backend archive."""
def test_posix_backend_create(self):
"""Test creating a posix backend."""
backend = PosixBackendArchive('/tmp')
self.assertTrue(isinstance(backend, PosixBackendArchive))
# easiest way to unit test is look at class variable
# pylint: disable=protected-access
self.assertEqual(backend._prefix, '/tmp')
# pylint: enable=protected-access
def test_posix_backend_open(self):
"""Test opening a file from posix backend."""
filepath = '1234'
mode = 'w'
backend = PosixBackendArchive('/tmp')
my_file = backend.open(filepath, mode)
self.assertTrue(isinstance(my_file, PosixBackendArchive))
# easiest way to unit test is look at class variable
# pylint: disable=protected-access
self.assertEqual(backend._file.__class__.__name__, 'ExtendedFile')
# pylint: enable=protected-access
my_file.close()
def test_posix_backend_stage(self):
"""Test staging a file from posix backend."""
filepath = '1234'
mode = 'w'
backend = PosixBackendArchive('/tmp')
my_file = backend.open(filepath, mode)
my_file.stage()
# pylint: disable=protected-access
self.assertTrue(my_file._file._staged)
# pylint: enable=protected-access
my_file.close()
def test_posix_backend_open_twice(self):
"""Test opening a file from posix backend twice."""
filepath = '1234'
mode = 'w'
backend = PosixBackendArchive('/tmp')
my_file = backend.open(filepath, mode)
my_file = backend.open(filepath, mode)
self.assertTrue(isinstance(my_file, PosixBackendArchive))
# easiest way to unit test is look at class variable
# pylint: disable=protected-access
self.assertEqual(backend._file.__class__.__name__, 'ExtendedFile')
# pylint: enable=protected-access
my_file.close()
def test_posix_backend_open_id2f(self):
"""Test opening a file from posix backend twice."""
backend = PosixBackendArchive('/tmp')
mode = 'w'
my_file = backend.open('/a/b/d', mode)
temp_cfg_file = pa_config.CONFIG_FILE
pa_config.CONFIG_FILE = os.path.join(os.path.dirname(__file__), 'test_configs', 'posix-id2filename.cfg')
backend = PosixBackendArchive('/tmp')
my_file = backend.open(12345, mode)
my_file.write('this is file 12345')
my_file.close()
# pylint: disable=protected-access
my_file.patch(123456789, '/tmp{}'.format(my_file._id2filename(12345)))
# pylint: enable=protected-access
my_file = backend.open(123456789, 'r')
text = my_file.read(-1)
pa_config.CONFIG_FILE = temp_cfg_file
self.assertTrue(isinstance(my_file, PosixBackendArchive))
self.assertEqual(bytes_type('this is file 12345'), text)
my_file.close()
def test_posix_backend_close(self):
"""Test closing a file from posix backend."""
filepath = '1234'
mode = 'w'
backend = PosixBackendArchive('/tmp/')
my_file = backend.open(filepath, mode)
# easiest way to unit test is look at class variable
# pylint: disable=protected-access
self.assertEqual(backend._file.__class__.__name__, 'ExtendedFile')
my_file.close()
self.assertEqual(backend._file, None)
# pylint: enable=protected-access
def test_posix_backend_write(self):
"""Test writing a file from posix backend."""
filepath = '1234'
mode = 'w'
backend = PosixBackendArchive('/tmp/')
my_file = backend.open(filepath, mode)
error = my_file.write('i am a test string')
if PY2:
self.assertEqual(error, None)
else:
self.assertEqual(error, 18)
my_file.close()
def test_posix_file_mod_time(self):
"""Test the correct setting of a file mod time."""
filepath = '1234'
mode = 'w'
backend = PosixBackendArchive('/tmp/')
my_file = backend.open(filepath, mode)
my_file.close()
my_file.set_mod_time(1000000)
my_file = backend.open(filepath, 'r')
status = my_file.status()
my_file.close()
self.assertEqual(status.mtime, 1000000)
def test_posix_file_permissions(self):
"""Test the correct setting of a file mod time."""
filepath = '12345'
mode = 'w'
backend = PosixBackendArchive('/tmp/')
my_file = backend.open(filepath, mode)
my_file.close()
my_file.set_file_permissions()
statinfo = oct(os.stat('/tmp/12345')[ST_MODE])[-3:]
self.assertEqual(statinfo, '444')
def test_posix_backend_read(self):
"""Test reading a file from posix backend."""
self.test_posix_backend_write()
filepath = '1234'
mode = 'r'
backend = PosixBackendArchive('/tmp/')
my_file = backend.open(filepath, mode)
buf = my_file.read(-1)
self.assertEqual(buf, bytes_type('i am a test string'))
my_file.close()
def test_patch(self):
"""Test patching file."""
old_path = '/tmp/1234'
backend = PosixBackendArchive('/tmp')
my_file = backend.open('1234', 'w')
my_file.close()
backend.patch('5678', '/tmp/1234')
# Error would be thrown on patch so nothing to assert
self.assertEqual(old_path, '/tmp/1234')
def test_seek(self):
"""Test patching file."""
backend = PosixBackendArchive('/tmp')
my_file = backend.open('1234', 'w')
my_file.write('something')
my_file.close()
my_file = backend.open('1234', 'r')
my_file.seek(4)
data = my_file.read(-1).decode('utf8')
self.assertEqual(data, 'thing')
|
dmlb2000/pacifica-archiveinterface
|
tests/posix_test.py
|
Python
|
lgpl-3.0
| 6,379
|
# This file is part of PyEMMA.
#
# Copyright (c) 2015, 2014 Computational Molecular Biology Group, Freie Universitaet Berlin (GER)
#
# PyEMMA is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
Created on 19.01.2015
@author: marscher
'''
import numpy as np
from pyemma._base.serialization.serialization import SerializableMixIn
from pyemma._ext.variational.solvers.direct import eig_corr
from pyemma._ext.variational.util import ZeroRankError
from pyemma.coordinates.estimation.covariance import LaggedCovariance
from pyemma.coordinates.transform._tica_base import TICABase, TICAModelBase
from pyemma.util.annotators import fix_docs
import warnings
__all__ = ['TICA']
@fix_docs
class TICA(TICABase, SerializableMixIn):
r""" Time-lagged independent component analysis (TICA)"""
__serialize_version = 0
def __init__(self, lag, dim=-1, var_cutoff=0.95, kinetic_map=True, commute_map=False, epsilon=1e-6,
stride=1, skip=0, reversible=True, weights=None, ncov_max=float('inf')):
r""" Time-lagged independent component analysis (TICA) [1]_, [2]_, [3]_.
Parameters
----------
lag : int
lag time
dim : int, optional, default -1
Maximum number of significant independent components to use to reduce dimension of input data. -1 means
all numerically available dimensions (see epsilon) will be used unless reduced by var_cutoff.
Setting dim to a positive value is exclusive with var_cutoff.
var_cutoff : float in the range [0,1], optional, default 0.95
Determines the number of output dimensions by including dimensions until their cumulative kinetic variance
            exceeds the fraction var_cutoff. var_cutoff=1.0 means all numerically available dimensions
(see epsilon) will be used, unless set by dim. Setting var_cutoff smaller than 1.0 is exclusive with dim
kinetic_map : bool, optional, default True
Eigenvectors will be scaled by eigenvalues. As a result, Euclidean distances in the transformed data
approximate kinetic distances [4]_. This is a good choice when the data is further processed by clustering.
commute_map : bool, optional, default False
Eigenvector_i will be scaled by sqrt(timescale_i / 2). As a result, Euclidean distances in the transformed
data will approximate commute distances [5]_.
epsilon : float
eigenvalue norm cutoff. Eigenvalues of C0 with norms <= epsilon will be
cut off. The remaining number of eigenvalues define the size
of the output.
stride: int, optional, default = 1
Use only every stride-th time step. By default, every time step is used.
skip : int, default=0
skip the first initial n frames per trajectory.
reversible: bool, default=True
symmetrize correlation matrices C_0, C_{\tau}.
weights: object or list of ndarrays, optional, default = None
* An object that allows to compute re-weighting factors to estimate equilibrium means and correlations from
off-equilibrium data. The only requirement is that weights possesses a method weights(X), that accepts a
trajectory X (np.ndarray(T, n)) and returns a vector of re-weighting factors (np.ndarray(T,)).
* A list of ndarrays (ndim=1) specifies the weights for each frame of each trajectory.
Notes
-----
Given a sequence of multivariate data :math:`X_t`, computes the mean-free
covariance and time-lagged covariance matrix:
.. math::
C_0 &= (X_t - \mu)^T (X_t - \mu) \\
C_{\tau} &= (X_t - \mu)^T (X_{t + \tau} - \mu)
and solves the eigenvalue problem
.. math:: C_{\tau} r_i = C_0 \lambda_i(tau) r_i,
where :math:`r_i` are the independent components and :math:`\lambda_i(tau)` are
their respective normalized time-autocorrelations. The eigenvalues are
related to the relaxation timescale by
.. math:: t_i(tau) = -\tau / \ln |\lambda_i|.
When used as a dimension reduction method, the input data is projected
onto the dominant independent components.
References
----------
.. [1] Perez-Hernandez G, F Paul, T Giorgino, G De Fabritiis and F Noe. 2013.
Identification of slow molecular order parameters for Markov model construction
J. Chem. Phys. 139, 015102. doi:10.1063/1.4811489
.. [2] Schwantes C, V S Pande. 2013.
Improvements in Markov State Model Construction Reveal Many Non-Native Interactions in the Folding of NTL9
J. Chem. Theory. Comput. 9, 2000-2009. doi:10.1021/ct300878a
.. [3] L. Molgedey and H. G. Schuster. 1994.
Separation of a mixture of independent signals using time delayed correlations
Phys. Rev. Lett. 72, 3634.
.. [4] Noe, F. and Clementi, C. 2015. Kinetic distance and kinetic maps from molecular dynamics simulation.
J. Chem. Theory. Comput. doi:10.1021/acs.jctc.5b00553
.. [5] Noe, F., Banisch, R., Clementi, C. 2016. Commute maps: separating slowly-mixing molecular configurations
for kinetic modeling. J. Chem. Theory. Comput. doi:10.1021/acs.jctc.6b00762
"""
super(TICA, self).__init__()
if kinetic_map and commute_map:
raise ValueError('Trying to use both kinetic_map and commute_map. Use either or.')
if (kinetic_map or commute_map) and not reversible:
kinetic_map = False
commute_map = False
warnings.warn("Cannot use kinetic_map or commute_map for non-reversible processes, both will be set to"
"False.")
# this instance will be set by partial fit.
self._covar = None
self.dim = dim
self.var_cutoff = var_cutoff
self.set_params(lag=lag, dim=dim, var_cutoff=var_cutoff, kinetic_map=kinetic_map, commute_map=commute_map,
epsilon=epsilon, reversible=reversible, stride=stride, skip=skip, weights=weights, ncov_max=ncov_max)
@property
def model(self):
if not hasattr(self, '_model') or self._model is None:
self._model = TICAModelBase()
return self._model
def describe(self):
try:
dim = self.dimension()
except RuntimeError:
dim = self.dim
return "[TICA, lag = %i; max. output dim. = %i]" % (self._lag, dim)
def estimate(self, X, **kwargs):
r"""
Chunk-based parameterization of TICA. Iterates over all data and estimates
the mean, covariance and time lagged covariance. Finally, the
generalized eigenvalue problem is solved to determine
the independent components.
"""
return super(TICA, self).estimate(X, **kwargs)
def partial_fit(self, X):
""" incrementally update the covariances and mean.
Parameters
----------
X: array, list of arrays, PyEMMA reader
input data.
Notes
-----
The projection matrix is first being calculated upon its first access.
"""
from pyemma.coordinates import source
iterable = source(X, chunksize=self.chunksize)
indim = iterable.dimension()
if not self.dim <= indim:
raise RuntimeError("requested more output dimensions (%i) than dimension"
" of input data (%i)" % (self.dim, indim))
if self._covar is None:
self._covar = LaggedCovariance(c00=True, c0t=True, ctt=False, remove_data_mean=True, reversible=self.reversible,
lag=self.lag, bessel=False, stride=self.stride, skip=self.skip,
weights=self.weights, ncov_max=self.ncov_max)
self._covar.partial_fit(iterable)
self.model.update_model_params(mean=self._covar.mean, # TODO: inefficient, fixme
cov=self._covar.C00_,
cov_tau=self._covar.C0t_)
self._estimated = False
return self
def _estimate(self, iterable, **kw):
covar = LaggedCovariance(c00=True, c0t=True, ctt=False, remove_data_mean=True, reversible=self.reversible,
lag=self.lag, bessel=False, stride=self.stride, skip=self.skip,
weights=self.weights, ncov_max=self.ncov_max)
indim = iterable.dimension()
if not self.dim <= indim:
raise RuntimeError("requested more output dimensions (%i) than dimension"
" of input data (%i)" % (self.dim, indim))
if self._logger_is_active(self._loglevel_DEBUG):
self.logger.debug("Running TICA with tau=%i; Estimating two covariance matrices"
" with dimension (%i, %i)", self._lag, indim, indim)
covar.estimate(iterable, chunksize=self.chunksize, **kw)
self.model.update_model_params(mean=covar.mean,
cov=covar.C00_,
cov_tau=covar.C0t_)
self._diagonalize()
return self.model
def _diagonalize(self):
# diagonalize with low rank approximation
self.logger.debug("diagonalize Cov and Cov_tau.")
try:
eigenvalues, eigenvectors = eig_corr(self.cov, self.cov_tau, self.epsilon, sign_maxelement=True)
except ZeroRankError:
raise ZeroRankError('All input features are constant in all time steps. No dimension would be left after dimension reduction.')
if self.kinetic_map and self.commute_map:
raise ValueError('Trying to use both kinetic_map and commute_map. Use either or.')
if self.kinetic_map: # scale by eigenvalues
eigenvectors *= eigenvalues[None, :]
if self.commute_map: # scale by (regularized) timescales
            timescales = -self.lag / np.log(np.abs(eigenvalues))
# dampen timescales smaller than the lag time, as in section 2.5 of ref. [5]
regularized_timescales = 0.5 * timescales * np.maximum(np.tanh(np.pi * ((timescales - self.lag) / self.lag) + 1), 0)
eigenvectors *= np.sqrt(regularized_timescales / 2)
self.logger.debug("finished diagonalisation.")
# compute cumulative variance
cumvar = np.cumsum(np.abs(eigenvalues) ** 2)
cumvar /= cumvar[-1]
self.model.update_model_params(cumvar=cumvar,
eigenvalues=eigenvalues,
eigenvectors=eigenvectors)
self._estimated = True
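# Sketch of typical use via the PyEMMA coordinates API (toy random data; the
# estimator above is normally constructed through pyemma.coordinates.tica):
#
#   import numpy as np
#   import pyemma.coordinates as coor
#   data = np.random.randn(10000, 10)           # (frames, features) toy input
#   tica_obj = coor.tica(data, lag=10, dim=2)   # TICA at lag 10, keep 2 ICs
#   Y = tica_obj.get_output()[0]                # trajectory projected onto ICs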
|
markovmodel/PyEMMA
|
pyemma/coordinates/transform/tica.py
|
Python
|
lgpl-3.0
| 11,423
|
import sys
from services.spawn import MobileTemplate
from services.spawn import WeaponTemplate
from resources.datatables import WeaponType
from resources.datatables import Difficulty
from resources.datatables import Options
from java.util import Vector
def addTemplate(core):
mobileTemplate = MobileTemplate()
mobileTemplate.setCreatureName('arachne_webmaster')
mobileTemplate.setLevel(62)
mobileTemplate.setDifficulty(Difficulty.NORMAL)
mobileTemplate.setMinSpawnDistance(4)
mobileTemplate.setMaxSpawnDistance(8)
mobileTemplate.setDeathblow(True)
mobileTemplate.setScale(1)
mobileTemplate.setMeatType("Insect Meat")
mobileTemplate.setMeatAmount(30)
mobileTemplate.setSocialGroup("arachne")
mobileTemplate.setAssistRange(12)
mobileTemplate.setStalker(True)
mobileTemplate.setOptionsBitmask(Options.AGGRESSIVE | Options.ATTACKABLE)
templates = Vector()
templates.add('object/mobile/shared_arachne_hatchling.iff')
mobileTemplate.setTemplates(templates)
weaponTemplates = Vector()
weapontemplate = WeaponTemplate('object/weapon/melee/unarmed/shared_unarmed_default.iff', WeaponType.UNARMED, 1.0, 6, 'energy')
weaponTemplates.add(weapontemplate)
mobileTemplate.setWeaponTemplateVector(weaponTemplates)
attacks = Vector()
attacks.add('bm_damage_poison_3')
attacks.add('bm_defensive_3')
attacks.add('bm_puncture_1')
mobileTemplate.setDefaultAttack('creatureMeleeAttack')
mobileTemplate.setAttacks(attacks)
core.spawnService.addMobileTemplate('arachne_webmaster', mobileTemplate)
return
|
agry/NGECore2
|
scripts/mobiles/endor/arachne_webmaster.py
|
Python
|
lgpl-3.0
| 1,565
|
#file : InMoov2.minimal.py
# this will run with versions of MRL above 1695
# a very minimal script for InMoov
# although this script is very short you can still
# do voice control of a right hand or finger box
# for any command which you say - you will be required to say a confirmation
# e.g. you say -> open hand, InMoov will ask -> "Did you say open hand?", you will need to
# respond with a confirmation ("yes","correct","yeah","ya")
rightPort = "COM8"
i01 = Runtime.createAndStart("i01", "InMoov")
# starting parts
i01.startEar()
i01.startMouth()
#to tweak the default voice
i01.mouth.setGoogleURI("http://thehackettfamily.org/Voice_api/api2.php?voice=Ryan&txt=")
##############
i01.startRightHand(rightPort)
# tweaking defaults settings of right hand
#i01.rightHand.thumb.setMinMax(55,135)
#i01.rightHand.index.setMinMax(0,160)
#i01.rightHand.majeure.setMinMax(0,140)
#i01.rightHand.ringFinger.setMinMax(48,145)
#i01.rightHand.pinky.setMinMax(45,146)
#i01.rightHand.thumb.map(0,180,55,135)
#i01.rightHand.index.map(0,180,0,160)
#i01.rightHand.majeure.map(0,180,0,140)
#i01.rightHand.ringFinger.map(0,180,48,145)
#i01.rightHand.pinky.map(0,180,45,146)
#################
# verbal commands
ear = i01.ear
ear.addCommand("attach right hand", "i01.rightHand", "attach")
ear.addCommand("disconnect right hand", "i01.rightHand", "detach")
ear.addCommand("rest", i01.getName(), "rest")
ear.addCommand("open hand", "python", "handopen")
ear.addCommand("close hand", "python", "handclose")
ear.addCommand("capture gesture", ear.getName(), "captureGesture")
ear.addCommand("manual", ear.getName(), "lockOutAllGrammarExcept", "voice control")
ear.addCommand("voice control", ear.getName(), "clearLock")
ear.addComfirmations("yes","correct","yeah","ya")
ear.addNegations("no","wrong","nope","nah")
ear.startListening()
def handopen():
i01.moveHand("left",0,0,0,0,0)
i01.moveHand("right",0,0,0,0,0)
def handclose():
i01.moveHand("left",180,180,180,180,180)
i01.moveHand("right",180,180,180,180,180)
|
mecax/pyrobotlab
|
toSort/InMoov2.minimal.py
|
Python
|
apache-2.0
| 2,008
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Requirements for Ruby codegen."""
from artman.tasks.requirements import task_requirement_base
class RubyFormatRequirements(task_requirement_base.TaskRequirementBase):
@classmethod
def require(cls):
return ['rubocop']
@classmethod
def install(cls):
# Intentionally do nothing
pass
class RakeRequirements(task_requirement_base.TaskRequirementBase):
@classmethod
def require(cls):
return ['rake']
@classmethod
def install(cls):
# Intentionally do nothing
pass
|
ethanbao/artman
|
artman/tasks/requirements/ruby_requirements.py
|
Python
|
apache-2.0
| 1,142
|
from sklearn import preprocessing
import numpy as np
X = np.array([[ 1., -1., 2.],
[ 2., 0., 0.],
[ 2., 0., 0.],
[ 0., 1., -1.]])
print X
X_scaled = preprocessing.scale(X)
print X_scaled
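# preprocessing.scale standardizes each column to zero mean and unit variance;
# the extra checks below (not in the original snippet) should print a mean
# vector of ~0 and a standard-deviation vector of ~1.
print X_scaled.mean(axis=0)
print X_scaled.std(axis=0)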
|
zaqwes8811/ml-cv
|
ml_tests.py
|
Python
|
apache-2.0
| 241
|
#!/usr/bin/python -u
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from subprocess import call, Popen
from unittest import main, TestCase
from uuid import uuid4
from swiftclient import client
from swift.common import direct_client
from swift.common.exceptions import ClientException
from test.probe.common import kill_server, kill_servers, reset_environment, \
start_server
class TestObjectHandoff(TestCase):
def setUp(self):
(self.pids, self.port2server, self.account_ring, self.container_ring,
self.object_ring, self.url, self.token,
self.account, self.configs) = reset_environment()
def tearDown(self):
kill_servers(self.port2server, self.pids)
def test_main(self):
# Create container
# Kill one container/obj primary server
# Create container/obj (goes to two primary servers and one handoff)
# Kill other two container/obj primary servers
# Indirectly through proxy assert we can get container/obj
# Restart those other two container/obj primary servers
# Directly to handoff server assert we can get container/obj
# Assert container listing (via proxy and directly) has container/obj
# Bring the first container/obj primary server back up
# Assert that it doesn't have container/obj yet
# Run object replication, ensuring we run the handoff node last so it
# should remove its extra handoff partition
# Assert the first container/obj primary server now has container/obj
# Assert the handoff server no longer has container/obj
# Kill the first container/obj primary server again (we have two
# primaries and the handoff up now)
# Delete container/obj
# Assert we can't head container/obj
# Assert container/obj is not in the container listing, both indirectly
# and directly
# Restart the first container/obj primary server again
# Assert it still has container/obj
# Run object replication, ensuring we run the handoff node last so it
# should remove its extra handoff partition
# Assert primary node no longer has container/obj
container = 'container-%s' % uuid4()
client.put_container(self.url, self.token, container)
cpart, cnodes = self.container_ring.get_nodes(self.account, container)
cnode = cnodes[0]
obj = 'object-%s' % uuid4()
opart, onodes = self.object_ring.get_nodes(
self.account, container, obj)
onode = onodes[0]
kill_server(onode['port'], self.port2server, self.pids)
client.put_object(self.url, self.token, container, obj, 'VERIFY')
odata = client.get_object(self.url, self.token, container, obj)[-1]
if odata != 'VERIFY':
raise Exception('Object GET did not return VERIFY, instead it '
'returned: %s' % repr(odata))
# Kill all primaries to ensure GET handoff works
for node in onodes[1:]:
kill_server(node['port'], self.port2server, self.pids)
odata = client.get_object(self.url, self.token, container, obj)[-1]
if odata != 'VERIFY':
raise Exception('Object GET did not return VERIFY, instead it '
'returned: %s' % repr(odata))
for node in onodes[1:]:
start_server(node['port'], self.port2server, self.pids)
# We've indirectly verified the handoff node has the object, but let's
# directly verify it.
another_onode = self.object_ring.get_more_nodes(opart).next()
odata = direct_client.direct_get_object(
another_onode, opart, self.account, container, obj)[-1]
if odata != 'VERIFY':
raise Exception('Direct object GET did not return VERIFY, instead '
'it returned: %s' % repr(odata))
objs = [o['name'] for o in
client.get_container(self.url, self.token, container)[1]]
if obj not in objs:
raise Exception('Container listing did not know about object')
for cnode in cnodes:
objs = [o['name'] for o in
direct_client.direct_get_container(
cnode, cpart, self.account, container)[1]]
if obj not in objs:
raise Exception(
'Container server %s:%s did not know about object' %
(cnode['ip'], cnode['port']))
start_server(onode['port'], self.port2server, self.pids)
exc = None
try:
direct_client.direct_get_object(onode, opart, self.account,
container, obj)
except ClientException as err:
exc = err
self.assertEquals(exc.http_status, 404)
# Run the extra server last so it'll remove its extra partition
processes = []
for node in onodes:
try:
port_num = node['replication_port']
except KeyError:
port_num = node['port']
processes.append(Popen(['swift-object-replicator',
self.configs['object-replicator'] %
((port_num - 6000) / 10),
'once']))
for process in processes:
process.wait()
try:
another_port_num = another_onode['replication_port']
except KeyError:
another_port_num = another_onode['port']
call(['swift-object-replicator',
self.configs['object-replicator'] %
((another_port_num - 6000) / 10), 'once'])
odata = direct_client.direct_get_object(onode, opart, self.account,
container, obj)[-1]
if odata != 'VERIFY':
raise Exception('Direct object GET did not return VERIFY, instead '
'it returned: %s' % repr(odata))
exc = None
try:
direct_client.direct_get_object(another_onode, opart, self.account,
container, obj)
except ClientException as err:
exc = err
self.assertEquals(exc.http_status, 404)
kill_server(onode['port'], self.port2server, self.pids)
client.delete_object(self.url, self.token, container, obj)
exc = None
try:
client.head_object(self.url, self.token, container, obj)
except client.ClientException as err:
exc = err
self.assertEquals(exc.http_status, 404)
objs = [o['name'] for o in
client.get_container(self.url, self.token, container)[1]]
if obj in objs:
raise Exception('Container listing still knew about object')
for cnode in cnodes:
objs = [o['name'] for o in
direct_client.direct_get_container(
cnode, cpart, self.account, container)[1]]
if obj in objs:
raise Exception(
'Container server %s:%s still knew about object' %
(cnode['ip'], cnode['port']))
start_server(onode['port'], self.port2server, self.pids)
direct_client.direct_get_object(onode, opart, self.account, container,
obj)
# Run the extra server last so it'll remove its extra partition
processes = []
for node in onodes:
try:
port_num = node['replication_port']
except KeyError:
port_num = node['port']
processes.append(Popen(['swift-object-replicator',
self.configs['object-replicator'] %
((port_num - 6000) / 10),
'once']))
for process in processes:
process.wait()
call(['swift-object-replicator',
self.configs['object-replicator'] %
((another_port_num - 6000) / 10), 'once'])
exc = None
try:
direct_client.direct_get_object(another_onode, opart, self.account,
container, obj)
except ClientException as err:
exc = err
self.assertEquals(exc.http_status, 404)
if __name__ == '__main__':
main()
|
gotostack/swift
|
test/probe/test_object_handoff.py
|
Python
|
apache-2.0
| 9,005
|
# Copyright 2011-2013 Colin Scott
# Copyright 2011-2013 Andreas Wundsam
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import sys
import os
import time
from threading import Thread
import logging
log = logging.getLogger()
from sts.util.socket_mux.server_socket_multiplexer import *
from sts.util.socket_mux.sts_socket_multiplexer import *
sys.path.append(os.path.dirname(__file__) + "/../../..")
class MultiplexerTest(unittest.TestCase):
client_messages = [ "foo", "bar", "baz" ]
def setup_server(self, address):
import socket
mux_select = ServerMultiplexedSelect()
ServerMockSocket.bind_called = False
listener = ServerMockSocket(socket.AF_UNIX, socket.SOCK_STREAM,
set_true_listen_socket=mux_select.set_true_listen_socket)
listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
listener.bind(address)
listener.listen(16)
return (mux_select, listener)
def setup_client(self, num_socks, address):
try:
from pox.lib.util import connect_socket_with_backoff
io_master = MultiplexedSelect()
socket = connect_socket_with_backoff(address=address)
io_worker = io_master.create_worker_for_socket(socket)
# TODO(cs): unused variable demux
demux = STSSocketDemultiplexer(io_worker, address)
mock_socks = []
for i in xrange(num_socks):
mock_socket = STSMockSocket(None, None)
mock_socket.connect(address)
mock_socket.send(self.client_messages[i])
mock_socks.append(mock_socket)
# Now flush messages
while [ m for m in mock_socks if m.json_worker.io_worker._ready_to_send ] != []:
io_master.select(mock_socks, mock_socks, [])
except Exception as e:
log.critical("Client died: %s" % e)
raise e
def wait_for_next_accept(self, listener, mux_select):
log.info("waiting for next accept")
rl = []
while listener not in rl:
(rl, _, _) = mux_select.select([listener], [], [], 0.1)
def test_basic(self):
address = "basic_pipe"
try:
t = Thread(target=self.setup_client, args=(1,address,), name="MainThread")
t.start()
(mux_select, listener) = self.setup_server(address)
# wait for client to connect
self.wait_for_next_accept(listener, mux_select)
mock_sock = listener.accept()[0]
# now read client message
(rl, _, _) = mux_select.select([mock_sock], [], [])
start = last = time.time()
while mock_sock not in rl:
time.sleep(0.05)
if time.time() - start > 5:
self.fail("Did not find socket in rl in 5 seconds")
elif time.time() - last > 1:
log.debug("waiting for socket %s in rl %s..." % ( str(mock_sock), repr(rl)))
last = time.time()
(rl, _, _) = mux_select.select([mock_sock], [], [])
d = mock_sock.recv(2048)
self.assertEqual(self.client_messages[0], d)
finally:
if ServerSocketDemultiplexer.instance is not None:
ServerSocketDemultiplexer.instance = None
try:
os.unlink(address)
except OSError:
if os.path.exists(address):
raise RuntimeError("can't remove PIPE socket %s" % str(address))
def test_three_incoming(self):
address = "three_pipe"
try:
t = Thread(target=self.setup_client, args=(3,address,), name="MainThread")
t.start()
(mux_select, listener) = self.setup_server(address)
for i in xrange(len(self.client_messages)):
self.wait_for_next_accept(listener, mux_select)
mock_sock = listener.accept()[0]
(rl, _, _) = mux_select.select([mock_sock], [], [])
start = last = time.time()
while mock_sock not in rl:
if time.time() - start > 5:
self.fail("Did not find socket in rl in 5 seconds")
elif time.time() - last > 1:
log.debug("waiting for socket %s in rl %s..." % ( str(mock_sock), repr(rl)))
last = time.time()
(rl, _, _) = mux_select.select([mock_sock], [], [])
time.sleep(0.05)
d = mock_sock.recv(2048)
# order should be deterministic
self.assertEqual(self.client_messages[i], d)
finally:
if ServerSocketDemultiplexer.instance is not None:
ServerSocketDemultiplexer.instance = None
try:
os.unlink(address)
except OSError:
if os.path.exists(address):
raise RuntimeError("can't remove PIPE socket %s" % str(address))
|
jmiserez/sts
|
tests/unit/sts/socket_multiplexer_test.py
|
Python
|
apache-2.0
| 4,969
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import copy
import unittest
from pants.option.option_value_container import OptionValueContainer
from pants.option.ranked_value import RankedValue
class OptionValueContainerTest(unittest.TestCase):
def test_standard_values(self):
o = OptionValueContainer()
o.foo = 1
self.assertEqual(1, o.foo)
with self.assertRaises(AttributeError):
o.bar
def test_value_ranking(self):
o = OptionValueContainer()
o.foo = RankedValue(RankedValue.CONFIG, 11)
self.assertEqual(11, o.foo)
self.assertEqual(RankedValue.CONFIG, o.get_rank('foo'))
o.foo = RankedValue(RankedValue.HARDCODED, 22)
self.assertEqual(11, o.foo)
self.assertEqual(RankedValue.CONFIG, o.get_rank('foo'))
o.foo = RankedValue(RankedValue.ENVIRONMENT, 33)
self.assertEqual(33, o.foo)
self.assertEqual(RankedValue.ENVIRONMENT, o.get_rank('foo'))
o.foo = 44 # No explicit rank is assumed to be a FLAG.
self.assertEqual(44, o.foo)
self.assertEqual(RankedValue.FLAG, o.get_rank('foo'))
def test_is_flagged(self):
o = OptionValueContainer()
o.foo = RankedValue(RankedValue.NONE, 11)
self.assertFalse(o.is_flagged('foo'))
o.foo = RankedValue(RankedValue.CONFIG, 11)
self.assertFalse(o.is_flagged('foo'))
o.foo = RankedValue(RankedValue.ENVIRONMENT, 11)
self.assertFalse(o.is_flagged('foo'))
o.foo = RankedValue(RankedValue.FLAG, 11)
self.assertTrue(o.is_flagged('foo'))
def test_indexing(self):
o = OptionValueContainer()
o.foo = 1
self.assertEqual(1, o['foo'])
self.assertEqual(1, o.get('foo'))
self.assertEqual(1, o.get('foo', 2))
self.assertIsNone(o.get('unknown'))
self.assertEqual(2, o.get('unknown', 2))
with self.assertRaises(AttributeError):
o['bar']
def test_iterator(self):
o = OptionValueContainer()
o.a = 3
o.b = 2
o.c = 1
names = list(iter(o))
self.assertListEqual(['a', 'b', 'c'], names)
def test_copy(self):
# copy semantics can get hairy when overriding __setattr__/__getattr__, so we test them.
o = OptionValueContainer()
o.foo = 1
o.bar = {'a': 111}
p = copy.copy(o)
# Verify that the result is in fact a copy.
self.assertEqual(1, p.foo) # Has original attribute.
o.baz = 42
self.assertFalse(hasattr(p, 'baz')) # Does not have attribute added after the copy.
# Verify that it's a shallow copy by modifying a referent in o and reading it in p.
o.bar['b'] = 222
self.assertEqual({'a': 111, 'b': 222}, p.bar)
def test_deepcopy(self):
# copy semantics can get hairy when overriding __setattr__/__getattr__, so we test them.
o = OptionValueContainer()
o.foo = 1
o.bar = {'a': 111}
p = copy.deepcopy(o)
# Verify that the result is in fact a copy.
self.assertEqual(1, p.foo) # Has original attribute.
o.baz = 42
self.assertFalse(hasattr(p, 'baz')) # Does not have attribute added after the copy.
# Verify that it's a deep copy by modifying a referent in o and reading it in p.
o.bar['b'] = 222
self.assertEqual({'a': 111}, p.bar)
|
jessrosenfield/pants
|
tests/python/pants_test/option/test_option_value_container.py
|
Python
|
apache-2.0
| 3,394
|
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
engine = create_engine('sqlite:////tmp/db.sqlite', echo=True)
Base = declarative_base()
Session = sessionmaker(bind=engine)
|
reykjavik-university/2014-T-514-VEFT
|
Week10/examples/sqlalchemy-elasticsearch-example/db.py
|
Python
|
apache-2.0
| 258
|
import logging
import time
from collections import OrderedDict, defaultdict
from datetime import datetime, timedelta
from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union
from django.conf import settings
from django.db import connection
from django.db.models import F
from psycopg2.sql import SQL, Composable, Identifier, Literal
from analytics.models import (
BaseCount,
FillState,
InstallationCount,
RealmCount,
StreamCount,
UserCount,
installation_epoch,
last_successful_fill,
)
from zerver.lib.logging_util import log_to_file
from zerver.lib.timestamp import ceiling_to_day, ceiling_to_hour, floor_to_hour, verify_UTC
from zerver.models import (
Message,
Realm,
RealmAuditLog,
Stream,
UserActivityInterval,
UserProfile,
models,
)
## Logging setup ##
logger = logging.getLogger('zulip.management')
log_to_file(logger, settings.ANALYTICS_LOG_PATH)
# You can't subtract timedelta.max from a datetime, so use this instead
TIMEDELTA_MAX = timedelta(days=365*1000)
## Class definitions ##
class CountStat:
HOUR = 'hour'
DAY = 'day'
FREQUENCIES = frozenset([HOUR, DAY])
def __init__(self, property: str, data_collector: 'DataCollector', frequency: str,
interval: Optional[timedelta]=None) -> None:
self.property = property
self.data_collector = data_collector
# might have to do something different for bitfields
if frequency not in self.FREQUENCIES:
raise AssertionError(f"Unknown frequency: {frequency}")
self.frequency = frequency
if interval is not None:
self.interval = interval
elif frequency == CountStat.HOUR:
self.interval = timedelta(hours=1)
else: # frequency == CountStat.DAY
self.interval = timedelta(days=1)
def __str__(self) -> str:
return f"<CountStat: {self.property}>"
class LoggingCountStat(CountStat):
def __init__(self, property: str, output_table: Type[BaseCount], frequency: str) -> None:
CountStat.__init__(self, property, DataCollector(output_table, None), frequency)
class DependentCountStat(CountStat):
def __init__(self, property: str, data_collector: 'DataCollector', frequency: str,
interval: Optional[timedelta] = None, dependencies: Sequence[str] = []) -> None:
CountStat.__init__(self, property, data_collector, frequency, interval=interval)
self.dependencies = dependencies
class DataCollector:
def __init__(self, output_table: Type[BaseCount],
pull_function: Optional[Callable[[str, datetime, datetime, Optional[Realm]], int]]) -> None:
self.output_table = output_table
self.pull_function = pull_function
## CountStat-level operations ##
def process_count_stat(stat: CountStat, fill_to_time: datetime,
realm: Optional[Realm]=None) -> None:
# TODO: The realm argument is not yet supported, in that we don't
# have a solution for how to update FillState if it is passed. It
# exists solely as partial plumbing for when we do fully implement
# doing single-realm analytics runs for use cases like data import.
#
# Also, note that for the realm argument to be properly supported,
    # the CountStat object passed in needs to have come from,
    # e.g., get_count_stats(realm), i.e. have the realm_id already
    # entered into the SQL query defined by the CountStat object.
if stat.frequency == CountStat.HOUR:
time_increment = timedelta(hours=1)
elif stat.frequency == CountStat.DAY:
time_increment = timedelta(days=1)
else:
raise AssertionError(f"Unknown frequency: {stat.frequency}")
verify_UTC(fill_to_time)
if floor_to_hour(fill_to_time) != fill_to_time:
raise ValueError(f"fill_to_time must be on an hour boundary: {fill_to_time}")
fill_state = FillState.objects.filter(property=stat.property).first()
if fill_state is None:
currently_filled = installation_epoch()
fill_state = FillState.objects.create(property=stat.property,
end_time=currently_filled,
state=FillState.DONE)
logger.info("INITIALIZED %s %s", stat.property, currently_filled)
elif fill_state.state == FillState.STARTED:
logger.info("UNDO START %s %s", stat.property, fill_state.end_time)
do_delete_counts_at_hour(stat, fill_state.end_time)
currently_filled = fill_state.end_time - time_increment
do_update_fill_state(fill_state, currently_filled, FillState.DONE)
logger.info("UNDO DONE %s", stat.property)
elif fill_state.state == FillState.DONE:
currently_filled = fill_state.end_time
else:
raise AssertionError(f"Unknown value for FillState.state: {fill_state.state}.")
if isinstance(stat, DependentCountStat):
for dependency in stat.dependencies:
dependency_fill_time = last_successful_fill(dependency)
if dependency_fill_time is None:
logger.warning("DependentCountStat %s run before dependency %s.",
stat.property, dependency)
return
fill_to_time = min(fill_to_time, dependency_fill_time)
currently_filled = currently_filled + time_increment
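    # Fill one time_increment at a time, marking the FillState STARTED before and
    # DONE after each step, so that an interrupted run can be cleanly undone on the
    # next invocation (see the UNDO START branch above).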
while currently_filled <= fill_to_time:
logger.info("START %s %s", stat.property, currently_filled)
start = time.time()
do_update_fill_state(fill_state, currently_filled, FillState.STARTED)
do_fill_count_stat_at_hour(stat, currently_filled, realm)
do_update_fill_state(fill_state, currently_filled, FillState.DONE)
end = time.time()
currently_filled = currently_filled + time_increment
logger.info("DONE %s (%dms)", stat.property, (end-start)*1000)
def do_update_fill_state(fill_state: FillState, end_time: datetime, state: int) -> None:
fill_state.end_time = end_time
fill_state.state = state
fill_state.save()
# We assume end_time is valid (e.g. is on a day or hour boundary as appropriate)
# and is timezone aware. It is the caller's responsibility to enforce this!
def do_fill_count_stat_at_hour(stat: CountStat, end_time: datetime, realm: Optional[Realm]=None) -> None:
start_time = end_time - stat.interval
if not isinstance(stat, LoggingCountStat):
timer = time.time()
assert(stat.data_collector.pull_function is not None)
rows_added = stat.data_collector.pull_function(stat.property, start_time, end_time, realm)
logger.info("%s run pull_function (%dms/%sr)",
stat.property, (time.time()-timer)*1000, rows_added)
do_aggregate_to_summary_table(stat, end_time, realm)
def do_delete_counts_at_hour(stat: CountStat, end_time: datetime) -> None:
if isinstance(stat, LoggingCountStat):
InstallationCount.objects.filter(property=stat.property, end_time=end_time).delete()
if stat.data_collector.output_table in [UserCount, StreamCount]:
RealmCount.objects.filter(property=stat.property, end_time=end_time).delete()
else:
UserCount.objects.filter(property=stat.property, end_time=end_time).delete()
StreamCount.objects.filter(property=stat.property, end_time=end_time).delete()
RealmCount.objects.filter(property=stat.property, end_time=end_time).delete()
InstallationCount.objects.filter(property=stat.property, end_time=end_time).delete()
def do_aggregate_to_summary_table(stat: CountStat, end_time: datetime,
realm: Optional[Realm]=None) -> None:
cursor = connection.cursor()
# Aggregate into RealmCount
output_table = stat.data_collector.output_table
if realm is not None:
realm_clause = SQL("AND zerver_realm.id = {}").format(Literal(realm.id))
else:
realm_clause = SQL("")
if output_table in (UserCount, StreamCount):
realmcount_query = SQL("""
INSERT INTO analytics_realmcount
(realm_id, value, property, subgroup, end_time)
SELECT
zerver_realm.id, COALESCE(sum({output_table}.value), 0), %(property)s,
{output_table}.subgroup, %(end_time)s
FROM zerver_realm
JOIN {output_table}
ON
zerver_realm.id = {output_table}.realm_id
WHERE
{output_table}.property = %(property)s AND
{output_table}.end_time = %(end_time)s
{realm_clause}
GROUP BY zerver_realm.id, {output_table}.subgroup
""").format(
output_table=Identifier(output_table._meta.db_table),
realm_clause=realm_clause,
)
start = time.time()
cursor.execute(realmcount_query, {
'property': stat.property,
'end_time': end_time,
})
end = time.time()
logger.info(
"%s RealmCount aggregation (%dms/%sr)",
stat.property, (end - start) * 1000, cursor.rowcount,
)
if realm is None:
# Aggregate into InstallationCount. Only run if we just
# processed counts for all realms.
#
# TODO: Add support for updating installation data after
# changing an individual realm's values.
installationcount_query = SQL("""
INSERT INTO analytics_installationcount
(value, property, subgroup, end_time)
SELECT
sum(value), %(property)s, analytics_realmcount.subgroup, %(end_time)s
FROM analytics_realmcount
WHERE
property = %(property)s AND
end_time = %(end_time)s
GROUP BY analytics_realmcount.subgroup
""")
start = time.time()
cursor.execute(installationcount_query, {
'property': stat.property,
'end_time': end_time,
})
end = time.time()
logger.info(
"%s InstallationCount aggregation (%dms/%sr)",
stat.property, (end - start) * 1000, cursor.rowcount,
)
cursor.close()
## Utility functions called from outside counts.py ##
# called from zerver/lib/actions.py; should not throw any errors
def do_increment_logging_stat(zerver_object: Union[Realm, UserProfile, Stream], stat: CountStat,
subgroup: Optional[Union[str, int, bool]], event_time: datetime,
increment: int=1) -> None:
if not increment:
return
table = stat.data_collector.output_table
if table == RealmCount:
id_args = {'realm': zerver_object}
elif table == UserCount:
id_args = {'realm': zerver_object.realm, 'user': zerver_object}
else: # StreamCount
id_args = {'realm': zerver_object.realm, 'stream': zerver_object}
if stat.frequency == CountStat.DAY:
end_time = ceiling_to_day(event_time)
else: # CountStat.HOUR:
end_time = ceiling_to_hour(event_time)
row, created = table.objects.get_or_create(
property=stat.property, subgroup=subgroup, end_time=end_time,
defaults={'value': increment}, **id_args)
if not created:
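        # Use an F() expression so the increment happens inside the UPDATE statement
        # itself; concurrent increments then don't overwrite each other with stale
        # in-memory values.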
row.value = F('value') + increment
row.save(update_fields=['value'])
def do_drop_all_analytics_tables() -> None:
UserCount.objects.all().delete()
StreamCount.objects.all().delete()
RealmCount.objects.all().delete()
InstallationCount.objects.all().delete()
FillState.objects.all().delete()
def do_drop_single_stat(property: str) -> None:
UserCount.objects.filter(property=property).delete()
StreamCount.objects.filter(property=property).delete()
RealmCount.objects.filter(property=property).delete()
InstallationCount.objects.filter(property=property).delete()
FillState.objects.filter(property=property).delete()
## DataCollector-level operations ##
QueryFn = Callable[[Dict[str, Composable]], Composable]
def do_pull_by_sql_query(
property: str,
start_time: datetime,
end_time: datetime,
query: QueryFn,
group_by: Optional[Tuple[models.Model, str]],
) -> int:
if group_by is None:
subgroup = SQL('NULL')
group_by_clause = SQL('')
else:
subgroup = Identifier(group_by[0]._meta.db_table, group_by[1])
group_by_clause = SQL(', {}').format(subgroup)
# We do string replacement here because cursor.execute will reject a
# group_by_clause given as a param.
# We pass in the datetimes as params to cursor.execute so that we don't have to
# think about how to convert python datetimes to SQL datetimes.
query_ = query({
'subgroup': subgroup,
'group_by_clause': group_by_clause,
})
cursor = connection.cursor()
cursor.execute(query_, {
'property': property,
'time_start': start_time,
'time_end': end_time,
})
rowcount = cursor.rowcount
cursor.close()
return rowcount
def sql_data_collector(
output_table: Type[BaseCount],
query: QueryFn,
group_by: Optional[Tuple[models.Model, str]],
) -> DataCollector:
def pull_function(property: str, start_time: datetime, end_time: datetime,
realm: Optional[Realm] = None) -> int:
# The pull function type needs to accept a Realm argument
# because the 'minutes_active::day' CountStat uses
# DataCollector directly for do_pull_minutes_active, which
# requires the realm argument. We ignore it here, because the
# realm should have been already encoded in the `query` we're
# passed.
return do_pull_by_sql_query(property, start_time, end_time, query, group_by)
return DataCollector(output_table, pull_function)
def do_pull_minutes_active(property: str, start_time: datetime, end_time: datetime,
realm: Optional[Realm] = None) -> int:
user_activity_intervals = UserActivityInterval.objects.filter(
end__gt=start_time, start__lt=end_time,
).select_related(
'user_profile',
).values_list(
'user_profile_id', 'user_profile__realm_id', 'start', 'end')
seconds_active: Dict[Tuple[int, int], float] = defaultdict(float)
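    # Clip each activity interval to the [start_time, end_time) window and
    # accumulate the overlap, in seconds, per (user_id, realm_id) pair.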
for user_id, realm_id, interval_start, interval_end in user_activity_intervals:
if realm is None or realm.id == realm_id:
start = max(start_time, interval_start)
end = min(end_time, interval_end)
seconds_active[(user_id, realm_id)] += (end - start).total_seconds()
rows = [UserCount(user_id=ids[0], realm_id=ids[1], property=property,
end_time=end_time, value=int(seconds // 60))
for ids, seconds in seconds_active.items() if seconds >= 60]
UserCount.objects.bulk_create(rows)
return len(rows)
def count_message_by_user_query(realm: Optional[Realm]) -> QueryFn:
if realm is None:
realm_clause = SQL("")
else:
realm_clause = SQL("zerver_userprofile.realm_id = {} AND").format(Literal(realm.id))
return lambda kwargs: SQL("""
INSERT INTO analytics_usercount
(user_id, realm_id, value, property, subgroup, end_time)
SELECT
zerver_userprofile.id, zerver_userprofile.realm_id, count(*),
%(property)s, {subgroup}, %(time_end)s
FROM zerver_userprofile
JOIN zerver_message
ON
zerver_userprofile.id = zerver_message.sender_id
WHERE
zerver_userprofile.date_joined < %(time_end)s AND
zerver_message.date_sent >= %(time_start)s AND
{realm_clause}
zerver_message.date_sent < %(time_end)s
GROUP BY zerver_userprofile.id {group_by_clause}
""").format(**kwargs, realm_clause=realm_clause)
# Note: ignores the group_by / group_by_clause.
def count_message_type_by_user_query(realm: Optional[Realm]) -> QueryFn:
if realm is None:
realm_clause = SQL("")
else:
realm_clause = SQL("zerver_userprofile.realm_id = {} AND").format(Literal(realm.id))
return lambda kwargs: SQL("""
INSERT INTO analytics_usercount
(realm_id, user_id, value, property, subgroup, end_time)
SELECT realm_id, id, SUM(count) AS value, %(property)s, message_type, %(time_end)s
FROM
(
SELECT zerver_userprofile.realm_id, zerver_userprofile.id, count(*),
CASE WHEN
zerver_recipient.type = 1 THEN 'private_message'
WHEN
zerver_recipient.type = 3 THEN 'huddle_message'
WHEN
zerver_stream.invite_only = TRUE THEN 'private_stream'
ELSE 'public_stream'
END
message_type
FROM zerver_userprofile
JOIN zerver_message
ON
zerver_userprofile.id = zerver_message.sender_id AND
zerver_message.date_sent >= %(time_start)s AND
{realm_clause}
zerver_message.date_sent < %(time_end)s
JOIN zerver_recipient
ON
zerver_message.recipient_id = zerver_recipient.id
LEFT JOIN zerver_stream
ON
zerver_recipient.type_id = zerver_stream.id
GROUP BY
zerver_userprofile.realm_id, zerver_userprofile.id,
zerver_recipient.type, zerver_stream.invite_only
) AS subquery
GROUP BY realm_id, id, message_type
""").format(**kwargs, realm_clause=realm_clause)
# This query joins to the UserProfile table since all current queries that
# use this also subgroup on UserProfile.is_bot. If in the future there is a
# stat that counts messages by stream and doesn't need the UserProfile
# table, consider writing a new query for efficiency.
def count_message_by_stream_query(realm: Optional[Realm]) -> QueryFn:
if realm is None:
realm_clause = SQL("")
else:
realm_clause = SQL("zerver_stream.realm_id = {} AND").format(Literal(realm.id))
return lambda kwargs: SQL("""
INSERT INTO analytics_streamcount
(stream_id, realm_id, value, property, subgroup, end_time)
SELECT
zerver_stream.id, zerver_stream.realm_id, count(*), %(property)s, {subgroup}, %(time_end)s
FROM zerver_stream
JOIN zerver_recipient
ON
zerver_stream.id = zerver_recipient.type_id
JOIN zerver_message
ON
zerver_recipient.id = zerver_message.recipient_id
JOIN zerver_userprofile
ON
zerver_message.sender_id = zerver_userprofile.id
WHERE
zerver_stream.date_created < %(time_end)s AND
zerver_recipient.type = 2 AND
zerver_message.date_sent >= %(time_start)s AND
{realm_clause}
zerver_message.date_sent < %(time_end)s
GROUP BY zerver_stream.id {group_by_clause}
""").format(**kwargs, realm_clause=realm_clause)
# Hardcodes the query needed by active_users:is_bot:day, since that is
# currently the only stat that uses this.
def count_user_by_realm_query(realm: Optional[Realm]) -> QueryFn:
if realm is None:
realm_clause = SQL("")
else:
realm_clause = SQL("zerver_userprofile.realm_id = {} AND").format(Literal(realm.id))
return lambda kwargs: SQL("""
INSERT INTO analytics_realmcount
(realm_id, value, property, subgroup, end_time)
SELECT
zerver_realm.id, count(*), %(property)s, {subgroup}, %(time_end)s
FROM zerver_realm
JOIN zerver_userprofile
ON
zerver_realm.id = zerver_userprofile.realm_id
WHERE
zerver_realm.date_created < %(time_end)s AND
zerver_userprofile.date_joined >= %(time_start)s AND
zerver_userprofile.date_joined < %(time_end)s AND
{realm_clause}
zerver_userprofile.is_active = TRUE
GROUP BY zerver_realm.id {group_by_clause}
""").format(**kwargs, realm_clause=realm_clause)
# Currently hardcodes the query needed for active_users_audit:is_bot:day.
# Assumes that a user cannot have two RealmAuditLog entries with the same event_time and
# event_type in [RealmAuditLog.USER_CREATED, USER_DEACTIVATED, etc].
# In particular, it's important to ensure that migrations don't cause that to happen.
def check_realmauditlog_by_user_query(realm: Optional[Realm]) -> QueryFn:
if realm is None:
realm_clause = SQL("")
else:
realm_clause = SQL("realm_id = {} AND").format(Literal(realm.id))
return lambda kwargs: SQL("""
INSERT INTO analytics_usercount
(user_id, realm_id, value, property, subgroup, end_time)
SELECT
ral1.modified_user_id, ral1.realm_id, 1, %(property)s, {subgroup}, %(time_end)s
FROM zerver_realmauditlog ral1
JOIN (
SELECT modified_user_id, max(event_time) AS max_event_time
FROM zerver_realmauditlog
WHERE
event_type in ({user_created}, {user_activated}, {user_deactivated}, {user_reactivated}) AND
{realm_clause}
event_time < %(time_end)s
GROUP BY modified_user_id
) ral2
ON
ral1.event_time = max_event_time AND
ral1.modified_user_id = ral2.modified_user_id
JOIN zerver_userprofile
ON
ral1.modified_user_id = zerver_userprofile.id
WHERE
ral1.event_type in ({user_created}, {user_activated}, {user_reactivated})
""").format(
**kwargs,
user_created=Literal(RealmAuditLog.USER_CREATED),
user_activated=Literal(RealmAuditLog.USER_ACTIVATED),
user_deactivated=Literal(RealmAuditLog.USER_DEACTIVATED),
user_reactivated=Literal(RealmAuditLog.USER_REACTIVATED),
realm_clause=realm_clause,
)
def check_useractivityinterval_by_user_query(realm: Optional[Realm]) -> QueryFn:
if realm is None:
realm_clause = SQL("")
else:
realm_clause = SQL("zerver_userprofile.realm_id = {} AND").format(Literal(realm.id))
return lambda kwargs: SQL("""
INSERT INTO analytics_usercount
(user_id, realm_id, value, property, subgroup, end_time)
SELECT
zerver_userprofile.id, zerver_userprofile.realm_id, 1, %(property)s, {subgroup}, %(time_end)s
FROM zerver_userprofile
JOIN zerver_useractivityinterval
ON
zerver_userprofile.id = zerver_useractivityinterval.user_profile_id
WHERE
zerver_useractivityinterval.end >= %(time_start)s AND
{realm_clause}
zerver_useractivityinterval.start < %(time_end)s
GROUP BY zerver_userprofile.id {group_by_clause}
""").format(**kwargs, realm_clause=realm_clause)
def count_realm_active_humans_query(realm: Optional[Realm]) -> QueryFn:
if realm is None:
realm_clause = SQL("")
else:
realm_clause = SQL("realm_id = {} AND").format(Literal(realm.id))
return lambda kwargs: SQL("""
INSERT INTO analytics_realmcount
(realm_id, value, property, subgroup, end_time)
SELECT
usercount1.realm_id, count(*), %(property)s, NULL, %(time_end)s
FROM (
SELECT realm_id, user_id
FROM analytics_usercount
WHERE
property = 'active_users_audit:is_bot:day' AND
subgroup = 'false' AND
{realm_clause}
end_time = %(time_end)s
) usercount1
JOIN (
SELECT realm_id, user_id
FROM analytics_usercount
WHERE
property = '15day_actives::day' AND
{realm_clause}
end_time = %(time_end)s
) usercount2
ON
usercount1.user_id = usercount2.user_id
GROUP BY usercount1.realm_id
""").format(**kwargs, realm_clause=realm_clause)
# Currently unused and untested
count_stream_by_realm_query = lambda kwargs: SQL("""
INSERT INTO analytics_realmcount
(realm_id, value, property, subgroup, end_time)
SELECT
zerver_realm.id, count(*), %(property)s, {subgroup}, %(time_end)s
FROM zerver_realm
JOIN zerver_stream
ON
        zerver_realm.id = zerver_stream.realm_id
WHERE
zerver_realm.date_created < %(time_end)s AND
zerver_stream.date_created >= %(time_start)s AND
zerver_stream.date_created < %(time_end)s
GROUP BY zerver_realm.id {group_by_clause}
""").format(**kwargs)
def get_count_stats(realm: Optional[Realm]=None) -> Dict[str, CountStat]:
## CountStat declarations ##
count_stats_ = [
# Messages sent stats
# Stats that count the number of messages sent in various ways.
# These are also the set of stats that read from the Message table.
CountStat('messages_sent:is_bot:hour',
sql_data_collector(UserCount, count_message_by_user_query(
realm), (UserProfile, 'is_bot')),
CountStat.HOUR),
CountStat('messages_sent:message_type:day',
sql_data_collector(
UserCount, count_message_type_by_user_query(realm), None),
CountStat.DAY),
CountStat('messages_sent:client:day',
sql_data_collector(UserCount, count_message_by_user_query(realm),
(Message, 'sending_client_id')), CountStat.DAY),
CountStat('messages_in_stream:is_bot:day',
sql_data_collector(StreamCount, count_message_by_stream_query(realm),
(UserProfile, 'is_bot')), CountStat.DAY),
# Number of users stats
# Stats that count the number of active users in the UserProfile.is_active sense.
# 'active_users_audit:is_bot:day' is the canonical record of which users were
# active on which days (in the UserProfile.is_active sense).
# Important that this stay a daily stat, so that 'realm_active_humans::day' works as expected.
CountStat('active_users_audit:is_bot:day',
sql_data_collector(UserCount, check_realmauditlog_by_user_query(
realm), (UserProfile, 'is_bot')),
CountStat.DAY),
# Important note: LoggingCountStat objects aren't passed the
# Realm argument, because by nature they have a logging
# structure, not a pull-from-database structure, so there's no
# way to compute them for a single realm after the fact (the
# use case for passing a Realm argument).
        # Sanity check on 'active_users_audit:is_bot:day', and an archetype for future LoggingCountStats.
# In RealmCount, 'active_users_audit:is_bot:day' should be the partial
# sum sequence of 'active_users_log:is_bot:day', for any realm that
# started after the latter stat was introduced.
LoggingCountStat('active_users_log:is_bot:day',
RealmCount, CountStat.DAY),
# Another sanity check on 'active_users_audit:is_bot:day'. Is only an
# approximation, e.g. if a user is deactivated between the end of the
        # day and when this stat is run, they won't be counted. However, it is
        # the simplest of the three to inspect by hand.
CountStat('active_users:is_bot:day',
sql_data_collector(RealmCount, count_user_by_realm_query(realm), (UserProfile, 'is_bot')),
CountStat.DAY, interval=TIMEDELTA_MAX),
# Messages read stats. messages_read::hour is the total
# number of messages read, whereas
# messages_read_interactions::hour tries to count the total
# number of UI interactions resulting in messages being marked
# as read (imperfect because of batching of some request
# types, but less likely to be overwhelmed by a single bulk
# operation).
LoggingCountStat('messages_read::hour', UserCount, CountStat.HOUR),
LoggingCountStat('messages_read_interactions::hour', UserCount, CountStat.HOUR),
# User activity stats
# Stats that measure user activity in the UserActivityInterval sense.
CountStat('1day_actives::day',
sql_data_collector(
UserCount, check_useractivityinterval_by_user_query(realm), None),
CountStat.DAY, interval=timedelta(days=1)-UserActivityInterval.MIN_INTERVAL_LENGTH),
CountStat('7day_actives::day',
sql_data_collector(
UserCount, check_useractivityinterval_by_user_query(realm), None),
CountStat.DAY, interval=timedelta(days=7)-UserActivityInterval.MIN_INTERVAL_LENGTH),
CountStat('15day_actives::day',
sql_data_collector(
UserCount, check_useractivityinterval_by_user_query(realm), None),
CountStat.DAY, interval=timedelta(days=15)-UserActivityInterval.MIN_INTERVAL_LENGTH),
CountStat('minutes_active::day', DataCollector(
UserCount, do_pull_minutes_active), CountStat.DAY),
# Rate limiting stats
# Used to limit the number of invitation emails sent by a realm
LoggingCountStat('invites_sent::day', RealmCount, CountStat.DAY),
# Dependent stats
# Must come after their dependencies.
# Canonical account of the number of active humans in a realm on each day.
DependentCountStat('realm_active_humans::day',
sql_data_collector(
RealmCount, count_realm_active_humans_query(realm), None),
CountStat.DAY,
dependencies=['active_users_audit:is_bot:day', '15day_actives::day']),
]
return OrderedDict((stat.property, stat) for stat in count_stats_)
# To avoid refactoring for now COUNT_STATS can be used as before
COUNT_STATS = get_count_stats()
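# Usage sketch (an assumption -- in practice these stats are normally driven by the
# analytics management commands rather than invoked directly):
#
#     from django.utils.timezone import now as timezone_now
#
#     stat = COUNT_STATS['messages_sent:is_bot:hour']
#     process_count_stat(stat, fill_to_time=floor_to_hour(timezone_now()))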
|
showell/zulip
|
analytics/lib/counts.py
|
Python
|
apache-2.0
| 29,578
|
"""Tests for the update coordinator."""
import asyncio
from datetime import timedelta
import logging
import urllib.error
import aiohttp
import pytest
import requests
from homeassistant.helpers import update_coordinator
from homeassistant.util.dt import utcnow
from tests.async_mock import AsyncMock, Mock, patch
from tests.common import async_fire_time_changed
_LOGGER = logging.getLogger(__name__)
def get_crd(hass, update_interval):
"""Make coordinator mocks."""
calls = 0
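    # The mocked update method simply counts its invocations, so tests can assert
    # on crd.data to see how many refreshes actually ran.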
async def refresh() -> int:
nonlocal calls
calls += 1
return calls
crd = update_coordinator.DataUpdateCoordinator[int](
hass,
_LOGGER,
name="test",
update_method=refresh,
update_interval=update_interval,
)
return crd
DEFAULT_UPDATE_INTERVAL = timedelta(seconds=10)
@pytest.fixture
def crd(hass):
"""Coordinator mock with default update interval."""
return get_crd(hass, DEFAULT_UPDATE_INTERVAL)
@pytest.fixture
def crd_without_update_interval(hass):
"""Coordinator mock that never automatically updates."""
return get_crd(hass, None)
async def test_async_refresh(crd):
"""Test async_refresh for update coordinator."""
assert crd.data is None
await crd.async_refresh()
assert crd.data == 1
assert crd.last_update_success is True
# Make sure we didn't schedule a refresh because we have 0 listeners
assert crd._unsub_refresh is None
updates = []
def update_callback():
updates.append(crd.data)
unsub = crd.async_add_listener(update_callback)
await crd.async_refresh()
assert updates == [2]
assert crd._unsub_refresh is not None
# Test unsubscribing through function
unsub()
await crd.async_refresh()
assert updates == [2]
# Test unsubscribing through method
crd.async_add_listener(update_callback)
crd.async_remove_listener(update_callback)
await crd.async_refresh()
assert updates == [2]
async def test_request_refresh(crd):
"""Test request refresh for update coordinator."""
assert crd.data is None
await crd.async_request_refresh()
assert crd.data == 1
assert crd.last_update_success is True
    # Second time we hit the debounce
await crd.async_request_refresh()
assert crd.data == 1
assert crd.last_update_success is True
async def test_request_refresh_no_auto_update(crd_without_update_interval):
"""Test request refresh for update coordinator without automatic update."""
crd = crd_without_update_interval
assert crd.data is None
await crd.async_request_refresh()
assert crd.data == 1
assert crd.last_update_success is True
    # Second time we hit the debounce
await crd.async_request_refresh()
assert crd.data == 1
assert crd.last_update_success is True
@pytest.mark.parametrize(
"err_msg",
[
(asyncio.TimeoutError, "Timeout fetching test data"),
(requests.exceptions.Timeout, "Timeout fetching test data"),
(urllib.error.URLError("timed out"), "Timeout fetching test data"),
(aiohttp.ClientError, "Error requesting test data"),
(requests.exceptions.RequestException, "Error requesting test data"),
(urllib.error.URLError("something"), "Error requesting test data"),
(update_coordinator.UpdateFailed, "Error fetching test data"),
],
)
async def test_refresh_known_errors(err_msg, crd, caplog):
"""Test raising known errors."""
crd.update_method = AsyncMock(side_effect=err_msg[0])
await crd.async_refresh()
assert crd.data is None
assert crd.last_update_success is False
assert err_msg[1] in caplog.text
async def test_refresh_fail_unknown(crd, caplog):
"""Test raising unknown error."""
await crd.async_refresh()
crd.update_method = AsyncMock(side_effect=ValueError)
await crd.async_refresh()
assert crd.data == 1 # value from previous fetch
assert crd.last_update_success is False
assert "Unexpected error fetching test data" in caplog.text
async def test_refresh_no_update_method(crd):
"""Test raising error is no update method is provided."""
await crd.async_refresh()
crd.update_method = None
with pytest.raises(NotImplementedError):
await crd.async_refresh()
async def test_update_interval(hass, crd):
"""Test update interval works."""
# Test we don't update without subscriber
async_fire_time_changed(hass, utcnow() + crd.update_interval)
await hass.async_block_till_done()
assert crd.data is None
# Add subscriber
update_callback = Mock()
crd.async_add_listener(update_callback)
# Test twice we update with subscriber
async_fire_time_changed(hass, utcnow() + crd.update_interval)
await hass.async_block_till_done()
assert crd.data == 1
async_fire_time_changed(hass, utcnow() + crd.update_interval)
await hass.async_block_till_done()
assert crd.data == 2
# Test removing listener
crd.async_remove_listener(update_callback)
async_fire_time_changed(hass, utcnow() + crd.update_interval)
await hass.async_block_till_done()
    # Test we stop updating after we lose the last subscriber
assert crd.data == 2
async def test_update_interval_not_present(hass, crd_without_update_interval):
"""Test update never happens with no update interval."""
crd = crd_without_update_interval
# Test we don't update without subscriber with no update interval
async_fire_time_changed(hass, utcnow() + DEFAULT_UPDATE_INTERVAL)
await hass.async_block_till_done()
assert crd.data is None
# Add subscriber
update_callback = Mock()
crd.async_add_listener(update_callback)
# Test twice we don't update with subscriber with no update interval
async_fire_time_changed(hass, utcnow() + DEFAULT_UPDATE_INTERVAL)
await hass.async_block_till_done()
assert crd.data is None
async_fire_time_changed(hass, utcnow() + DEFAULT_UPDATE_INTERVAL)
await hass.async_block_till_done()
assert crd.data is None
# Test removing listener
crd.async_remove_listener(update_callback)
async_fire_time_changed(hass, utcnow() + DEFAULT_UPDATE_INTERVAL)
await hass.async_block_till_done()
    # Test we don't update after we lose the last subscriber
assert crd.data is None
async def test_refresh_recover(crd, caplog):
"""Test recovery of freshing data."""
crd.last_update_success = False
await crd.async_refresh()
assert crd.last_update_success is True
assert "Fetching test data recovered" in caplog.text
async def test_coordinator_entity(crd):
"""Test the CoordinatorEntity class."""
entity = update_coordinator.CoordinatorEntity(crd)
assert entity.should_poll is False
crd.last_update_success = False
assert entity.available is False
await entity.async_update()
assert entity.available is True
with patch(
"homeassistant.helpers.entity.Entity.async_on_remove"
) as mock_async_on_remove:
await entity.async_added_to_hass()
assert mock_async_on_remove.called
# Verify we do not update if the entity is disabled
crd.last_update_success = False
with patch("homeassistant.helpers.entity.Entity.enabled", False):
await entity.async_update()
assert entity.available is False
|
GenericStudent/home-assistant
|
tests/helpers/test_update_coordinator.py
|
Python
|
apache-2.0
| 7,348
|
# Copyright (c) 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_log import log as logging
from neutron.agent.common import ovs_lib
from neutron.common import config
from neutron.conf.agent import cmd
from neutron.conf.agent import common as agent_config
from neutron.conf.agent.l3 import config as l3_config
from neutron.conf.plugins.ml2.drivers import ovs_conf
from neutron.conf import service as service_config
LOG = logging.getLogger(__name__)
# Default ovsdb_timeout value for this script.
# It allows cleaning bridges with even thousands of ports.
CLEANUP_OVSDB_TIMEOUT = 600
def setup_conf():
"""Setup the cfg for the clean up utility.
Use separate setup_conf for the utility because there are many options
from the main config that do not apply during clean-up.
"""
conf = cfg.CONF
cmd.register_cmd_opts(cmd.ovs_opts, conf)
l3_config.register_l3_agent_config_opts(l3_config.OPTS, conf)
agent_config.register_interface_driver_opts_helper(conf)
agent_config.register_interface_opts()
service_config.register_service_opts(service_config.RPC_EXTRA_OPTS, conf)
ovs_conf.register_ovs_agent_opts(conf)
conf.set_default("ovsdb_timeout", CLEANUP_OVSDB_TIMEOUT, "OVS")
return conf
def main():
"""Main method for cleaning up OVS bridges.
The utility cleans up the integration bridges used by Neutron.
"""
conf = setup_conf()
conf()
config.setup_logging()
agent_config.setup_privsep()
do_main(conf)
def do_main(conf):
configuration_bridges = set([conf.OVS.integration_bridge])
ovs = ovs_lib.BaseOVS()
ovs_bridges = set(ovs.get_bridges())
available_configuration_bridges = configuration_bridges & ovs_bridges
if conf.ovs_all_ports:
bridges = ovs_bridges
else:
bridges = available_configuration_bridges
for bridge in bridges:
LOG.info("Cleaning bridge: %s", bridge)
ovs.ovsdb.ovs_cleanup(bridge,
conf.ovs_all_ports).execute(check_error=True)
LOG.info("OVS cleanup completed successfully")
|
mahak/neutron
|
neutron/cmd/ovs_cleanup.py
|
Python
|
apache-2.0
| 2,691
|
import sys
sys.path.insert(1,"../../../")
import h2o
from tests import pyunit_utils
import pandas as pd
import zipfile
import statsmodels.api as sm
def link_functions_binomial():
print("Read in prostate data.")
h2o_data = h2o.import_file(path=pyunit_utils.locate("smalldata/prostate/prostate_complete.csv.zip"))
h2o_data.head()
sm_data = pd.read_csv(zipfile.ZipFile(pyunit_utils.locate("smalldata/prostate/prostate_complete.csv.zip")).open("prostate_complete.csv")).as_matrix()
sm_data_response = sm_data[:,2]
sm_data_features = sm_data[:,[1,3,4,5,6,7,8,9]]
print("Testing for family: BINOMIAL")
print("Set variables for h2o.")
myY = "CAPSULE"
myX = ["ID","AGE","RACE","GLEASON","DCAPS","PSA","VOL","DPROS"]
print("Create models with canonical link: LOGIT")
h2o_model = h2o.glm(x=h2o_data[myX], y=h2o_data[myY].asfactor(), family="binomial", link="logit",alpha=[0.5], Lambda=[0])
sm_model = sm.GLM(endog=sm_data_response, exog=sm_data_features, family=sm.families.Binomial(sm.families.links.logit)).fit()
print("Compare model deviances for link function logit")
h2o_deviance = h2o_model.residual_deviance() / h2o_model.null_deviance()
sm_deviance = sm_model.deviance / sm_model.null_deviance
    assert h2o_deviance - sm_deviance < 0.01, "expected h2o to have an equivalent or better deviance measure"
if __name__ == "__main__":
pyunit_utils.standalone_test(link_functions_binomial)
else:
link_functions_binomial()
|
madmax983/h2o-3
|
h2o-py/tests/testdir_algos/glm/pyunit_DEPRECATED_link_functions_binomialGLM.py
|
Python
|
apache-2.0
| 1,473
|
# -*- coding: utf-8 -*-
# Copyright 2017, Digital Reasoning
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import unicode_literals
import logging
from django.conf import settings
from django.contrib.auth.models import Group
from django.http import Http404
from guardian.shortcuts import get_groups_with_perms, get_users_with_perms, remove_perm
from rest_framework import viewsets
from rest_framework.serializers import ListField, SlugRelatedField, ValidationError
from stackdio.api.users.models import get_user_queryset
from stackdio.core import fields, mixins, serializers
from stackdio.core.config import StackdioConfigException
from stackdio.core.permissions import StackdioPermissionsModelPermissions
from stackdio.core.shortcuts import get_groups_with_model_perms, get_users_with_model_perms
try:
from django_auth_ldap.backend import LDAPBackend
except ImportError:
LDAPBackend = None
logger = logging.getLogger(__name__)
def _filter_perms(available_perms, perms):
ret = []
for perm in perms:
if perm in available_perms:
ret.append(perm)
return ret
class UserSlugRelatedField(SlugRelatedField):
def to_internal_value(self, data):
try:
return super(UserSlugRelatedField, self).to_internal_value(data)
except ValidationError:
if settings.LDAP_ENABLED:
if LDAPBackend is None:
raise StackdioConfigException('LDAP is enabled, but django_auth_ldap isn\'t '
'installed. Please install django_auth_ldap')
# Grab the ldap user and try again
user = LDAPBackend().populate_user(data)
if user is not None:
return super(UserSlugRelatedField, self).to_internal_value(data)
# Nothing worked, just re-raise the exception
raise
class StackdioBasePermissionsViewSet(mixins.BulkUpdateModelMixin, viewsets.ModelViewSet):
"""
Viewset for creating permissions endpoints
"""
user_or_group = None
model_or_object = None
lookup_value_regex = r'[\w.@+-]+'
parent_lookup_field = 'pk'
parent_lookup_url_kwarg = None
def get_model_name(self):
raise NotImplementedError('`get_model_name()` must be implemented.')
def get_app_label(self):
raise NotImplementedError('`get_app_label()` must be implemented.')
def get_serializer_class(self):
user_or_group = self.get_user_or_group()
model_or_object = self.get_model_or_object()
model_name = self.get_model_name()
app_label = self.get_app_label()
super_cls = self.switch_model_object(serializers.StackdioModelPermissionsSerializer,
serializers.StackdioObjectPermissionsSerializer)
default_parent_lookup_url_kwarg = 'parent_{}'.format(self.parent_lookup_field)
url_field_kwargs = {
'view_name': 'api:{0}:{1}-{2}-{3}-permissions-detail'.format(
app_label,
model_name,
model_or_object,
user_or_group
),
'permission_lookup_field': self.lookup_field,
'permission_lookup_url_kwarg': self.lookup_url_kwarg or self.lookup_field,
'lookup_field': self.parent_lookup_field,
'lookup_url_kwarg': self.parent_lookup_url_kwarg or default_parent_lookup_url_kwarg,
}
url_field_cls = self.switch_model_object(
fields.HyperlinkedModelPermissionsField,
fields.HyperlinkedObjectPermissionsField,
)
        # Build the serializer class dynamically so the URL field and the
        # user/group field match this particular model/object viewset.
class StackdioUserPermissionsSerializer(super_cls):
user = UserSlugRelatedField(slug_field='username', queryset=get_user_queryset())
url = url_field_cls(**url_field_kwargs)
permissions = ListField()
class Meta(super_cls.Meta):
update_lookup_field = 'user'
class StackdioGroupPermissionsSerializer(super_cls):
group = SlugRelatedField(slug_field='name', queryset=Group.objects.all())
url = url_field_cls(**url_field_kwargs)
permissions = ListField()
class Meta(super_cls.Meta):
update_lookup_field = 'group'
return self.switch_user_group(StackdioUserPermissionsSerializer,
StackdioGroupPermissionsSerializer)
def get_user_or_group(self):
assert self.user_or_group in ('user', 'group'), (
"'%s' should include a `user_or_group` attribute that is one of 'user' or 'group'."
% self.__class__.__name__
)
return self.user_or_group
def switch_user_group(self, if_user, if_group):
return {
'user': if_user,
'group': if_group,
}.get(self.get_user_or_group())
def get_model_or_object(self):
assert self.model_or_object in ('model', 'object'), (
"'%s' should include a `model_or_object` attribute that is one of 'model' or 'object'."
% self.__class__.__name__
)
return self.model_or_object
def switch_model_object(self, if_model, if_object):
return {
'model': if_model,
'object': if_object,
}.get(self.get_model_or_object())
def _transform_perm(self, model_name):
        def do_transform(item):
            # pylint: disable=unused-variable
            perm, sep, empty = item.partition('_' + model_name)
            return perm
        return do_transform
def get_object(self):
queryset = self.get_queryset()
url_kwarg = self.lookup_url_kwarg or self.lookup_field
name_attr = self.switch_user_group('username', 'name')
for obj in queryset:
auth_obj = obj[self.get_user_or_group()]
if self.kwargs[url_kwarg] == getattr(auth_obj, name_attr):
return obj
raise Http404('No permissions found for %s' % self.kwargs[url_kwarg])
class StackdioModelPermissionsViewSet(StackdioBasePermissionsViewSet):
model_cls = None
model_or_object = 'model'
permission_classes = (StackdioPermissionsModelPermissions,)
def get_model_cls(self):
assert self.model_cls, (
"'%s' should include a `model_cls` attribute or override the `get_model_cls()` method."
% self.__class__.__name__
)
return self.model_cls
def get_model_name(self):
return self.get_model_cls()._meta.model_name
def get_app_label(self):
ret = self.get_model_cls()._meta.app_label
if ret == 'auth':
# one-off thing, since users/groups are in the `users` app, not `auth`
return 'users'
return ret
def get_model_permissions(self):
return getattr(self.get_model_cls(),
'model_permissions',
getattr(self, 'model_permissions', ()))
def get_permissions(self):
"""
Instantiates and returns the list of permissions that this view requires.
"""
ret = []
for permission_cls in self.permission_classes:
permission = permission_cls()
# Inject our model_cls into the permission
if isinstance(permission, StackdioPermissionsModelPermissions) \
and permission.model_cls is None:
permission.model_cls = self.model_cls
ret.append(permission)
return ret
def get_queryset(self): # pylint: disable=method-hidden
model_cls = self.get_model_cls()
model_name = model_cls._meta.model_name
model_perms = self.get_model_permissions()
# Grab the perms for either the users or groups
perm_map_func = self.switch_user_group(
lambda: get_users_with_model_perms(model_cls, attach_perms=True,
with_group_users=False),
lambda: get_groups_with_model_perms(model_cls, attach_perms=True),
)
# Do this as a function so we don't fetch both the user AND group permissions on each
# request
perm_map = perm_map_func()
ret = []
sorted_perms = sorted(perm_map.items(), key=lambda x: getattr(x[0], self.lookup_field))
for auth_obj, perms in sorted_perms:
new_perms = [self._transform_perm(model_name)(perm) for perm in perms]
ret.append({
self.get_user_or_group(): auth_obj,
'permissions': _filter_perms(model_perms, new_perms),
})
return ret
def list(self, request, *args, **kwargs):
response = super(StackdioModelPermissionsViewSet, self).list(request, *args, **kwargs)
# add available permissions to the response
response.data['available_permissions'] = sorted(self.get_model_permissions())
return response
def perform_create(self, serializer):
serializer.save(model_cls=self.get_model_cls())
def perform_update(self, serializer):
serializer.save(model_cls=self.get_model_cls())
def perform_destroy(self, instance):
model_cls = self.get_model_cls()
app_label = model_cls._meta.app_label
model_name = model_cls._meta.model_name
for perm in instance['permissions']:
remove_perm('%s.%s_%s' % (app_label, perm, model_name),
instance[self.get_user_or_group()])
class StackdioModelUserPermissionsViewSet(StackdioModelPermissionsViewSet):
user_or_group = 'user'
lookup_field = 'username'
lookup_url_kwarg = 'username'
class StackdioModelGroupPermissionsViewSet(StackdioModelPermissionsViewSet):
user_or_group = 'group'
lookup_field = 'name'
lookup_url_kwarg = 'groupname'
class StackdioObjectPermissionsViewSet(StackdioBasePermissionsViewSet):
"""
Viewset for creating permissions endpoints
"""
model_or_object = 'object'
def get_permissioned_object(self):
raise NotImplementedError('`get_permissioned_object()` must be implemented.')
def get_model_name(self):
return self.get_permissioned_object()._meta.model_name
def get_app_label(self):
ret = self.get_permissioned_object()._meta.app_label
if ret == 'auth':
# one-off thing, since users/groups are in the `users` app, not `auth`
return 'users'
return ret
def get_object_permissions(self):
return getattr(self.get_permissioned_object(),
'object_permissions',
getattr(self, 'object_permissions', ()))
def get_queryset(self): # pylint: disable=method-hidden
obj = self.get_permissioned_object()
model_name = obj._meta.model_name
object_perms = self.get_object_permissions()
# Grab the perms for either the users or groups
perm_map_func = self.switch_user_group(
lambda: get_users_with_perms(obj, attach_perms=True,
with_superusers=False, with_group_users=False),
lambda: get_groups_with_perms(obj, attach_perms=True),
)
perm_map = perm_map_func()
ret = []
sorted_perms = sorted(perm_map.items(), key=lambda x: getattr(x[0], self.lookup_field))
for auth_obj, perms in sorted_perms:
new_perms = [self._transform_perm(model_name)(perm) for perm in perms]
ret.append({
self.get_user_or_group(): auth_obj,
'permissions': _filter_perms(object_perms, new_perms),
})
return ret
def list(self, request, *args, **kwargs):
response = super(StackdioObjectPermissionsViewSet, self).list(request, *args, **kwargs)
# add available permissions to the response
response.data['available_permissions'] = sorted(self.get_object_permissions())
return response
def perform_create(self, serializer):
serializer.save(object=self.get_permissioned_object())
def perform_update(self, serializer):
serializer.save(object=self.get_permissioned_object())
def perform_destroy(self, instance):
obj = self.get_permissioned_object()
app_label = obj._meta.app_label
model_name = obj._meta.model_name
for perm in instance['permissions']:
remove_perm('%s.%s_%s' % (app_label, perm, model_name),
instance[self.get_user_or_group()],
obj)
# pylint: disable=abstract-method
class StackdioObjectUserPermissionsViewSet(StackdioObjectPermissionsViewSet):
user_or_group = 'user'
lookup_field = 'username'
lookup_url_kwarg = 'username'
class StackdioObjectGroupPermissionsViewSet(StackdioObjectPermissionsViewSet):
user_or_group = 'group'
lookup_field = 'name'
lookup_url_kwarg = 'groupname'
|
clarkperkins/stackdio
|
stackdio/core/viewsets.py
|
Python
|
apache-2.0
| 13,461
|
from .distillers import Distill, Distiller
class NewsDistiller(Distiller):
site = Distill("og:site_name")
title = Distill("s:headline", "og:title")
image_url = Distill("s:associatedMedia.ImageObject/url", "og:image")
pub_date = Distill("s:datePublished")
author = Distill("s:creator.Person/name", "s:author")
section = Distill("s:articleSection")
description = Distill("s:description", "og:description")
link = Distill("s:url", "og:url")
id = Distill("s:identifier")
class ParselyDistiller(Distiller):
site = Distill("og:site_name")
title = Distill("pp:title", "s:headline", "og:title")
image_url = Distill(
"pp:image_url", "s:associatedMedia.ImageObject/url", "og:image")
pub_date = Distill("pp:pub_date", "s:datePublished")
author = Distill("pp:author", "s:creator.Person/name", "s:author")
section = Distill("pp:section", "s:articleSection")
link = Distill("pp:link", "og:url", "s:url")
post_id = Distill("pp:post_id", "s:identifier")
page_type = Distill("pp:type")
|
emmett9001/schema.to
|
schemato/distillery.py
|
Python
|
apache-2.0
| 1,053
|
#!/usr/bin/env python2.7
import numpy as np
import matplotlib.pyplot as plt
Freq=np.array([30,40,45,50,53,55,60,65,70,80,90,95,98,100,110,120])
Db=np.array([70.5,78.6,83.2,88.4,87.5,86.7,85.2,83.9,85.1,88,95.7,100.4,100.4,99.2,94.7,94.9])
plt.xlabel('Frequency')
plt.ylabel('Decibel')
plt.title('Decibel vs Freq at 0.1 volts')
#for i in range(len(Freq)):
# plt.text(Freq[i],Db[i], r'$Freq=%f, \ Db=%f$' % (Freq[i], Db[i]))
plt.axis([0, 330, 50, 130])
plt.plot(Freq,Db,'bo',Freq,Db,'k')
plt.grid(True)
plt.show()
|
P1R/cinves
|
TrabajoFinal/tubo350cm/2-DbvsFreq/tubo2huecos/DbvsFreq-Ampde0.1v-2huequitos.py
|
Python
|
apache-2.0
| 511
|
# Copyright 2014 ARM Limited
#
# Licensed under the Apache License, Version 2.0
# See LICENSE file for details.
# standard library modules, , ,
import re
import logging
from collections import OrderedDict
import functools
import binascii
import calendar
import datetime
import hashlib
import base64
import os
try:
from urllib import quote as quoteURL
except ImportError:
from urllib.parse import quote as quoteURL #pylint: disable=no-name-in-module,import-error
# requests, apache2
import requests
# PyJWT, MIT, Jason Web Tokens, pip install PyJWT
import jwt
# cryptography, Apache License, Python Cryptography library,
#import cryptography
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.primitives import serialization
# settings, , load and save settings, internal
from yotta.lib import settings
# access_common, , things shared between different component access modules, internal
from yotta.lib import access_common
# Ordered JSON, , read & write json, internal
from yotta.lib import ordered_json
# export key, , export pycrypto keys, internal
from yotta.lib import exportkey
# globalconf, share global arguments between modules, internal
from yotta.lib import globalconf
Registry_Base_URL = 'https://registry.yottabuild.org'
Website_Base_URL = 'https://yotta.mbed.com'
_OpenSSH_Keyfile_Strip = re.compile(b"^(ssh-[a-z0-9]*\s+)|(\s+.+\@.+)|\n", re.MULTILINE)
logger = logging.getLogger('access')
# suppress logging from the requests library
logging.getLogger("requests").setLevel(logging.WARNING)
class AuthError(RuntimeError):
pass
# Internal functions
def generate_jwt_token(private_key, registry=None):
registry = registry or Registry_Base_URL
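    # Build a short-lived token: "exp" is set two minutes out, "aud" pins the token
    # to the target registry, and "prn" carries the fingerprint of our public key.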
expires = calendar.timegm((datetime.datetime.utcnow() + datetime.timedelta(minutes=2)).timetuple())
prn = _fingerprint(private_key.public_key())
logger.debug('fingerprint: %s' % prn)
token_fields = {
"iss": 'yotta',
"aud": registry,
"prn": prn,
"exp": expires
}
logger.debug('token fields: %s' % token_fields)
private_key_pem = private_key.private_bytes(
serialization.Encoding.PEM,
serialization.PrivateFormat.PKCS8,
serialization.NoEncryption()
)
token = jwt.encode(token_fields, private_key_pem.decode('ascii'), 'RS256').decode('ascii')
logger.debug('encoded token: %s' % token)
return token
def _pubkeyWireFormat(pubkey):
pubk_numbers = pubkey.public_numbers()
logger.debug('openssh format publickey:\n%s' % exportkey.openSSH(pubk_numbers))
return quoteURL(_OpenSSH_Keyfile_Strip.sub(b'', exportkey.openSSH(pubk_numbers)))
def _fingerprint(pubkey):
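    # Compute the classic colon-separated MD5 fingerprint of the OpenSSH-encoded
    # public key (e.g. "ab:cd:...").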
stripped = _OpenSSH_Keyfile_Strip.sub(b'', exportkey.openSSH(pubkey.public_numbers()))
decoded = base64.b64decode(stripped)
khash = hashlib.md5(decoded).hexdigest()
return ':'.join([khash[i:i+2] for i in range(0, len(khash), 2)])
def _retryConnectionErrors(fn):
@functools.wraps(fn)
def wrapped(*args, **kwargs):
attempts_remaining = 5
delay = 0.1
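        # Retry transient network failures a few times, with a slowly growing delay
        # between attempts (see the sleep at the bottom of the loop).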
while True:
attempts_remaining -= 1
try:
return fn(*args, **kwargs)
except requests.exceptions.ConnectionError as e:
errmessage = e.message
import socket
# try to format re-packaged get-address-info exceptions
# into a nice message (this will be the normal exception
# you see if you aren't connected to the internet)
try:
errmessage = str(e.message[1])
except Exception as e:
pass
if attempts_remaining:
logger.warning('connection error: %s, retrying...', errmessage)
else:
logger.error('connection error: %s', errmessage)
raise
except requests.exceptions.Timeout as e:
if attempts_remaining:
logger.warning('request timed out: %s, retrying...', e.message)
else:
logger.error('request timed out: %s', e.message)
raise
import time
time.sleep(delay)
delay = delay * 1.6 + 0.1
return wrapped
def _returnRequestError(fn):
''' Decorator that captures requests.exceptions.RequestException errors
    and returns them as an error message. If no error occurs the return
    value of the wrapped function is returned (normally None). '''
@functools.wraps(fn)
def wrapped(*args, **kwargs):
try:
return fn(*args, **kwargs)
except requests.exceptions.RequestException as e:
return "server returned status %s: %s" % (e.response.status_code, e.message)
return wrapped
def _handleAuth(fn):
''' Decorator to re-try API calls after asking the user for authentication. '''
@functools.wraps(fn)
def wrapped(*args, **kwargs):
# auth, , authenticate users, internal
from yotta.lib import auth
# if yotta is being run noninteractively, then we never retry, but we
# do call auth.authorizeUser, so that a login URL can be displayed:
interactive = globalconf.get('interactive')
try:
return fn(*args, **kwargs)
except requests.exceptions.HTTPError as e:
if e.response.status_code == requests.codes.unauthorized: #pylint: disable=no-member
logger.debug('%s unauthorised', fn)
# any provider is sufficient for registry auth
auth.authorizeUser(provider=None, interactive=interactive)
if interactive:
logger.debug('retrying after authentication...')
return fn(*args, **kwargs)
raise
return wrapped
def _friendlyAuthError(fn):
''' Decorator to print a friendly you-are-not-authorised message. Use
**outside** the _handleAuth decorator to only print the message after
the user has been given a chance to login. '''
@functools.wraps(fn)
def wrapped(*args, **kwargs):
try:
return fn(*args, **kwargs)
except requests.exceptions.HTTPError as e:
if e.response.status_code == requests.codes.unauthorized: #pylint: disable=no-member
logger.error('insufficient permission')
elif e.response.status_code == requests.codes.bad and 'jwt has expired' in e.response.text.lower(): #pylint: disable=no-member
logger.error('server returned status %s: %s', e.response.status_code, e.response.text)
logger.error('Check that your system clock is set accurately!')
else:
logger.error('server returned status %s: %s', e.response.status_code, e.response.text)
raise
return wrapped
def _raiseUnavailableFor401(message):
''' Returns a decorator to swallow a requests exception for modules that
are not accessible without logging in, and turn it into an Unavailable
exception.
'''
def __raiseUnavailableFor401(fn):
def wrapped(*args, **kwargs):
try:
return fn(*args, **kwargs)
except requests.exceptions.HTTPError as e:
if e.response.status_code == requests.codes.unauthorized:
raise access_common.Unavailable(message)
else:
raise
return wrapped
return __raiseUnavailableFor401
def _swallowRequestExceptions(fail_return=None):
def __swallowRequestExceptions(fn):
''' Decorator to swallow known exceptions: use with _friendlyAuthError,
returns non-None if an exception occurred
'''
@functools.wraps(fn)
def wrapped(*args, **kwargs):
try:
return fn(*args, **kwargs)
except requests.exceptions.HTTPError as e:
return fail_return
return wrapped
return __swallowRequestExceptions
def _getPrivateRegistryKey():
if 'YOTTA_PRIVATE_REGISTRY_API_KEY' in os.environ:
return os.environ['YOTTA_PRIVATE_REGISTRY_API_KEY']
return None
@_retryConnectionErrors
def _listVersions(namespace, name):
sources = _getSources()
registry_urls = [s['url'] for s in sources if 'type' in s and s['type'] == 'registry']
# look in the public registry last
registry_urls.append(Registry_Base_URL)
versions = []
for registry in registry_urls:
# list versions of the package:
url = '%s/%s/%s/versions' % (
registry,
namespace,
name
)
request_headers = _headersForRegistry(registry)
logger.debug("GET %s, %s", url, request_headers)
response = requests.get(url, headers=request_headers)
if response.status_code == 404:
continue
# raise any other HTTP errors
response.raise_for_status()
for x in ordered_json.loads(response.text):
rtv = RegistryThingVersion(x, namespace, name, registry=registry)
if not rtv in versions:
versions.append(rtv)
if not len(versions):
raise access_common.Unavailable(
('%s does not exist in the %s registry. '+
'Check that the name is correct, and that it has been published.') % (name, namespace)
)
return versions
def _tarballURL(namespace, name, version, registry=None):
registry = registry or Registry_Base_URL
return '%s/%s/%s/versions/%s/tarball' % (
registry, namespace, name, version
)
@_retryConnectionErrors
@_raiseUnavailableFor401("dependency is not available without logging in")
@_friendlyAuthError
@_handleAuth
def _getTarball(url, directory, sha256):
logger.debug('registry: get: %s' % url)
if not sha256:
        logger.warning('tarball %s has no hash to check' % url)
try:
access_common.unpackFromCache(sha256, directory)
except KeyError as e:
# figure out which registry we're fetching this tarball from (if any)
# and add appropriate headers
registry = Registry_Base_URL
for source in _getSources():
if ('type' in source and source['type'] == 'registry' and
'url' in source and url.startswith(source['url'])):
registry = source['url']
break
request_headers = _headersForRegistry(registry)
logger.debug('GET %s, %s', url, request_headers)
response = requests.get(url, headers=request_headers, allow_redirects=True, stream=True)
response.raise_for_status()
access_common.unpackTarballStream(
stream = response,
into_directory = directory,
hash = {'sha256':sha256},
cache_key = sha256,
origin_info = {'url':url}
)
def _getSources():
sources = settings.get('sources')
if sources is None:
sources = []
return sources
def _isPublicRegistry(registry):
return (registry is None) or (registry == Registry_Base_URL)
def friendlyRegistryName(registry, short=False):
if registry.startswith(Registry_Base_URL):
if short:
return 'public registry'
else:
return 'the public module registry'
else:
return registry
def _getPrivateKey(registry):
if _isPublicRegistry(registry):
return settings.getProperty('keys', 'private')
else:
for s in _getSources():
if _sourceMatches(s, registry):
if 'keys' in s and s['keys'] and 'private' in s['keys']:
return s['keys']['private']
return None
def _sourceMatches(source, registry):
return ('type' in source and source['type'] == 'registry' and
'url' in source and source['url'] == registry)
def _generateAndSaveKeys(registry=None):
registry = registry or Registry_Base_URL
k = rsa.generate_private_key(
public_exponent=65537, key_size=2048, backend=default_backend()
)
privatekey_pem = k.private_bytes(
serialization.Encoding.PEM,
serialization.PrivateFormat.PKCS8,
serialization.NoEncryption()
)
pubkey_pem = k.public_key().public_bytes(
serialization.Encoding.PEM,
serialization.PublicFormat.SubjectPublicKeyInfo
)
if _isPublicRegistry(registry):
settings.setProperty('keys', 'private', privatekey_pem.decode('ascii'))
settings.setProperty('keys', 'public', pubkey_pem.decode('ascii'))
else:
sources = _getSources()
keys = None
for s in sources:
if _sourceMatches(s, registry):
if not 'keys' in s:
s['keys'] = dict()
keys = s['keys']
break
if keys is None:
keys = dict()
sources.append({
'type':'registry',
'url':registry,
'keys':keys
})
keys['private'] = privatekey_pem.decode('ascii')
keys['public'] = pubkey_pem.decode('ascii')
settings.set('sources', sources)
return pubkey_pem, privatekey_pem
def _getPrivateKeyObject(registry=None):
registry = registry or Registry_Base_URL
privatekey_pem = _getPrivateKey(registry)
if not privatekey_pem:
pubkey_pem, privatekey_pem = _generateAndSaveKeys(registry)
else:
        # settings are unicode; we should be able to safely encode to ASCII for
        # the key though, as it will either be hex or PEM encoded:
privatekey_pem = privatekey_pem.encode('ascii')
    # if the key doesn't look like PEM, it might be hex-encoded DER (which we
# used historically), so try loading that:
if b'-----BEGIN PRIVATE KEY-----' in privatekey_pem:
return serialization.load_pem_private_key(
privatekey_pem, None, default_backend()
)
else:
privatekey_der = binascii.unhexlify(privatekey_pem)
return serialization.load_der_private_key(
privatekey_der, None, default_backend()
)
def _getYottaVersion():
import yotta
return yotta.__version__
def _getYottaClientUUID():
import uuid
current_uuid = settings.get('uuid')
if current_uuid is None:
current_uuid = u'%s' % uuid.uuid4()
settings.set('uuid', current_uuid)
return current_uuid
def _headersForRegistry(registry):
registry = registry or Registry_Base_URL
auth_token = generate_jwt_token(_getPrivateKeyObject(registry), registry)
mbed_user_id = os.environ.get('MBED_USER_ID', None)
r = {
'Authorization': 'Bearer %s' % auth_token,
'X-Yotta-Client-Version': _getYottaVersion(),
'X-Yotta-Client-ID': _getYottaClientUUID()
}
if mbed_user_id is not None:
r['X-Yotta-MBED-User-ID'] = mbed_user_id
if registry == Registry_Base_URL:
return r
for s in _getSources():
if _sourceMatches(s, registry):
if 'apikey' in s:
r['X-Api-Key'] = s['apikey']
break
return r
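# --- Illustrative sketch (not part of the original module) ---
# The dictionary returned by _headersForRegistry() above takes roughly this
# shape; the token, version and UUID values are placeholders, not real output.
_EXAMPLE_REGISTRY_REQUEST_HEADERS = {
    'Authorization': 'Bearer <jwt signed with the local private key>',
    'X-Yotta-Client-Version': '<yotta version>',
    'X-Yotta-Client-ID': '<persistent client uuid>',
    # 'X-Api-Key': '<api key>',        # only for private-registry sources
    # 'X-Yotta-MBED-User-ID': '<id>',  # only if MBED_USER_ID is set
}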
# API
class RegistryThingVersion(access_common.RemoteVersion):
def __init__(self, data, namespace, name, registry=None):
logger.debug('RegistryThingVersion %s/%s data: %s' % (namespace, name, data))
version = data['version']
self.namespace = namespace
self.name = name
self.version = version
if 'hash' in data and 'sha256' in data['hash']:
self.sha256 = data['hash']['sha256']
else:
self.sha256 = None
url = _tarballURL(self.namespace, self.name, version, registry)
super(RegistryThingVersion, self).__init__(
version, url, name=name, friendly_source=friendlyRegistryName(registry)
)
def unpackInto(self, directory):
assert(self.url)
_getTarball(self.url, directory, self.sha256)
class RegistryThing(access_common.RemoteComponent):
def __init__(self, name, version_spec, namespace):
self.name = name
self.spec = version_spec
self.namespace = namespace
@classmethod
def createFromSource(cls, vs, name, registry):
''' returns a registry component for anything that's a valid package
name (this does not guarantee that the component actually exists in
the registry: use availableVersions() for that).
'''
# we deliberately allow only lowercase, hyphen, and (unfortunately)
# numbers in package names, to reduce the possibility of confusingly
# similar names: if the name doesn't match this then escalate to make
# the user fix it. Targets also allow +
if registry == 'targets':
name_match = re.match('^[a-z]+[a-z0-9+-]*$', name)
if not name_match:
raise access_common.AccessException(
'Target name "%s" is not valid (must contain only lowercase letters, hyphen, plus, and numbers)' % name
)
else:
name_match = re.match('^[a-z]+[a-z0-9-]*$', name)
if not name_match:
raise access_common.AccessException(
'Module name "%s" is not valid (must contain only lowercase letters, hyphen, and numbers)' % name
)
assert(vs.semantic_spec)
return RegistryThing(name, vs.semantic_spec, registry)
def versionSpec(self):
return self.spec
def availableVersions(self):
''' return a list of Version objects, each able to retrieve a tarball '''
return _listVersions(self.namespace, self.name)
def tipVersion(self):
raise NotImplementedError()
@classmethod
def remoteType(cls):
return 'registry'
@_swallowRequestExceptions(fail_return="request exception occurred")
@_retryConnectionErrors
@_friendlyAuthError
@_handleAuth
def publish(namespace, name, version, description_file, tar_file, readme_file,
readme_file_ext, registry=None):
    ''' Publish a tarblob to the registry. If the request fails, an exception
        is raised, which either triggers re-authentication or is turned into a
        return value by the decorators. (If successful, the decorated function
        returns None.)
    '''
registry = registry or Registry_Base_URL
url = '%s/%s/%s/versions/%s' % (
registry,
namespace,
name,
version
)
if readme_file_ext == '.md':
readme_section_name = 'readme.md'
elif readme_file_ext == '':
readme_section_name = 'readme'
else:
raise ValueError('unsupported readme type: "%s"' % readme_file_ext)
# description file is in place as text (so read it), tar file is a file
body = OrderedDict([('metadata', (None, description_file.read(),'application/json')),
('tarball',('tarball', tar_file)),
(readme_section_name, (readme_section_name, readme_file))])
headers = _headersForRegistry(registry)
response = requests.put(url, headers=headers, files=body)
response.raise_for_status()
return None
@_swallowRequestExceptions(fail_return="request exception occurred")
@_retryConnectionErrors
@_friendlyAuthError
@_handleAuth
def unpublish(namespace, name, version, registry=None):
''' Try to unpublish a recently published version. Return any errors that
occur.
'''
registry = registry or Registry_Base_URL
url = '%s/%s/%s/versions/%s' % (
registry,
namespace,
name,
version
)
headers = _headersForRegistry(registry)
response = requests.delete(url, headers=headers)
response.raise_for_status()
return None
@_swallowRequestExceptions(fail_return=None)
@_retryConnectionErrors
@_friendlyAuthError
@_handleAuth
def listOwners(namespace, name, registry=None):
''' List the owners of a module or target (owners are the people with
permission to publish versions and add/remove the owners).
'''
registry = registry or Registry_Base_URL
url = '%s/%s/%s/owners' % (
registry,
namespace,
name
)
request_headers = _headersForRegistry(registry)
response = requests.get(url, headers=request_headers)
if response.status_code == 404:
logger.error('no such %s, "%s"' % (namespace[:-1], name))
return None
# raise exceptions for other errors - the auth decorators handle these and
# re-try if appropriate
response.raise_for_status()
return ordered_json.loads(response.text)
@_swallowRequestExceptions(fail_return=None)
@_retryConnectionErrors
@_friendlyAuthError
@_handleAuth
def addOwner(namespace, name, owner, registry=None):
''' Add an owner for a module or target (owners are the people with
permission to publish versions and add/remove the owners).
'''
registry = registry or Registry_Base_URL
url = '%s/%s/%s/owners/%s' % (
registry,
namespace,
name,
owner
)
request_headers = _headersForRegistry(registry)
response = requests.put(url, headers=request_headers)
if response.status_code == 404:
logger.error('no such %s, "%s"' % (namespace[:-1], name))
return
# raise exceptions for other errors - the auth decorators handle these and
# re-try if appropriate
response.raise_for_status()
return True
@_swallowRequestExceptions(fail_return=None)
@_retryConnectionErrors
@_friendlyAuthError
@_handleAuth
def removeOwner(namespace, name, owner, registry=None):
''' Remove an owner for a module or target (owners are the people with
permission to publish versions and add/remove the owners).
'''
registry = registry or Registry_Base_URL
url = '%s/%s/%s/owners/%s' % (
registry,
namespace,
name,
owner
)
request_headers = _headersForRegistry(registry)
response = requests.delete(url, headers=request_headers)
if response.status_code == 404:
logger.error('no such %s, "%s"' % (namespace[:-1], name))
return
# raise exceptions for other errors - the auth decorators handle these and
# re-try if appropriate
response.raise_for_status()
return True
@_friendlyAuthError
@_retryConnectionErrors
def whoami(registry=None):
registry = registry or Registry_Base_URL
url = '%s/users/me' % (
registry
)
request_headers = _headersForRegistry(registry)
logger.debug('test login...')
response = requests.get(url, headers=request_headers)
if response.status_code == 401:
# not logged in
return None
else:
response.raise_for_status()
return ', '.join(ordered_json.loads(response.text).get('primary_emails', {}).values())
@_retryConnectionErrors
def search(query='', keywords=[], registry=None):
''' generator of objects returned by the search endpoint (both modules and
targets).
Query is a full-text search (description, name, keywords), keywords
search only the module/target description keywords lists.
If both parameters are specified the search is the intersection of the
two queries.
'''
registry = registry or Registry_Base_URL
url = '%s/search' % registry
headers = _headersForRegistry(registry)
params = {
'skip': 0,
'limit': 50
}
if len(query):
params['query'] = query
if len(keywords):
params['keywords[]'] = keywords
while True:
response = requests.get(url, headers=headers, params=params)
response.raise_for_status()
objects = ordered_json.loads(response.text)
if len(objects):
for o in objects:
yield o
params['skip'] += params['limit']
else:
break
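# --- Illustrative sketch (not part of the original module) ---
# A minimal example of consuming the search() generator defined above. The
# query string and keyword list are assumptions made purely for illustration;
# search() simply yields whatever JSON objects the registry returns.
def _exampleSearchUsage():
    for result in search(query='gpio', keywords=['driver']):
        print(result)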
def deauthorize(registry=None):
registry = registry or Registry_Base_URL
if _isPublicRegistry(registry):
if settings.get('keys'):
settings.set('keys', dict())
else:
sources = [s for s in _getSources() if not _sourceMatches(s, registry)]
settings.set('sources', sources)
def setAPIKey(registry, api_key):
''' Set the api key for accessing a registry. This is only necessary for
development/test registries.
'''
if (registry is None) or (registry == Registry_Base_URL):
return
sources = _getSources()
source = None
for s in sources:
if _sourceMatches(s, registry):
source = s
if source is None:
source = {
'type':'registry',
'url':registry,
}
sources.append(source)
source['apikey'] = api_key
settings.set('sources', sources)
def getPublicKey(registry=None):
''' Return the user's public key (generating and saving a new key pair if necessary) '''
registry = registry or Registry_Base_URL
pubkey_pem = None
if _isPublicRegistry(registry):
pubkey_pem = settings.getProperty('keys', 'public')
else:
for s in _getSources():
if _sourceMatches(s, registry):
if 'keys' in s and s['keys'] and 'public' in s['keys']:
pubkey_pem = s['keys']['public']
break
if not pubkey_pem:
pubkey_pem, privatekey_pem = _generateAndSaveKeys()
else:
        # settings are unicode; we should be able to safely encode to ASCII for
        # the key though, as it will either be hex or PEM encoded:
pubkey_pem = pubkey_pem.encode('ascii')
    # if the key doesn't look like PEM, it might be hex-encoded DER (which we
# used historically), so try loading that:
if b'-----BEGIN PUBLIC KEY-----' in pubkey_pem:
pubkey = serialization.load_pem_public_key(pubkey_pem, default_backend())
else:
pubkey_der = binascii.unhexlify(pubkey_pem)
pubkey = serialization.load_der_public_key(pubkey_der, default_backend())
return _pubkeyWireFormat(pubkey)
@_retryConnectionErrors
def getAuthData(registry=None):
''' Poll the registry to get the result of a completed authentication
(which, depending on the authentication the user chose or was directed
to, will include a github or other access token)
'''
registry = registry or Registry_Base_URL
url = '%s/tokens' % (
registry
)
request_headers = _headersForRegistry(registry)
logger.debug('poll for tokens... %s', request_headers)
try:
response = requests.get(url, headers=request_headers)
except requests.RequestException as e:
logger.debug(str(e))
return None
if response.status_code == requests.codes.unauthorized: #pylint: disable=no-member
logger.debug('Unauthorised')
return None
elif response.status_code == requests.codes.not_found: #pylint: disable=no-member
logger.debug('Not Found')
return None
body = response.text
    logger.debug('auth data response: %s' % body)
r = {}
parsed_response = ordered_json.loads(body)
if 'error' in parsed_response:
raise AuthError(parsed_response['error'])
for token in parsed_response:
if 'provider' in token and token['provider'] and 'accessToken' in token:
r[token['provider']] = token['accessToken']
break
    logger.debug('parsed auth tokens %s' % r)
return r
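# --- Illustrative sketch (not part of the original module) ---
# The /tokens response parsed by getAuthData() above is a JSON list of objects
# carrying 'provider' and 'accessToken' (or an 'error' key); the values below
# are placeholders for illustration only.
_EXAMPLE_TOKENS_RESPONSE = '[{"provider": "github", "accessToken": "<token>"}]'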
def getLoginURL(provider=None, registry=None):
registry = registry or Registry_Base_URL
if provider:
query = ('?provider=%s' % provider)
else:
query = ''
if not _isPublicRegistry(registry):
if not len(query):
query = '?'
query += '&private=1'
return Website_Base_URL + '/' + query + '#login/' + getPublicKey(registry)
|
autopulated/yotta
|
yotta/lib/registry_access.py
|
Python
|
apache-2.0
| 27,971
|
# Copyright (c) 2014 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_config import cfg
from oslo_log import log as logging
from sahara import context
from sahara.utils import rpc as messaging
LOG = logging.getLogger(__name__)
SERVICE = 'sahara'
CLUSTER_EVENT_TEMPLATE = "sahara.cluster.%s"
HEALTH_EVENT_TYPE = CLUSTER_EVENT_TEMPLATE % "health"
notifier_opts = [
cfg.StrOpt('level',
default='INFO',
deprecated_name='notification_level',
deprecated_group='DEFAULT',
help='Notification level for outgoing notifications'),
cfg.StrOpt('publisher_id',
deprecated_name='notification_publisher_id',
deprecated_group='DEFAULT')
]
notifier_opts_group = 'oslo_messaging_notifications'
CONF = cfg.CONF
CONF.register_opts(notifier_opts, group=notifier_opts_group)
def _get_publisher():
publisher_id = CONF.oslo_messaging_notifications.publisher_id
if publisher_id is None:
publisher_id = SERVICE
return publisher_id
def _notify(event_type, body):
LOG.debug("Notification about cluster is going to be sent. Notification "
"type={type}".format(type=event_type))
ctx = context.ctx()
level = CONF.oslo_messaging_notifications.level
body.update({'project_id': ctx.tenant_id, 'user_id': ctx.user_id})
client = messaging.get_notifier(_get_publisher())
method = getattr(client, level.lower())
method(ctx, event_type, body)
def _health_notification_body(cluster, health_check):
verification = cluster.verification
return {
'cluster_id': cluster.id,
'cluster_name': cluster.name,
'verification_id': verification['id'],
'health_check_status': health_check['status'],
'health_check_name': health_check['name'],
'health_check_description': health_check['description'],
'created_at': health_check['created_at'],
'updated_at': health_check['updated_at']
}
def status_notify(cluster_id, cluster_name, cluster_status, ev_type):
"""Sends notification about creating/updating/deleting cluster."""
_notify(CLUSTER_EVENT_TEMPLATE % ev_type, {
'cluster_id': cluster_id, 'cluster_name': cluster_name,
'cluster_status': cluster_status})
def health_notify(cluster, health_check):
"""Sends notification about current cluster health."""
_notify(HEALTH_EVENT_TYPE,
_health_notification_body(cluster, health_check))
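# --- Illustrative sketch (not part of the original module) ---
# A minimal example of how the two public helpers above might be called; the
# cluster attributes and the 'update' event type are assumptions made only
# for illustration.
def _example_notifications(cluster, health_check):
    status_notify(cluster.id, cluster.name, cluster.status, 'update')
    health_notify(cluster, health_check)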
|
tellesnobrega/sahara
|
sahara/utils/notification/sender.py
|
Python
|
apache-2.0
| 2,996
|
"""
Module responsible for translating reference sequence data into GA4GH native
objects.
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import hashlib
import json
import os
import random
import pysam
import ga4gh.datamodel as datamodel
import ga4gh.protocol as protocol
import ga4gh.exceptions as exceptions
DEFAULT_REFERENCESET_NAME = "Default"
"""
This is the name used for any reference set referred to in a BAM
file that does not provide the 'AS' tag in the @SQ header.
"""
class AbstractReferenceSet(datamodel.DatamodelObject):
"""
Class representing ReferenceSets. A ReferenceSet is a set of
References which typically comprise a reference assembly, such as
GRCh38.
"""
compoundIdClass = datamodel.ReferenceSetCompoundId
def __init__(self, localId):
super(AbstractReferenceSet, self).__init__(None, localId)
self._referenceIdMap = {}
self._referenceNameMap = {}
self._referenceIds = []
self._assemblyId = None
self._description = None
self._isDerived = False
self._ncbiTaxonId = None
self._sourceAccessions = []
self._sourceUri = None
def addReference(self, reference):
"""
Adds the specified reference to this ReferenceSet.
"""
id_ = reference.getId()
self._referenceIdMap[id_] = reference
self._referenceNameMap[reference.getLocalId()] = reference
self._referenceIds.append(id_)
def getReferences(self):
"""
Returns the References in this ReferenceSet.
"""
return [self._referenceIdMap[id_] for id_ in self._referenceIds]
def getNumReferences(self):
"""
Returns the number of references in this ReferenceSet.
"""
return len(self._referenceIds)
def getReferenceByIndex(self, index):
"""
Returns the reference at the specified index in this ReferenceSet.
"""
return self._referenceIdMap[self._referenceIds[index]]
def getReferenceByName(self, name):
"""
Returns the reference with the specified name.
"""
if name not in self._referenceNameMap:
raise exceptions.ReferenceNameNotFoundException(name)
return self._referenceNameMap[name]
def getReference(self, id_):
"""
Returns the Reference with the specified ID or raises a
ReferenceNotFoundException if it does not exist.
"""
if id_ not in self._referenceIdMap:
raise exceptions.ReferenceNotFoundException(id_)
return self._referenceIdMap[id_]
def getMd5Checksum(self):
"""
Returns the MD5 checksum for this reference set. This checksum is
calculated by making a list of `Reference.md5checksum` for all
`Reference`s in this set. We then sort this list, and take the
MD5 hash of all the strings concatenated together.
"""
references = sorted(
self.getReferences(),
key=lambda ref: ref.getMd5Checksum())
checksums = ''.join([ref.getMd5Checksum() for ref in references])
md5checksum = hashlib.md5(checksums).hexdigest()
return md5checksum
def getAssemblyId(self):
"""
Returns the assembly ID for this reference set.
This is the public id of this reference set, such as `GRCh37`
"""
return self._assemblyId
def getDescription(self):
"""
Returns the free text description of this reference set.
"""
return self._description
def getIsDerived(self):
"""
Returns True if this ReferenceSet is derived. A ReferenceSet
may be derived from a source if it contains additional sequences,
or some of the sequences within it are derived.
"""
return self._isDerived
def getSourceAccessions(self):
"""
Returns the list of source accession strings. These are all known
corresponding accession IDs in INSDC (GenBank/ENA/DDBJ) ideally
with a version number, e.g. `NC_000001.11`.
"""
return self._sourceAccessions
def getSourceUri(self):
"""
Returns the sourceURI for this ReferenceSet.
"""
return self._sourceUri
def getNcbiTaxonId(self):
"""
Returns the NCBI Taxon ID for this reference set. This is the
ID from http://www.ncbi.nlm.nih.gov/taxonomy (e.g. 9606->human)
indicating the species which this assembly is intended to model.
Note that contained `Reference`s may specify a different
`ncbiTaxonId`, as assemblies may contain reference sequences
which do not belong to the modeled species, e.g. EBV in a
human reference genome.
"""
return self._ncbiTaxonId
def toProtocolElement(self):
"""
Returns the GA4GH protocol representation of this ReferenceSet.
"""
ret = protocol.ReferenceSet()
ret.assemblyId = self.getAssemblyId()
ret.description = self.getDescription()
ret.id = self.getId()
ret.isDerived = self.getIsDerived()
ret.md5checksum = self.getMd5Checksum()
ret.ncbiTaxonId = self.getNcbiTaxonId()
ret.referenceIds = self._referenceIds
ret.sourceAccessions = self.getSourceAccessions()
ret.sourceURI = self.getSourceUri()
ret.name = self.getLocalId()
return ret
class AbstractReference(datamodel.DatamodelObject):
"""
Class representing References. A Reference is a canonical
assembled contig, intended to act as a reference coordinate space
for other genomic annotations. A single Reference might represent
the human chromosome 1, for instance.
"""
compoundIdClass = datamodel.ReferenceCompoundId
def __init__(self, parentContainer, localId):
super(AbstractReference, self).__init__(parentContainer, localId)
self._length = -1
self._md5checksum = ""
self._sourceUri = None
self._sourceAccessions = []
self._isDerived = False
self._sourceDivergence = None
self._ncbiTaxonId = None
def getLength(self):
"""
Returns the length of this reference's sequence string.
"""
return self._length
def getName(self):
"""
Returns the name of this reference, e.g., '22'.
"""
return self.getLocalId()
def getIsDerived(self):
"""
Returns True if this Reference is derived. A sequence X is said to be
derived from source sequence Y, if X and Y are of the same length and
the per-base sequence divergence at A/C/G/T bases is sufficiently
small. Two sequences derived from the same official sequence share the
same coordinates and annotations, and can be replaced with the official
sequence for certain use cases.
"""
return self._isDerived
def getSourceDivergence(self):
"""
Returns the source divergence for this reference. The sourceDivergence
is the fraction of non-indel bases that do not match the
reference this record was derived from.
"""
return self._sourceDivergence
def getSourceAccessions(self):
"""
Returns the list of source accession strings. These are all known
corresponding accession IDs in INSDC (GenBank/ENA/DDBJ) ideally
with a version number, e.g. `NC_000001.11`.
"""
return self._sourceAccessions
def getSourceUri(self):
"""
The URI from which the sequence was obtained. Specifies a FASTA format
file/string with one name, sequence pair.
"""
return self._sourceUri
def getNcbiTaxonId(self):
"""
Returns the NCBI Taxon ID for this reference. This is the
ID from http://www.ncbi.nlm.nih.gov/taxonomy (e.g. 9606->human)
indicating the species which this assembly is intended to model.
Note that contained `Reference`s may specify a different
`ncbiTaxonId`, as assemblies may contain reference sequences
which do not belong to the modeled species, e.g. EBV in a
human reference genome.
"""
return self._ncbiTaxonId
def getMd5Checksum(self):
"""
Returns the MD5 checksum uniquely representing this `Reference` as a
lower-case hexadecimal string, calculated as the MD5 of the upper-case
sequence excluding all whitespace characters.
"""
return self._md5checksum
def toProtocolElement(self):
"""
Returns the GA4GH protocol representation of this Reference.
"""
reference = protocol.Reference()
reference.id = self.getId()
reference.isDerived = self.getIsDerived()
reference.length = self.getLength()
reference.md5checksum = self.getMd5Checksum()
reference.name = self.getName()
reference.ncbiTaxonId = self.getNcbiTaxonId()
reference.sourceAccessions = self.getSourceAccessions()
reference.sourceDivergence = self.getSourceDivergence()
reference.sourceURI = self.getSourceUri()
return reference
def checkQueryRange(self, start, end):
"""
Checks to ensure that the query range is valid within this reference.
If not, raise ReferenceRangeErrorException.
"""
condition = (
(start < 0 or end > self.getLength()) or
start > end)
if condition:
raise exceptions.ReferenceRangeErrorException(
self.getId(), start, end)
def getBases(self, start, end):
"""
Returns the string representing the bases of this reference from
start (inclusive) to end (exclusive).
"""
        raise NotImplementedError()
##################################################################
#
# Simulated references
#
##################################################################
class SimulatedReferenceSet(AbstractReferenceSet):
"""
A simulated referenceSet
"""
def __init__(self, localId, randomSeed=0, numReferences=1):
super(SimulatedReferenceSet, self).__init__(localId)
self._randomSeed = randomSeed
self._randomGenerator = random.Random()
self._randomGenerator.seed(self._randomSeed)
self._description = "Simulated reference set"
self._assemblyId = str(random.randint(0, 2**32))
self._isDerived = bool(random.randint(0, 1))
self._ncbiTaxonId = random.randint(0, 2**16)
self._sourceAccessions = []
for i in range(random.randint(1, 3)):
self._sourceAccessions.append("sim_accession_{}".format(
random.randint(1, 2**32)))
self._sourceUri = "http://example.com/reference.fa"
for i in range(numReferences):
referenceSeed = self._randomGenerator.getrandbits(32)
referenceLocalId = "srs{}".format(i)
reference = SimulatedReference(
self, referenceLocalId, referenceSeed)
self.addReference(reference)
class SimulatedReference(AbstractReference):
"""
A simulated reference. Stores a random sequence of a given length, and
generates remaining attributes randomly.
"""
def __init__(self, parentContainer, localId, randomSeed=0, length=200):
super(SimulatedReference, self).__init__(parentContainer, localId)
rng = random.Random()
rng.seed(randomSeed)
self._length = length
bases = [rng.choice('ACGT') for _ in range(self._length)]
self._bases = ''.join(bases)
self._md5checksum = hashlib.md5(self._bases).hexdigest()
self._isDerived = bool(rng.randint(0, 1))
self._sourceDivergence = 0
if self._isDerived:
self._sourceDivergence = rng.uniform(0, 0.1)
self._ncbiTaxonId = random.randint(0, 2**16)
self._sourceAccessions = []
for i in range(random.randint(1, 3)):
self._sourceAccessions.append("sim_accession_{}".format(
random.randint(1, 2**32)))
self._sourceUri = "http://example.com/reference.fa"
def getBases(self, start, end):
self.checkQueryRange(start, end)
return self._bases[start:end]
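# --- Illustrative sketch (not part of the original module) ---
# A minimal example of building a simulated reference set and reading bases
# from its first reference; the localId, seed and slice are arbitrary.
def _exampleSimulatedUsage():
    referenceSet = SimulatedReferenceSet("exampleSet", randomSeed=1,
                                         numReferences=2)
    reference = referenceSet.getReferenceByIndex(0)
    return reference.getBases(0, 10)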
##################################################################
#
# References based on htslib's FASTA file handling.
#
##################################################################
class HtslibReferenceSet(datamodel.PysamDatamodelMixin, AbstractReferenceSet):
"""
A referenceSet based on data on a file system
"""
def __init__(self, localId, dataDir, backend):
super(HtslibReferenceSet, self).__init__(localId)
self._dataDir = dataDir
self._setMetadata()
self._scanDataFiles(dataDir, ["*.fa.gz"])
def _setMetadata(self):
metadataFileName = '{}.json'.format(self._dataDir)
with open(metadataFileName) as metadataFile:
metadata = json.load(metadataFile)
try:
self._assemblyId = metadata['assemblyId']
self._description = metadata['description']
self._isDerived = metadata['isDerived']
self._ncbiTaxonId = metadata['ncbiTaxonId']
self._sourceAccessions = metadata['sourceAccessions']
self._sourceUri = metadata['sourceUri']
except KeyError as err:
raise exceptions.MissingReferenceSetMetadata(
metadataFileName, str(err))
def _addDataFile(self, path):
dirname, filename = os.path.split(path)
localId = filename.split(".")[0]
metadataFileName = os.path.join(dirname, "{}.json".format(localId))
with open(metadataFileName) as metadataFile:
metadata = json.load(metadataFile)
reference = HtslibReference(self, localId, path, metadata)
self.addReference(reference)
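# --- Illustrative sketch (not part of the original module) ---
# The reference-set metadata JSON consumed by _setMetadata() above must carry
# at least the keys accessed there; the values below are invented examples.
_EXAMPLE_REFERENCESET_METADATA = {
    "assemblyId": "GRCh38",
    "description": "Example reference set",
    "isDerived": False,
    "ncbiTaxonId": 9606,
    "sourceAccessions": ["NC_000001.11"],
    "sourceUri": "http://example.com/reference.fa",
}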
class HtslibReference(datamodel.PysamDatamodelMixin, AbstractReference):
"""
A reference based on data stored in a file on the file system
"""
def __init__(self, parentContainer, localId, dataFile, metadata):
super(HtslibReference, self).__init__(parentContainer, localId)
self._fastaFilePath = dataFile
fastaFile = self.getFileHandle(dataFile)
numReferences = len(fastaFile.references)
if numReferences != 1:
raise exceptions.NotExactlyOneReferenceException(
self._fastaFilePath, numReferences)
if fastaFile.references[0] != localId:
raise exceptions.InconsistentReferenceNameException(
self._fastaFilePath)
self._length = fastaFile.lengths[0]
try:
self._md5checksum = metadata["md5checksum"]
self._sourceUri = metadata["sourceUri"]
self._ncbiTaxonId = metadata["ncbiTaxonId"]
self._isDerived = metadata["isDerived"]
self._sourceDivergence = metadata["sourceDivergence"]
self._sourceAccessions = metadata["sourceAccessions"]
except KeyError as err:
raise exceptions.MissingReferenceMetadata(dataFile, str(err))
def getFastaFilePath(self):
"""
Returns the fasta file that this reference is derived from.
"""
return self._fastaFilePath
def openFile(self, dataFile):
return pysam.FastaFile(dataFile)
def getBases(self, start, end):
self.checkQueryRange(start, end)
fastaFile = self.getFileHandle(self._fastaFilePath)
# TODO we should have some error checking here...
bases = fastaFile.fetch(self.getLocalId(), start, end)
return bases
|
diekhans/ga4gh-server
|
ga4gh/datamodel/references.py
|
Python
|
apache-2.0
| 15,873
|
# Copyright 2015-2017 Cisco Systems, Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""BGP Attribute MP_UNREACH_NLRI
"""
import struct
from yabgp.message.attribute import Attribute
from yabgp.message.attribute import AttributeFlag
from yabgp.message.attribute import AttributeID
from yabgp.message.attribute.nlri.ipv4_mpls_vpn import IPv4MPLSVPN
from yabgp.message.attribute.nlri.ipv6_mpls_vpn import IPv6MPLSVPN
from yabgp.message.attribute.nlri.ipv4_flowspec import IPv4FlowSpec
from yabgp.message.attribute.nlri.ipv6_unicast import IPv6Unicast
from yabgp.message.attribute.nlri.labeled_unicast.ipv4 import IPv4LabeledUnicast
from yabgp.message.attribute.nlri.evpn import EVPN
from yabgp.message.attribute.nlri.linkstate import BGPLS
from yabgp.message.attribute.nlri.ipv4_srte import IPv4SRTE
from yabgp.common import afn
from yabgp.common import safn
from yabgp.common import exception as excep
from yabgp.common import constants as bgp_cons
class MpUnReachNLRI(Attribute):
"""
This is an optional non-transitive attribute that can be used for the
purpose of withdrawing multiple unfeasible routes from service.
An UPDATE message that contains the MP_UNREACH_NLRI is not required
to carry any other path attributes.
MP_UNREACH_NLRI coding information
+---------------------------------------------------------+
| Address Family Identifier (2 octets) |
+---------------------------------------------------------+
| Subsequent Address Family Identifier (1 octet) |
+---------------------------------------------------------+
| Withdrawn Routes (variable) |
+---------------------------------------------------------+
"""
ID = AttributeID.MP_UNREACH_NLRI
FLAG = AttributeFlag.OPTIONAL + AttributeFlag.EXTENDED_LENGTH
@classmethod
def parse(cls, value):
try:
afi, safi = struct.unpack('!HB', value[0:3])
except Exception:
raise excep.UpdateMessageError(sub_error=bgp_cons.ERR_MSG_UPDATE_ATTR_LEN,
data='')
nlri_bin = value[3:]
# for IPv4
if afi == afn.AFNUM_INET:
# VPNv4
if safi == safn.SAFNUM_LAB_VPNUNICAST:
nlri = IPv4MPLSVPN.parse(nlri_bin, iswithdraw=True)
return dict(afi_safi=(afi, safi), withdraw=nlri)
# BGP flow spec
elif safi == safn.SAFNUM_FSPEC_RULE:
# if nlri length is greater than 240 bytes, it is encoded over 2 bytes
withdraw_list = []
while nlri_bin:
length = ord(nlri_bin[0:1])
if length >> 4 == 0xf and len(nlri_bin) > 2:
length = struct.unpack('!H', nlri_bin[:2])[0]
nlri_tmp = nlri_bin[2: length + 2]
nlri_bin = nlri_bin[length + 2:]
else:
nlri_tmp = nlri_bin[1: length + 1]
nlri_bin = nlri_bin[length + 1:]
nlri = IPv4FlowSpec.parse(nlri_tmp)
if nlri:
withdraw_list.append(nlri)
return dict(afi_safi=(afi, safi), withdraw=withdraw_list)
else:
return dict(afi_safi=(afn.AFNUM_INET, safi), withdraw=repr(nlri_bin))
# for ipv6
elif afi == afn.AFNUM_INET6:
# for ipv6 unicast
if safi == safn.SAFNUM_UNICAST:
return dict(afi_safi=(afi, safi), withdraw=IPv6Unicast.parse(nlri_data=nlri_bin))
elif safi == safn.SAFNUM_LAB_VPNUNICAST:
return dict(afi_safi=(afi, safi), withdraw=IPv6MPLSVPN.parse(value=nlri_bin, iswithdraw=True))
else:
return dict(afi_safi=(afi, safi), withdraw=repr(nlri_bin))
# for l2vpn
elif afi == afn.AFNUM_L2VPN:
# for evpn
if safi == safn.SAFNUM_EVPN:
return dict(afi_safi=(afi, safi), withdraw=EVPN.parse(nlri_data=nlri_bin))
else:
return dict(afi_safi=(afi, safi), withdraw=repr(nlri_bin))
# BGP LS
elif afi == afn.AFNUM_BGPLS:
if safi == safn.SAFNUM_BGPLS:
withdraw = BGPLS.parse(nlri_bin)
return dict(afi_safi=(afi, safi), withdraw=withdraw)
else:
pass
else:
return dict(afi_safi=(afi, safi), withdraw=repr(nlri_bin))
@classmethod
def construct(cls, value):
"""Construct a attribute
        :param value: python dictionary
            {'afi_safi': (1, 128),
             'withdraw': []}
        """
afi, safi = value['afi_safi']
if afi == afn.AFNUM_INET:
if safi == safn.SAFNUM_LAB_VPNUNICAST: # MPLS VPN
nlri = IPv4MPLSVPN.construct(value['withdraw'], iswithdraw=True)
if nlri:
attr_value = struct.pack('!H', afi) + struct.pack('!B', safi) + nlri
return struct.pack('!B', cls.FLAG) + struct.pack('!B', cls.ID) \
+ struct.pack('!H', len(attr_value)) + attr_value
else:
return None
elif safi == safn.SAFNUM_FSPEC_RULE:
try:
nlri_list = value.get('withdraw') or []
if not nlri_list:
return None
nlri_hex = b''
nlri_hex += IPv4FlowSpec.construct(value=nlri_list)
attr_value = struct.pack('!H', afi) + struct.pack('!B', safi) + nlri_hex
return struct.pack('!B', cls.FLAG) + struct.pack('!B', cls.ID) \
+ struct.pack('!H', len(attr_value)) + attr_value
except Exception:
raise excep.ConstructAttributeFailed(
reason='failed to construct attributes',
data=value
)
elif safi == safn.SAFNUM_SRTE:
try:
nlri_list = value.get('withdraw') or {}
if not nlri_list:
return None
nlri_hex = b''
nlri_hex += IPv4SRTE.construct(data=value['withdraw'])
attr_value = struct.pack('!H', afi) + struct.pack('!B', safi) + nlri_hex
return struct.pack('!B', cls.FLAG) + struct.pack('!B', cls.ID) \
+ struct.pack('!H', len(attr_value)) + attr_value
except Exception:
raise excep.ConstructAttributeFailed(
reason='failed to construct attributes',
data=value
)
elif safi == safn.SAFNUM_MPLS_LABEL:
try:
nlri_list = value.get('withdraw') or []
if not nlri_list:
return None
nlri_hex = b''
flag = 'withdraw'
nlri_hex += IPv4LabeledUnicast.construct(nlri_list, flag)
attr_value = struct.pack('!H', afi) + struct.pack('!B', safi) + nlri_hex
return struct.pack('!B', cls.FLAG) + struct.pack('!B', cls.ID) \
+ struct.pack('!H', len(attr_value)) + attr_value
except Exception:
raise excep.ConstructAttributeFailed(
reason='failed to construct attributes',
data=value
)
else:
raise excep.ConstructAttributeFailed(
                    reason='unsupported sub address family',
data=value)
elif afi == afn.AFNUM_INET6:
if safi == safn.SAFNUM_UNICAST:
nlri = IPv6Unicast.construct(nlri_list=value['withdraw'])
if nlri:
attr_value = struct.pack('!H', afi) + struct.pack('!B', safi) + nlri
return struct.pack('!B', cls.FLAG) + struct.pack('!B', cls.ID) \
+ struct.pack('!H', len(attr_value)) + attr_value
elif safi == safn.SAFNUM_LAB_VPNUNICAST:
nlri = IPv6MPLSVPN.construct(value=value['withdraw'], iswithdraw=True)
if nlri:
attr_value = struct.pack('!H', afi) + struct.pack('!B', safi) + nlri
return struct.pack('!B', cls.FLAG) + struct.pack('!B', cls.ID) \
+ struct.pack('!H', len(attr_value)) + attr_value
else:
return None
# for l2vpn
elif afi == afn.AFNUM_L2VPN:
# for evpn
if safi == safn.SAFNUM_EVPN:
nlri = EVPN.construct(nlri_list=value['withdraw'])
if nlri:
attr_value = struct.pack('!H', afi) + struct.pack('!B', safi) + nlri
return struct.pack('!B', cls.FLAG) + struct.pack('!B', cls.ID) \
+ struct.pack('!H', len(attr_value)) + attr_value
else:
return None
else:
raise excep.ConstructAttributeFailed(
                reason='unsupported sub address family',
data=value)
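# --- Illustrative sketch (not part of the original module) ---
# The fixed MP_UNREACH_NLRI header handled above is just an AFI (2 octets)
# followed by a SAFI (1 octet); the withdrawn-routes payload follows it. The
# IPv6-unicast pair below is chosen purely for illustration.
def _example_mp_unreach_header():
    header = struct.pack('!HB', afn.AFNUM_INET6, safn.SAFNUM_UNICAST)
    afi, safi = struct.unpack('!HB', header[0:3])
    return afi, safi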
|
meidli/yabgp
|
yabgp/message/attribute/mpunreachnlri.py
|
Python
|
apache-2.0
| 9,951
|
import numpy as np
import pytest
import xarray as xr
from aospy import Region
from aospy.region import (
_get_land_mask,
BoundsRect,
)
from aospy.internal_names import (
LAT_STR,
LON_STR,
SFC_AREA_STR,
LAND_MASK_STR
)
from aospy.utils import Longitude
@pytest.fixture()
def values_for_reg_arr():
return np.array([[-2., 1.],
[np.nan, 5.],
[3., 3.],
[4., 4.2]])
@pytest.fixture()
def data_for_reg_calcs(values_for_reg_arr):
lat = [-10., 1., 10., 20.]
lon = [1., 10.]
sfc_area = [0.5, 1., 0.5, 0.25]
land_mask = [1., 1., 0., 1.]
lat = xr.DataArray(lat, dims=[LAT_STR], coords=[lat])
lon = xr.DataArray(lon, dims=[LON_STR], coords=[lon])
sfc_area = xr.DataArray(sfc_area, dims=[LAT_STR], coords=[lat])
land_mask = xr.DataArray(land_mask, dims=[LAT_STR], coords=[lat])
sfc_area, _ = xr.broadcast(sfc_area, lon)
land_mask, _ = xr.broadcast(land_mask, lon)
da = xr.DataArray(values_for_reg_arr, coords=[lat, lon])
da.coords[SFC_AREA_STR] = sfc_area
da.coords[LAND_MASK_STR] = land_mask
return da
_alt_names = {LON_STR: 'LONS', LAT_STR: 'LATS', LAND_MASK_STR: 'lm',
SFC_AREA_STR: 'AREA'}
@pytest.fixture()
def data_reg_alt_names(data_for_reg_calcs):
return data_for_reg_calcs.rename(_alt_names)
region_no_land_mask = Region(
name='test',
description='Test region with no land mask',
west_bound=0.,
east_bound=5,
south_bound=0,
north_bound=90.,
do_land_mask=False
)
region_land_mask = Region(
name='test',
description='Test region with land mask',
west_bound=0.,
east_bound=5,
south_bound=0,
north_bound=90.,
do_land_mask=True
)
_expected_mask = [[False, False],
[True, False],
[True, False],
[True, False]]
def test_get_land_mask_without_land_mask(data_for_reg_calcs):
result = _get_land_mask(data_for_reg_calcs,
region_no_land_mask.do_land_mask)
expected = 1
assert result == expected
def test_get_land_mask_with_land_mask(data_for_reg_calcs):
result = _get_land_mask(data_for_reg_calcs, region_land_mask.do_land_mask)
expected = data_for_reg_calcs[LAND_MASK_STR]
xr.testing.assert_identical(result, expected)
def test_get_land_mask_non_aospy_name(data_reg_alt_names):
result = _get_land_mask(data_reg_alt_names, region_land_mask.do_land_mask,
land_mask_str=_alt_names[LAND_MASK_STR])
expected = data_reg_alt_names[_alt_names[LAND_MASK_STR]]
xr.testing.assert_identical(result, expected)
def test_region_init():
region = Region(
name='test',
description='region description',
west_bound=0.,
east_bound=5,
south_bound=0,
north_bound=90.,
do_land_mask=True
)
assert region.name == 'test'
assert region.description == 'region description'
assert isinstance(region.mask_bounds, tuple)
assert len(region.mask_bounds) == 1
assert isinstance(region.mask_bounds[0], BoundsRect)
assert np.all(region.mask_bounds[0] ==
(Longitude(0.), Longitude(5), 0, 90.))
assert region.do_land_mask is True
def test_region_init_mult_rect():
bounds_in = [[1, 2, 3, 4], (-12, -30, 2.3, 9)]
region = Region(name='test', mask_bounds=bounds_in)
assert isinstance(region.mask_bounds, tuple)
assert len(region.mask_bounds) == 2
for (w, e, s, n), bounds in zip(bounds_in, region.mask_bounds):
assert isinstance(bounds, tuple)
assert np.all(bounds == (Longitude(w), Longitude(e), s, n))
def test_region_init_bad_bounds():
with pytest.raises(ValueError):
Region(mask_bounds=[(1, 2, 3)])
Region(mask_bounds=[(1, 2, 3, 4),
(1, 2, 3)])
def test_make_mask_single_rect(data_for_reg_calcs):
result = region_land_mask._make_mask(data_for_reg_calcs)
expected = xr.DataArray(_expected_mask, dims=[LAT_STR, LON_STR],
coords={LAT_STR: data_for_reg_calcs[LAT_STR],
LON_STR: data_for_reg_calcs[LON_STR]})
xr.testing.assert_equal(result.transpose(), expected)
def test_make_mask_mult_rect(data_for_reg_calcs):
mask_bounds = (region_land_mask.mask_bounds[0], [0, 360, -20, -5])
region = Region(name='mult_rect', mask_bounds=mask_bounds)
result = region._make_mask(data_for_reg_calcs)
expected_values = [[True, True],
[True, False],
[True, False],
[True, False]]
expected = xr.DataArray(expected_values, dims=[LAT_STR, LON_STR],
coords={LAT_STR: data_for_reg_calcs[LAT_STR],
LON_STR: data_for_reg_calcs[LON_STR]})
xr.testing.assert_equal(result.transpose(), expected)
@pytest.mark.parametrize(
'region',
[region_no_land_mask, region_land_mask],
ids=['no-land-mask', 'land-mask'])
def test_mask_var(data_for_reg_calcs, region):
# Test region masks first row and second column of test data. Note that
# first element of second row is np.nan in initial dataset.
expected_data = [[np.nan, np.nan],
[np.nan, np.nan],
[3., np.nan],
[4., np.nan]]
expected = data_for_reg_calcs.copy(deep=True)
expected.values = expected_data
result = region.mask_var(data_for_reg_calcs)
xr.testing.assert_identical(result, expected)
@pytest.mark.parametrize(
'region',
[region_no_land_mask, region_land_mask],
ids=['no-land-mask', 'land-mask'])
def test_mask_var_non_aospy_names(data_reg_alt_names, region):
# Test region masks first row and second column of test data. Note that
# first element of second row is np.nan in initial dataset.
expected_data = [[np.nan, np.nan],
[np.nan, np.nan],
[3., np.nan],
[4., np.nan]]
expected = data_reg_alt_names.copy(deep=True)
expected.values = expected_data
result = region.mask_var(data_reg_alt_names, lon_str=_alt_names[LON_STR],
lat_str=_alt_names[LAT_STR])
xr.testing.assert_identical(result, expected)
def test_ts_no_land_mask(data_for_reg_calcs):
result = region_no_land_mask.ts(data_for_reg_calcs)
data = data_for_reg_calcs.values
sfc_area = data_for_reg_calcs.sfc_area.values
exp_numerator = data[2, 0] * sfc_area[2, 0] + data[3, 0] * sfc_area[3, 0]
exp_denominator = sfc_area[2, 0] + sfc_area[3, 0]
expected = xr.DataArray(exp_numerator / exp_denominator)
xr.testing.assert_identical(result, expected)
def test_ts_land_mask(data_for_reg_calcs):
result = region_land_mask.ts(data_for_reg_calcs)
expected = xr.DataArray(data_for_reg_calcs.values[3, 0])
xr.testing.assert_identical(result, expected)
_map_to_alt_names = {'lon_str': _alt_names[LON_STR],
'lat_str': _alt_names[LAT_STR],
'land_mask_str': _alt_names[LAND_MASK_STR],
'sfc_area_str': _alt_names[SFC_AREA_STR]}
def test_ts_non_aospy_names(data_reg_alt_names):
result = region_land_mask.ts(data_reg_alt_names, **_map_to_alt_names)
expected = xr.DataArray(data_reg_alt_names.values[3, 0])
xr.testing.assert_identical(result, expected)
|
spencerkclark/aospy
|
aospy/test/test_region.py
|
Python
|
apache-2.0
| 7,452
|
# Copyright (c) 2011-2014 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from datetime import datetime
import email
import mock
import time
from swift.common import swob
from swift.common.middleware.s3api.s3api import filter_factory
from swift.common.middleware.s3api.etree import fromstring
from swift.common.middleware.s3api.utils import Config
from test.unit import debug_logger
from test.unit.common.middleware.s3api.helpers import FakeSwift
class FakeApp(object):
def __init__(self):
self.swift = FakeSwift()
def _update_s3_path_info(self, env):
"""
For S3 requests, Swift auth middleware replaces a user name in
env['PATH_INFO'] with a valid tenant id.
E.g. '/v1/test:tester/bucket/object' will become
'/v1/AUTH_test/bucket/object'. This method emulates the behavior.
"""
tenant_user = env['s3api.auth_details']['access_key']
tenant, user = tenant_user.rsplit(':', 1)
path = env['PATH_INFO']
env['PATH_INFO'] = path.replace(tenant_user, 'AUTH_' + tenant)
def __call__(self, env, start_response):
if 's3api.auth_details' in env:
self._update_s3_path_info(env)
if env['REQUEST_METHOD'] == 'TEST':
def authorize_cb(req):
# Assume swift owner, if not yet set
req.environ.setdefault('REMOTE_USER', 'authorized')
req.environ.setdefault('swift_owner', True)
# But then default to blocking authz, to ensure we've replaced
# the default auth system
return swob.HTTPForbidden(request=req)
env['swift.authorize'] = authorize_cb
return self.swift(env, start_response)
class S3ApiTestCase(unittest.TestCase):
def __init__(self, name):
unittest.TestCase.__init__(self, name)
def setUp(self):
# setup default config
self.conf = Config({
'allow_no_owner': False,
'location': 'us-east-1',
'dns_compliant_bucket_names': True,
'max_bucket_listing': 1000,
'max_parts_listing': 1000,
'max_multi_delete_objects': 1000,
's3_acl': False,
'storage_domain': 'localhost',
'auth_pipeline_check': True,
'max_upload_part_num': 1000,
'check_bucket_owner': False,
'force_swift_request_proxy_log': False,
'allow_multipart_uploads': True,
'min_segment_size': 5242880,
})
        # these two settings existed in the original test setup
self.conf.log_level = 'debug'
self.app = FakeApp()
self.swift = self.app.swift
self.s3api = filter_factory({}, **self.conf)(self.app)
self.logger = self.s3api.logger = self.swift.logger = debug_logger()
self.swift.register('HEAD', '/v1/AUTH_test',
swob.HTTPOk, {}, None)
self.swift.register('HEAD', '/v1/AUTH_test/bucket',
swob.HTTPNoContent, {}, None)
self.swift.register('PUT', '/v1/AUTH_test/bucket',
swob.HTTPCreated, {}, None)
self.swift.register('POST', '/v1/AUTH_test/bucket',
swob.HTTPNoContent, {}, None)
self.swift.register('DELETE', '/v1/AUTH_test/bucket',
swob.HTTPNoContent, {}, None)
self.swift.register('GET', '/v1/AUTH_test/bucket/object',
swob.HTTPOk, {'etag': 'object etag'}, "")
self.swift.register('PUT', '/v1/AUTH_test/bucket/object',
swob.HTTPCreated, {'etag': 'object etag'}, None)
self.swift.register('DELETE', '/v1/AUTH_test/bucket/object',
swob.HTTPNoContent, {}, None)
self.mock_get_swift_info_result = {'object_versioning': {}}
for s3api_path in (
'controllers.obj',
'controllers.bucket',
'controllers.multi_delete',
'controllers.versioning',
):
patcher = mock.patch(
'swift.common.middleware.s3api.%s.get_swift_info' % s3api_path,
return_value=self.mock_get_swift_info_result)
patcher.start()
self.addCleanup(patcher.stop)
def _get_error_code(self, body):
elem = fromstring(body, 'Error')
return elem.find('./Code').text
def _get_error_message(self, body):
elem = fromstring(body, 'Error')
return elem.find('./Message').text
def _test_method_error(self, method, path, response_class, headers={},
env={}, expected_xml_tags=None):
if not path.startswith('/'):
path = '/' + path # add a missing slash before the path
uri = '/v1/AUTH_test'
if path != '/':
uri += path
self.swift.register(method, uri, response_class, headers, None)
headers.update({'Authorization': 'AWS test:tester:hmac',
'Date': self.get_date_header()})
env.update({'REQUEST_METHOD': method})
req = swob.Request.blank(path, environ=env, headers=headers)
status, headers, body = self.call_s3api(req)
if expected_xml_tags is not None:
elem = fromstring(body, 'Error')
self.assertEqual(set(expected_xml_tags),
{x.tag for x in elem})
return self._get_error_code(body)
def get_date_header(self):
# email.utils.formatdate returns utc timestamp in default
return email.utils.formatdate(time.time())
def get_v4_amz_date_header(self, when=None):
if when is None:
when = datetime.utcnow()
return when.strftime('%Y%m%dT%H%M%SZ')
def call_app(self, req, app=None, expect_exception=False):
if app is None:
app = self.app
req.headers.setdefault("User-Agent", "Mozzarella Foxfire")
status = [None]
headers = [None]
def start_response(s, h, ei=None):
status[0] = s
headers[0] = swob.HeaderKeyDict(h)
body_iter = app(req.environ, start_response)
body = b''
caught_exc = None
try:
for chunk in body_iter:
body += chunk
except Exception as exc:
if expect_exception:
caught_exc = exc
else:
raise
if expect_exception:
return status[0], headers[0], body, caught_exc
else:
return status[0], headers[0], body
def call_s3api(self, req, **kwargs):
return self.call_app(req, app=self.s3api, **kwargs)
|
swiftstack/swift
|
test/unit/common/middleware/s3api/__init__.py
|
Python
|
apache-2.0
| 7,238
|
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import json
import six
from jinja2 import Template, Environment, StrictUndefined, meta, exceptions
from st2common import log as logging
from st2common.constants.action import ACTION_KV_PREFIX
from st2common.constants.system import SYSTEM_KV_PREFIX
from st2common.exceptions import actionrunner
from st2common.services.keyvalues import KeyValueLookup
from st2common.util.casts import get_cast
from st2common.util.compat import to_unicode
LOG = logging.getLogger(__name__)
__all__ = [
'get_resolved_params',
'get_rendered_params',
'get_finalized_params',
]
def _split_params(runner_parameters, action_parameters, mixed_params):
def pf(params, skips):
result = {k: v for k, v in six.iteritems(mixed_params)
if k in params and k not in skips}
return result
return (pf(runner_parameters, {}), pf(action_parameters, runner_parameters))
def _get_resolved_runner_params(runner_parameters, action_parameters,
actionexec_runner_parameters):
# Runner parameters should use the defaults from the RunnerType object.
# The runner parameter defaults may be overridden by values provided in
# the Action and liveaction.
# Create runner parameter by merging default values with dynamic values
resolved_params = {k: v['default'] if 'default' in v else None
for k, v in six.iteritems(runner_parameters)}
# pick overrides from action_parameters & actionexec_runner_parameters
for param_name, param_value in six.iteritems(runner_parameters):
# No override if param is immutable
if param_value.get('immutable', False):
continue
# Check if param exists in action_parameters and if it has a default value then
# pickup the override.
if param_name in action_parameters:
action_param = action_parameters[param_name]
if 'default' in action_param:
resolved_params[param_name] = action_param['default']
# No further override (from liveaction) if param is immutable
if action_param.get('immutable', False):
continue
# Finally pick up override from actionexec_runner_parameters
if param_name in actionexec_runner_parameters:
resolved_params[param_name] = actionexec_runner_parameters[param_name]
return resolved_params
def _get_resolved_action_params(runner_parameters, action_parameters,
actionexec_action_parameters):
# Create action parameters by merging default values with dynamic values
resolved_params = {k: v['default'] if 'default' in v else None
for k, v in six.iteritems(action_parameters)
if k not in runner_parameters}
# pick overrides from actionexec_action_parameters
for param_name, param_value in six.iteritems(action_parameters):
# No override if param is immutable
if param_value.get('immutable', False):
continue
if param_name in actionexec_action_parameters and param_name not in runner_parameters:
resolved_params[param_name] = actionexec_action_parameters[param_name]
return resolved_params
def get_resolved_params(runnertype_parameter_info, action_parameter_info, actionexec_parameters):
'''
Looks at the parameter values from runner, action and action execution to fully resolve the
    values. Resolution is the process of determining the value of a parameter by taking into
consideration default, immutable and user supplied values.
'''
# Runner parameters should use the defaults from the RunnerType object.
# The runner parameter defaults may be overridden by values provided in
# the Action and liveaction.
actionexec_runner_parameters, actionexec_action_parameters = _split_params(
runnertype_parameter_info, action_parameter_info, actionexec_parameters)
runner_params = _get_resolved_runner_params(runnertype_parameter_info,
action_parameter_info,
actionexec_runner_parameters)
action_params = _get_resolved_action_params(runnertype_parameter_info,
action_parameter_info,
actionexec_action_parameters)
return runner_params, action_params
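# --- Illustrative sketch (not part of the original module) ---
# get_resolved_params() applies the precedence implemented above: runner-type
# default, overridden by an action default, overridden by the liveaction value
# (unless a parameter is marked immutable). The schemas and values below are
# invented for illustration.
def _example_resolution_precedence():
    runner_info = {'timeout': {'default': 60}}
    action_info = {'timeout': {'default': 120}, 'cmd': {}}
    liveaction_params = {'timeout': 30, 'cmd': 'ls'}
    return get_resolved_params(runner_info, action_info, liveaction_params)
    # -> ({'timeout': 30}, {'cmd': 'ls'})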
def _is_template(template_str):
template_str = to_unicode(template_str)
template = Template(template_str)
try:
return template_str != template.render({})
except exceptions.UndefinedError:
return True
def _renderable_context_param_split(action_parameters, runner_parameters, base_context=None):
# To render the params it is necessary to combine the params together so that cross
# parameter category references are resolved.
renderable_params = {}
# shallow copy since this will be updated
context_params = copy.copy(base_context) if base_context else {}
def do_render_context_split(source_params):
'''
        Will split the supplied source_params into renderable_params and context_params. As part of
        the split it also makes sure that all params are essentially strings.
'''
for k, v in six.iteritems(source_params):
renderable_v = v
# dict and list to be converted to str
if isinstance(renderable_v, dict) or isinstance(renderable_v, list):
renderable_v = json.dumps(renderable_v)
# only str can contain templates
if (isinstance(renderable_v, str) or isinstance(renderable_v, unicode)) and \
_is_template(renderable_v):
renderable_params[k] = renderable_v
elif isinstance(v, dict) or isinstance(v, list):
# For context use the renderable value for dict and list params. The template
# rendering by jinja yields a non json.loads compatible value leading to issues
# while performing casts.
context_params[k] = renderable_v
else:
# For context use the original value.
context_params[k] = v
do_render_context_split(action_parameters)
do_render_context_split(runner_parameters)
return (renderable_params, context_params)
def _check_availability(param, param_dependencies, renderable_params, context):
for dependency in param_dependencies:
if dependency not in renderable_params and dependency not in context:
return False
return True
def _check_cyclic(dep_chain, dependencies):
last_idx = len(dep_chain) - 1
last_value = dep_chain[last_idx]
for dependency in dependencies.get(last_value, []):
if dependency in dep_chain:
dep_chain.append(dependency)
return False
dep_chain.append(dependency)
if not _check_cyclic(dep_chain, dependencies):
return False
dep_chain.pop()
return True
def _validate_dependencies(renderable_params, context):
'''
Validates dependencies between the parameters.
e.g.
{
'a': '{{b}}',
'b': '{{a}}'
}
    In this example 'a' requires 'b' for template rendering and vice-versa. There is no way for
    these templates to be rendered, so the case is flagged with an ActionRunnerException.
'''
env = Environment(undefined=StrictUndefined)
dependencies = {}
for k, v in six.iteritems(renderable_params):
template_ast = env.parse(v)
dependencies[k] = meta.find_undeclared_variables(template_ast)
for k, v in six.iteritems(dependencies):
if not _check_availability(k, v, renderable_params, context):
            msg = 'Dependency unsatisfied - %s: %s.' % (k, v)
raise actionrunner.ActionRunnerException(msg)
dep_chain = []
dep_chain.append(k)
if not _check_cyclic(dep_chain, dependencies):
            msg = 'Cyclic dependency found - %s.' % dep_chain
raise actionrunner.ActionRunnerException(msg)
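# --- Illustrative sketch (not part of the original module) ---
# A minimal check mirroring the docstring above: 'a' and 'b' reference each
# other, so _validate_dependencies() raises an ActionRunnerException.
def _example_cyclic_dependency():
    try:
        _validate_dependencies({'a': '{{b}}', 'b': '{{a}}'}, {})
    except actionrunner.ActionRunnerException:
        return True
    return False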
def _do_render_params(renderable_params, context):
'''
    Renders the params against the supplied context and returns the best attempt at rendering.
    Render attempts with missing params will leave blanks.
'''
if not renderable_params:
return renderable_params
_validate_dependencies(renderable_params, context)
env = Environment(undefined=StrictUndefined)
rendered_params = {}
rendered_params.update(context)
# Maps parameter key to render exception
# We save the exception so we can throw a more meaningful exception at the end if rendering of
# some parameter fails
parameter_render_exceptions = {}
num_parameters = len(renderable_params) + len(context)
# After how many attempts at failing to render parameter we should bail out
max_rendered_parameters_unchanged_count = num_parameters
rendered_params_unchanged_count = 0
while len(renderable_params) != 0:
renderable_params_pre_loop = renderable_params.copy()
for k, v in six.iteritems(renderable_params):
template = env.from_string(v)
try:
rendered = template.render(rendered_params)
rendered_params[k] = rendered
if k in parameter_render_exceptions:
del parameter_render_exceptions[k]
except Exception as e:
# Note: This sucks, but because we support multi level and out of order
# rendering, we can't throw an exception here yet since the parameter could get
# rendered in future iteration
LOG.debug('Failed to render %s: %s', k, v, exc_info=True)
parameter_render_exceptions[k] = e
for k in rendered_params:
if k in renderable_params:
del renderable_params[k]
if renderable_params_pre_loop == renderable_params:
rendered_params_unchanged_count += 1
# Make sure we terminate and don't end up in an infinite loop if we
# tried to render all the parameters but rendering of some parameters
# still fails
if rendered_params_unchanged_count >= max_rendered_parameters_unchanged_count:
k = parameter_render_exceptions.keys()[0]
e = parameter_render_exceptions[k]
msg = 'Failed to render parameter "%s": %s' % (k, str(e))
raise actionrunner.ActionRunnerException(msg)
return rendered_params
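# Illustrative sketch (not part of the original module; values are hypothetical):
# out-of-order references are resolved over multiple passes, e.g.
#
#     renderable_params = {'url': '{{host}}:{{port}}',
#                          'host': '{{prefix}}.example.org'}
#     context = {'prefix': 'api', 'port': '8080'}
#     rendered = _do_render_params(renderable_params, context)
#     # rendered['host'] == 'api.example.org'
#     # rendered['url'] == 'api.example.org:8080'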
def _cast_params(rendered, parameter_schemas):
casted_params = {}
for k, v in six.iteritems(rendered):
# Add uncasted first and then override with casted param. Not all params will end up
# being cast.
casted_params[k] = v
# No casting if the value is None. It leads to weird cases like str(None) = 'None'
# leading to downstream failures as well as int(None) leading to TypeError.
if v is None:
continue
parameter_schema = parameter_schemas.get(k, None)
if not parameter_schema:
continue
parameter_type = parameter_schema.get('type', None)
if not parameter_type:
continue
cast = get_cast(cast_type=parameter_type)
if not cast:
continue
casted_params[k] = cast(v)
return casted_params
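# Illustrative sketch (not part of the original module; schema and values are
# hypothetical): values are cast according to the schema 'type' via get_cast(), e.g.
#
#     rendered = {'timeout': '60', 'cmd': 'ls'}
#     schemas = {'timeout': {'type': 'integer'}}
#     _cast_params(rendered, schemas)
#     # -> {'timeout': 60, 'cmd': 'ls'}  (assuming get_cast maps 'integer' to int)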
def get_rendered_params(runner_parameters, action_parameters, action_context,
runnertype_parameter_info, action_parameter_info):
'''
    Renders the templates in runner_parameters and action_parameters. The type information
    from *_parameter_info is then used to cast the parameters appropriately.
'''
# To render the params it is necessary to combine the params together so that cross
# parameter category references are also rendered correctly. Particularly in the cases where
# a runner parameter is overridden in an action it is likely that a runner parameter could
# depend on an action parameter.
render_context = {SYSTEM_KV_PREFIX: KeyValueLookup()}
render_context[ACTION_KV_PREFIX] = action_context
renderable_params, context = _renderable_context_param_split(action_parameters,
runner_parameters,
render_context)
rendered_params = _do_render_params(renderable_params, context)
template_free_params = {}
template_free_params.update(rendered_params)
template_free_params.update(context)
r_runner_parameters, r_action_parameters = _split_params(runnertype_parameter_info,
action_parameter_info,
template_free_params)
return (_cast_params(r_runner_parameters, runnertype_parameter_info),
_cast_params(r_action_parameters, action_parameter_info))
def get_finalized_params(runnertype_parameter_info, action_parameter_info, liveaction_parameters,
action_context):
'''
Finalize the parameters for an action to execute by doing the following -
1. Split the parameters into those consumed by runner and action into separate dicts.
2. Render any templates in the parameters.
'''
runner_params, action_params = get_resolved_params(runnertype_parameter_info,
action_parameter_info,
liveaction_parameters)
runner_params, action_params = get_rendered_params(runner_params, action_params,
action_context,
runnertype_parameter_info,
action_parameter_info)
return (runner_params, action_params)
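# Minimal usage sketch (not part of the original module; the parameter-info dicts
# and values are hypothetical, and a configured StackStorm datastore is assumed
# for the system KV context):
#
#     runner_info = {'cmd': {'type': 'string'},
#                    'timeout': {'type': 'integer', 'default': 60}}
#     action_info = {'hostname': {'type': 'string'}}
#     liveaction_params = {'hostname': 'web-01', 'cmd': 'ping {{hostname}}'}
#     runner_params, action_params = get_finalized_params(
#         runner_info, action_info, liveaction_params, action_context={})
#     # runner_params -> {'cmd': 'ping web-01', 'timeout': 60}
#     # action_params -> {'hostname': 'web-01'}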
|
alfasin/st2
|
st2actions/st2actions/utils/param_utils.py
|
Python
|
apache-2.0
| 14,817
|
# -*- coding: utf-8 -*-
import sys
import pytest
py3 = sys.version_info[0] >= 3
class DummyCollector(pytest.collect.File):
def collect(self):
return []
def pytest_pycollect_makemodule(path, parent):
bn = path.basename
if "py3" in bn and not py3 or ("py2" in bn and py3):
return DummyCollector(path, parent=parent)
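# Minimal usage sketch (assumed file layout, not part of the original conftest):
#
#     example/
#         conftest.py          # this file
#         test_feature_py2.py  # collected only when running under Python 2
#         test_feature_py3.py  # collected only when running under Python 3
#
# Modules for the "wrong" interpreter are returned as an empty DummyCollector,
# so pytest silently skips them instead of failing on their syntax.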
|
cloudera/hue
|
desktop/core/ext-py/pytest-4.6.11/doc/en/example/py2py3/conftest.py
|
Python
|
apache-2.0
| 348
|
#!/usr/bin/python
#
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example gets all teams that the current user belongs to.
The statement retrieves up to the maximum page size limit of 500. To create
teams, run create_user_team_associations.py.
Tags: UserTeamAssociationService.getUserTeamAssociationsByStatement
"""
__author__ = 'api.shamjeff@gmail.com (Jeff Sham)'
# Locate the client library. If module was installed via "setup.py" script, then
# the following two lines are not needed.
import os
import sys
sys.path.insert(0, os.path.join('..', '..', '..', '..', '..'))
# Import appropriate classes from the client library.
from adspygoogle import DfpClient
# Initialize client object.
client = DfpClient(path=os.path.join('..', '..', '..', '..', '..'))
# Initialize appropriate service.
user_team_association_service = client.GetService(
'UserTeamAssociationService', version='v201311')
user_service = client.GetService('UserService', version='v201311')
# Get the current user ID.
current_user_id = user_service.GetCurrentUser()[0]['id']
# Create filter text to select user team associations by the user ID.
values = [{
'key': 'userId',
'value': {
'xsi_type': 'NumberValue',
'value': current_user_id
}
}]
filter_statement = {'query': 'WHERE userId = :userId LIMIT 500',
'values': values}
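# A hypothetical variation (not part of the original example): the same
# bind-variable pattern can page through large result sets by appending an
# OFFSET, e.g. 'WHERE userId = :userId LIMIT 500 OFFSET 500'.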
# Get user team associations by statement.
response = user_team_association_service.GetUserTeamAssociationsByStatement(
filter_statement)[0]
user_team_associations = []
if 'results' in response:
user_team_associations = response['results']
# Display results.
for user_team_association in user_team_associations:
print ('User team association between user with ID \'%s\' and team with ID '
'\'%s\' was found.' % (user_team_association['userId'],
user_team_association['teamId']))
print
print 'Number of results found: %s' % len(user_team_associations)
|
caioserra/apiAdwords
|
examples/adspygoogle/dfp/v201311/user_team_association_service/get_user_team_associations_by_statement.py
|
Python
|
apache-2.0
| 2,518
|
import axi2s_c
import sys
uut = axi2s_c.axi2s_c()
uut.read(sys.argv[1])
|
ruishihan/R7-with-notes
|
src/python/ioread.py
|
Python
|
apache-2.0
| 82
|
# Copyright 2010-2011 OpenStack Foundation
# Copyright 2012-2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests for database migrations.
There are "opportunistic" tests which allows testing against all 3 databases
(sqlite in memory, mysql, pg) in a properly configured unit test environment.
For the opportunistic testing you need to set up db's named 'openstack_citest'
with user 'openstack_citest' and password 'openstack_citest' on localhost. The
test will then use that db and u/p combo to run the tests.
For postgres on Ubuntu this can be done with the following commands::
| sudo -u postgres psql
| postgres=# create user openstack_citest with createdb login password
| 'openstack_citest';
| postgres=# create database openstack_citest with owner openstack_citest;
"""
import glob
import logging
import os
from migrate.versioning import repository
import mock
from oslo.db.sqlalchemy import test_base
from oslo.db.sqlalchemy import test_migrations
from oslo.db.sqlalchemy import utils as oslodbutils
import sqlalchemy
from sqlalchemy.engine import reflection
import sqlalchemy.exc
from sqlalchemy.sql import null
from nova.db import migration
from nova.db.sqlalchemy import migrate_repo
from nova.db.sqlalchemy import migration as sa_migration
from nova.db.sqlalchemy import utils as db_utils
from nova import exception
from nova.i18n import _
from nova import test
LOG = logging.getLogger(__name__)
class NovaMigrationsCheckers(test_migrations.WalkVersionsMixin):
"""Test sqlalchemy-migrate migrations."""
TIMEOUT_SCALING_FACTOR = 2
snake_walk = True
downgrade = True
@property
def INIT_VERSION(self):
return migration.db_initial_version()
@property
def REPOSITORY(self):
return repository.Repository(
os.path.abspath(os.path.dirname(migrate_repo.__file__)))
@property
def migration_api(self):
return sa_migration.versioning_api
@property
def migrate_engine(self):
return self.engine
def setUp(self):
super(NovaMigrationsCheckers, self).setUp()
        # NOTE(viktors): We should reduce log output because it causes issues
        # when we run tests with testr
migrate_log = logging.getLogger('migrate')
old_level = migrate_log.level
migrate_log.setLevel(logging.WARN)
self.addCleanup(migrate_log.setLevel, old_level)
def assertColumnExists(self, engine, table_name, column):
self.assertTrue(oslodbutils.column_exists(engine, table_name, column))
def assertColumnNotExists(self, engine, table_name, column):
self.assertFalse(oslodbutils.column_exists(engine, table_name, column))
def assertTableNotExists(self, engine, table):
self.assertRaises(sqlalchemy.exc.NoSuchTableError,
oslodbutils.get_table, engine, table)
def assertIndexExists(self, engine, table_name, index):
self.assertTrue(oslodbutils.index_exists(engine, table_name, index))
def assertIndexMembers(self, engine, table, index, members):
# NOTE(johannes): Order of columns can matter. Most SQL databases
# can use the leading columns for optimizing queries that don't
# include all of the covered columns.
self.assertIndexExists(engine, table, index)
t = oslodbutils.get_table(engine, table)
index_columns = None
for idx in t.indexes:
if idx.name == index:
index_columns = [c.name for c in idx.columns]
break
self.assertEqual(members, index_columns)
def _skippable_migrations(self):
special = [
216, # Havana
]
havana_placeholders = range(217, 227)
icehouse_placeholders = range(235, 244)
juno_placeholders = range(255, 265)
return (special +
havana_placeholders +
icehouse_placeholders +
juno_placeholders)
def migrate_up(self, version, with_data=False):
if with_data:
check = getattr(self, "_check_%03d" % version, None)
if version not in self._skippable_migrations():
self.assertIsNotNone(check,
('DB Migration %i does not have a '
'test. Please add one!') % version)
super(NovaMigrationsCheckers, self).migrate_up(version, with_data)
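    # Per-version hooks follow a naming convention used by migrate_up/migrate_down
    # (see walk_versions); a hypothetical migration 270 would be covered by, e.g.:
    #
    #     def _pre_upgrade_270(self, engine): ...
    #     def _check_270(self, engine, data): ...
    #     def _post_downgrade_270(self, engine): ...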
def test_walk_versions(self):
self.walk_versions(self.snake_walk, self.downgrade)
def _check_227(self, engine, data):
table = oslodbutils.get_table(engine, 'project_user_quotas')
# Insert fake_quotas with the longest resource name.
fake_quotas = {'id': 5,
'project_id': 'fake_project',
'user_id': 'fake_user',
'resource': 'injected_file_content_bytes',
'hard_limit': 10}
table.insert().execute(fake_quotas)
# Check we can get the longest resource name.
quota = table.select(table.c.id == 5).execute().first()
self.assertEqual(quota['resource'], 'injected_file_content_bytes')
def _check_228(self, engine, data):
self.assertColumnExists(engine, 'compute_nodes', 'metrics')
compute_nodes = oslodbutils.get_table(engine, 'compute_nodes')
self.assertIsInstance(compute_nodes.c.metrics.type,
sqlalchemy.types.Text)
def _post_downgrade_228(self, engine):
self.assertColumnNotExists(engine, 'compute_nodes', 'metrics')
def _check_229(self, engine, data):
self.assertColumnExists(engine, 'compute_nodes', 'extra_resources')
compute_nodes = oslodbutils.get_table(engine, 'compute_nodes')
self.assertIsInstance(compute_nodes.c.extra_resources.type,
sqlalchemy.types.Text)
def _post_downgrade_229(self, engine):
self.assertColumnNotExists(engine, 'compute_nodes', 'extra_resources')
def _check_230(self, engine, data):
for table_name in ['instance_actions_events',
'shadow_instance_actions_events']:
self.assertColumnExists(engine, table_name, 'host')
self.assertColumnExists(engine, table_name, 'details')
action_events = oslodbutils.get_table(engine,
'instance_actions_events')
self.assertIsInstance(action_events.c.host.type,
sqlalchemy.types.String)
self.assertIsInstance(action_events.c.details.type,
sqlalchemy.types.Text)
def _post_downgrade_230(self, engine):
for table_name in ['instance_actions_events',
'shadow_instance_actions_events']:
self.assertColumnNotExists(engine, table_name, 'host')
self.assertColumnNotExists(engine, table_name, 'details')
def _check_231(self, engine, data):
self.assertColumnExists(engine, 'instances', 'ephemeral_key_uuid')
instances = oslodbutils.get_table(engine, 'instances')
self.assertIsInstance(instances.c.ephemeral_key_uuid.type,
sqlalchemy.types.String)
self.assertTrue(db_utils.check_shadow_table(engine, 'instances'))
def _post_downgrade_231(self, engine):
self.assertColumnNotExists(engine, 'instances', 'ephemeral_key_uuid')
self.assertTrue(db_utils.check_shadow_table(engine, 'instances'))
def _check_232(self, engine, data):
table_names = ['compute_node_stats', 'compute_nodes',
'instance_actions', 'instance_actions_events',
'instance_faults', 'migrations']
for table_name in table_names:
self.assertTableNotExists(engine, 'dump_' + table_name)
def _check_233(self, engine, data):
self.assertColumnExists(engine, 'compute_nodes', 'stats')
compute_nodes = oslodbutils.get_table(engine, 'compute_nodes')
self.assertIsInstance(compute_nodes.c.stats.type,
sqlalchemy.types.Text)
self.assertRaises(sqlalchemy.exc.NoSuchTableError,
oslodbutils.get_table, engine, 'compute_node_stats')
def _post_downgrade_233(self, engine):
self.assertColumnNotExists(engine, 'compute_nodes', 'stats')
# confirm compute_node_stats exists
oslodbutils.get_table(engine, 'compute_node_stats')
def _check_234(self, engine, data):
self.assertIndexMembers(engine, 'reservations',
'reservations_deleted_expire_idx',
['deleted', 'expire'])
def _check_244(self, engine, data):
volume_usage_cache = oslodbutils.get_table(
engine, 'volume_usage_cache')
self.assertEqual(64, volume_usage_cache.c.user_id.type.length)
def _post_downgrade_244(self, engine):
volume_usage_cache = oslodbutils.get_table(
engine, 'volume_usage_cache')
self.assertEqual(36, volume_usage_cache.c.user_id.type.length)
def _pre_upgrade_245(self, engine):
# create a fake network
networks = oslodbutils.get_table(engine, 'networks')
fake_network = {'id': 1}
networks.insert().execute(fake_network)
def _check_245(self, engine, data):
networks = oslodbutils.get_table(engine, 'networks')
network = networks.select(networks.c.id == 1).execute().first()
# mtu should default to None
self.assertIsNone(network.mtu)
# dhcp_server should default to None
self.assertIsNone(network.dhcp_server)
# enable dhcp should default to true
self.assertTrue(network.enable_dhcp)
# share address should default to false
self.assertFalse(network.share_address)
def _post_downgrade_245(self, engine):
self.assertColumnNotExists(engine, 'networks', 'mtu')
self.assertColumnNotExists(engine, 'networks', 'dhcp_server')
self.assertColumnNotExists(engine, 'networks', 'enable_dhcp')
self.assertColumnNotExists(engine, 'networks', 'share_address')
def _check_246(self, engine, data):
pci_devices = oslodbutils.get_table(engine, 'pci_devices')
self.assertEqual(1, len([fk for fk in pci_devices.foreign_keys
if fk.parent.name == 'compute_node_id']))
def _post_downgrade_246(self, engine):
pci_devices = oslodbutils.get_table(engine, 'pci_devices')
self.assertEqual(0, len([fk for fk in pci_devices.foreign_keys
if fk.parent.name == 'compute_node_id']))
def _check_247(self, engine, data):
quota_usages = oslodbutils.get_table(engine, 'quota_usages')
self.assertFalse(quota_usages.c.resource.nullable)
pci_devices = oslodbutils.get_table(engine, 'pci_devices')
self.assertTrue(pci_devices.c.deleted.nullable)
self.assertFalse(pci_devices.c.product_id.nullable)
self.assertFalse(pci_devices.c.vendor_id.nullable)
self.assertFalse(pci_devices.c.dev_type.nullable)
def _post_downgrade_247(self, engine):
quota_usages = oslodbutils.get_table(engine, 'quota_usages')
self.assertTrue(quota_usages.c.resource.nullable)
pci_devices = oslodbutils.get_table(engine, 'pci_devices')
self.assertFalse(pci_devices.c.deleted.nullable)
self.assertTrue(pci_devices.c.product_id.nullable)
self.assertTrue(pci_devices.c.vendor_id.nullable)
self.assertTrue(pci_devices.c.dev_type.nullable)
def _check_248(self, engine, data):
self.assertIndexMembers(engine, 'reservations',
'reservations_deleted_expire_idx',
['deleted', 'expire'])
def _post_downgrade_248(self, engine):
reservations = oslodbutils.get_table(engine, 'reservations')
index_names = [idx.name for idx in reservations.indexes]
self.assertNotIn('reservations_deleted_expire_idx', index_names)
def _check_249(self, engine, data):
# Assert that only one index exists that covers columns
# instance_uuid and device_name
bdm = oslodbutils.get_table(engine, 'block_device_mapping')
self.assertEqual(1, len([i for i in bdm.indexes
if [c.name for c in i.columns] ==
['instance_uuid', 'device_name']]))
def _post_downgrade_249(self, engine):
# The duplicate index is not created on downgrade, so this
# asserts that only one index exists that covers columns
# instance_uuid and device_name
bdm = oslodbutils.get_table(engine, 'block_device_mapping')
self.assertEqual(1, len([i for i in bdm.indexes
if [c.name for c in i.columns] ==
['instance_uuid', 'device_name']]))
def _check_250(self, engine, data):
self.assertTableNotExists(engine, 'instance_group_metadata')
self.assertTableNotExists(engine, 'shadow_instance_group_metadata')
def _post_downgrade_250(self, engine):
oslodbutils.get_table(engine, 'instance_group_metadata')
oslodbutils.get_table(engine, 'shadow_instance_group_metadata')
def _check_251(self, engine, data):
self.assertColumnExists(engine, 'compute_nodes', 'numa_topology')
self.assertColumnExists(engine, 'shadow_compute_nodes',
'numa_topology')
compute_nodes = oslodbutils.get_table(engine, 'compute_nodes')
shadow_compute_nodes = oslodbutils.get_table(engine,
'shadow_compute_nodes')
self.assertIsInstance(compute_nodes.c.numa_topology.type,
sqlalchemy.types.Text)
self.assertIsInstance(shadow_compute_nodes.c.numa_topology.type,
sqlalchemy.types.Text)
def _post_downgrade_251(self, engine):
self.assertColumnNotExists(engine, 'compute_nodes', 'numa_topology')
self.assertColumnNotExists(engine, 'shadow_compute_nodes',
'numa_topology')
def _check_252(self, engine, data):
oslodbutils.get_table(engine, 'instance_extra')
oslodbutils.get_table(engine, 'shadow_instance_extra')
self.assertIndexMembers(engine, 'instance_extra',
'instance_extra_idx',
['instance_uuid'])
def _post_downgrade_252(self, engine):
self.assertTableNotExists(engine, 'instance_extra')
self.assertTableNotExists(engine, 'shadow_instance_extra')
def _check_253(self, engine, data):
self.assertColumnExists(engine, 'instance_extra', 'pci_requests')
self.assertColumnExists(
engine, 'shadow_instance_extra', 'pci_requests')
instance_extra = oslodbutils.get_table(engine, 'instance_extra')
shadow_instance_extra = oslodbutils.get_table(engine,
'shadow_instance_extra')
self.assertIsInstance(instance_extra.c.pci_requests.type,
sqlalchemy.types.Text)
self.assertIsInstance(shadow_instance_extra.c.pci_requests.type,
sqlalchemy.types.Text)
def _post_downgrade_253(self, engine):
self.assertColumnNotExists(engine, 'instance_extra', 'pci_requests')
self.assertColumnNotExists(engine, 'shadow_instance_extra',
'pci_requests')
def _check_254(self, engine, data):
self.assertColumnExists(engine, 'pci_devices', 'request_id')
self.assertColumnExists(
engine, 'shadow_pci_devices', 'request_id')
pci_devices = oslodbutils.get_table(engine, 'pci_devices')
shadow_pci_devices = oslodbutils.get_table(
engine, 'shadow_pci_devices')
self.assertIsInstance(pci_devices.c.request_id.type,
sqlalchemy.types.String)
self.assertIsInstance(shadow_pci_devices.c.request_id.type,
sqlalchemy.types.String)
def _post_downgrade_254(self, engine):
self.assertColumnNotExists(engine, 'pci_devices', 'request_id')
self.assertColumnNotExists(
engine, 'shadow_pci_devices', 'request_id')
def _check_265(self, engine, data):
# Assert that only one index exists that covers columns
# host and deleted
instances = oslodbutils.get_table(engine, 'instances')
self.assertEqual(1, len([i for i in instances.indexes
if [c.name for c in i.columns][:2] ==
['host', 'deleted']]))
# and only one index covers host column
iscsi_targets = oslodbutils.get_table(engine, 'iscsi_targets')
self.assertEqual(1, len([i for i in iscsi_targets.indexes
if [c.name for c in i.columns][:1] ==
['host']]))
def _post_downgrade_265(self, engine):
# The duplicated index is not created on downgrade, so this
# asserts that only one index exists that covers columns
# host and deleted
instances = oslodbutils.get_table(engine, 'instances')
self.assertEqual(1, len([i for i in instances.indexes
if [c.name for c in i.columns][:2] ==
['host', 'deleted']]))
# and only one index covers host column
iscsi_targets = oslodbutils.get_table(engine, 'iscsi_targets')
self.assertEqual(1, len([i for i in iscsi_targets.indexes
if [c.name for c in i.columns][:1] ==
['host']]))
def _check_266(self, engine, data):
self.assertColumnExists(engine, 'tags', 'resource_id')
self.assertColumnExists(engine, 'tags', 'tag')
table = oslodbutils.get_table(engine, 'tags')
self.assertIsInstance(table.c.resource_id.type,
sqlalchemy.types.String)
self.assertIsInstance(table.c.tag.type,
sqlalchemy.types.String)
def _post_downgrade_266(self, engine):
self.assertTableNotExists(engine, 'tags')
def _pre_upgrade_267(self, engine):
# Create a fixed_ips row with a null instance_uuid (if not already
# there) to make sure that's not deleted.
fixed_ips = oslodbutils.get_table(engine, 'fixed_ips')
fake_fixed_ip = {'id': 1}
fixed_ips.insert().execute(fake_fixed_ip)
# Create an instance record with a valid (non-null) UUID so we make
# sure we don't do something stupid and delete valid records.
instances = oslodbutils.get_table(engine, 'instances')
fake_instance = {'id': 1, 'uuid': 'fake-non-null-uuid'}
instances.insert().execute(fake_instance)
# Add a null instance_uuid entry for the volumes table
# since it doesn't have a foreign key back to the instances table.
volumes = oslodbutils.get_table(engine, 'volumes')
fake_volume = {'id': '9c3c317e-24db-4d57-9a6f-96e6d477c1da'}
volumes.insert().execute(fake_volume)
def _check_267(self, engine, data):
        # Make sure fixed_ips.instance_uuid is still nullable, and that
        # instances.uuid is non-nullable with the unique constraint in place.
fixed_ips = oslodbutils.get_table(engine, 'fixed_ips')
self.assertTrue(fixed_ips.c.instance_uuid.nullable)
fixed_ip = fixed_ips.select(fixed_ips.c.id == 1).execute().first()
self.assertIsNone(fixed_ip.instance_uuid)
instances = oslodbutils.get_table(engine, 'instances')
self.assertFalse(instances.c.uuid.nullable)
inspector = reflection.Inspector.from_engine(engine)
constraints = inspector.get_unique_constraints('instances')
constraint_names = [constraint['name'] for constraint in constraints]
self.assertIn('uniq_instances0uuid', constraint_names)
# Make sure the instances record with the valid uuid is still there.
instance = instances.select(instances.c.id == 1).execute().first()
self.assertIsNotNone(instance)
# Check that the null entry in the volumes table is still there since
# we skipped tables that don't have FK's back to the instances table.
volumes = oslodbutils.get_table(engine, 'volumes')
self.assertTrue(volumes.c.instance_uuid.nullable)
        volume = volumes.select(
            volumes.c.id == '9c3c317e-24db-4d57-9a6f-96e6d477c1da'
        ).execute().first()
self.assertIsNone(volume.instance_uuid)
def _post_downgrade_267(self, engine):
# Make sure the UC is gone and the column is nullable again.
instances = oslodbutils.get_table(engine, 'instances')
self.assertTrue(instances.c.uuid.nullable)
inspector = reflection.Inspector.from_engine(engine)
constraints = inspector.get_unique_constraints('instances')
constraint_names = [constraint['name'] for constraint in constraints]
self.assertNotIn('uniq_instances0uuid', constraint_names)
def test_migration_267(self):
# This is separate from test_walk_versions so we can test the case
# where there are non-null instance_uuid entries in the database which
# cause the 267 migration to fail.
engine = self.migrate_engine
self.migration_api.version_control(
engine, self.REPOSITORY, self.INIT_VERSION)
self.migration_api.upgrade(engine, self.REPOSITORY, 266)
# Create a consoles record with a null instance_uuid so
# we can test that the upgrade fails if that entry is found.
# NOTE(mriedem): We use the consoles table since that's the only table
# created in the 216 migration with a ForeignKey created on the
        # instance_uuid column for sqlite.
consoles = oslodbutils.get_table(engine, 'consoles')
fake_console = {'id': 1}
consoles.insert().execute(fake_console)
        # NOTE(mriedem): We expect the 267 migration to raise a ValidationError
        # because the consoles table still has a null instance_uuid entry.
ex = self.assertRaises(exception.ValidationError,
self.migration_api.upgrade,
engine, self.REPOSITORY, 267)
self.assertIn("There are 1 records in the "
"'consoles' table where the uuid or "
"instance_uuid column is NULL.",
ex.kwargs['detail'])
# Remove the consoles entry with the null instance_uuid column.
rows = consoles.delete().where(
consoles.c['instance_uuid'] == null()).execute().rowcount
self.assertEqual(1, rows)
# Now run the 267 upgrade again.
self.migration_api.upgrade(engine, self.REPOSITORY, 267)
# Make sure the consoles entry with the null instance_uuid
# was deleted.
console = consoles.select(consoles.c.id == 1).execute().first()
self.assertIsNone(console)
def _check_268(self, engine, data):
# We can only assert that the col exists, not the unique constraint
# as the engine is running sqlite
self.assertColumnExists(engine, 'compute_nodes', 'host')
self.assertColumnExists(engine, 'shadow_compute_nodes', 'host')
compute_nodes = oslodbutils.get_table(engine, 'compute_nodes')
shadow_compute_nodes = oslodbutils.get_table(
engine, 'shadow_compute_nodes')
self.assertIsInstance(compute_nodes.c.host.type,
sqlalchemy.types.String)
self.assertIsInstance(shadow_compute_nodes.c.host.type,
sqlalchemy.types.String)
def _post_downgrade_268(self, engine):
self.assertColumnNotExists(engine, 'compute_nodes', 'host')
self.assertColumnNotExists(engine, 'shadow_compute_nodes', 'host')
def _check_269(self, engine, data):
self.assertColumnExists(engine, 'pci_devices', 'numa_node')
self.assertColumnExists(engine, 'shadow_pci_devices', 'numa_node')
pci_devices = oslodbutils.get_table(engine, 'pci_devices')
shadow_pci_devices = oslodbutils.get_table(
engine, 'shadow_pci_devices')
self.assertIsInstance(pci_devices.c.numa_node.type,
sqlalchemy.types.Integer)
self.assertTrue(pci_devices.c.numa_node.nullable)
self.assertIsInstance(shadow_pci_devices.c.numa_node.type,
sqlalchemy.types.Integer)
self.assertTrue(shadow_pci_devices.c.numa_node.nullable)
def _post_downgrade_269(self, engine):
self.assertColumnNotExists(engine, 'pci_devices', 'numa_node')
self.assertColumnNotExists(engine, 'shadow_pci_devices', 'numa_node')
class TestNovaMigrationsSQLite(NovaMigrationsCheckers,
test.TestCase,
test_base.DbTestCase):
pass
class TestNovaMigrationsMySQL(NovaMigrationsCheckers,
test.TestCase,
test_base.MySQLOpportunisticTestCase):
def test_innodb_tables(self):
with mock.patch.object(sa_migration, 'get_engine',
return_value=self.migrate_engine):
sa_migration.db_sync()
total = self.migrate_engine.execute(
"SELECT count(*) "
"FROM information_schema.TABLES "
"WHERE TABLE_SCHEMA = '%(database)s'" %
{'database': self.migrate_engine.url.database})
self.assertTrue(total.scalar() > 0, "No tables found. Wrong schema?")
noninnodb = self.migrate_engine.execute(
"SELECT count(*) "
"FROM information_schema.TABLES "
"WHERE TABLE_SCHEMA='%(database)s' "
"AND ENGINE != 'InnoDB' "
"AND TABLE_NAME != 'migrate_version'" %
{'database': self.migrate_engine.url.database})
count = noninnodb.scalar()
self.assertEqual(count, 0, "%d non InnoDB tables created" % count)
class TestNovaMigrationsPostgreSQL(NovaMigrationsCheckers,
test.TestCase,
test_base.PostgreSQLOpportunisticTestCase):
pass
class ProjectTestCase(test.NoDBTestCase):
def test_all_migrations_have_downgrade(self):
topdir = os.path.normpath(os.path.dirname(__file__) + '/../../../')
py_glob = os.path.join(topdir, "nova", "db", "sqlalchemy",
"migrate_repo", "versions", "*.py")
missing_downgrade = []
for path in glob.iglob(py_glob):
has_upgrade = False
has_downgrade = False
with open(path, "r") as f:
for line in f:
if 'def upgrade(' in line:
has_upgrade = True
if 'def downgrade(' in line:
has_downgrade = True
if has_upgrade and not has_downgrade:
fname = os.path.basename(path)
missing_downgrade.append(fname)
helpful_msg = (_("The following migrations are missing a downgrade:"
"\n\t%s") % '\n\t'.join(sorted(missing_downgrade)))
self.assertFalse(missing_downgrade, helpful_msg)
|
Metaswitch/calico-nova
|
nova/tests/unit/db/test_migrations.py
|
Python
|
apache-2.0
| 28,211
|
from .estimator_base import *
class H2OKMeansEstimator(H2OEstimator):
def __init__(self, model_id=None, k=None, max_iterations=None,standardize=None,init=None,seed=None,
nfolds=None,fold_assignment=None, user_points=None,ignored_columns=None,
score_each_iteration=None, keep_cross_validation_predictions=None,
ignore_const_cols=None,checkpoint=None):
"""
Performs k-means clustering on an H2O dataset.
Parameters
----------
model_id : str, optional
The unique id assigned to the resulting model. If none is given, an id will
automatically be generated.
k : int
The number of clusters. Must be between 1 and 1e7 inclusive. k may be omitted
if the user specifies the initial centers in the init parameter. If k is not
      omitted in this case, it should be equal to the number of user-specified
centers.
max_iterations : int
The maximum number of iterations allowed. Must be between 0 and 1e6 inclusive.
standardize : bool
Indicates whether the data should be standardized before running k-means.
init : str
A character string that selects the initial set of k cluster centers. Possible
values are
"Random": for random initialization,
"PlusPlus": for k-means plus initialization, or
"Furthest": for initialization at the furthest point from each successive
center.
      Additionally, the user may specify the initial centers as a matrix,
data.frame, H2OFrame, or list of vectors. For matrices, data.frames,
and H2OFrames, each row of the respective structure is an initial center. For
lists of vectors, each vector is an initial center.
seed : int, optional
Random seed used to initialize the cluster centroids.
nfolds : int, optional
Number of folds for cross-validation. If nfolds >= 2, then validation must
remain empty.
fold_assignment : str
      Cross-validation fold assignment scheme, used if fold_column is not specified.
      Must be "AUTO", "Random" or "Modulo".
:return: An instance of H2OClusteringModel.
"""
super(H2OKMeansEstimator, self).__init__()
self._parms = locals()
self._parms = {k:v for k,v in self._parms.iteritems() if k!="self"}
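  # Minimal usage sketch (not part of the original module; the frame and file
  # names are hypothetical and a running H2O cluster is assumed):
  #
  #     import h2o
  #     from h2o.estimators.kmeans import H2OKMeansEstimator
  #     h2o.init()
  #     frame = h2o.import_file("iris.csv")
  #     model = H2OKMeansEstimator(k=3, max_iterations=100, standardize=True, seed=1234)
  #     model.train(x=frame.names, training_frame=frame)
  #     print(model.centers())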
@property
def k(self):
return self._parms["k"]
@k.setter
def k(self, value):
self._parms["k"] = value
@property
def max_iterations(self):
return self._parms["max_iterations"]
@max_iterations.setter
def max_iterations(self, value):
self._parms["max_iterations"] = value
@property
def standardize(self):
return self._parms["standardize"]
@standardize.setter
def standardize(self, value):
self._parms["standardize"] = value
@property
def init(self):
return self._parms["init"]
@init.setter
def init(self, value):
self._parms["init"] = value
@property
def seed(self):
return self._parms["seed"]
@seed.setter
def seed(self, value):
self._parms["seed"] = value
@property
def nfolds(self):
return self._parms["nfolds"]
@nfolds.setter
def nfolds(self, value):
self._parms["nfolds"] = value
@property
def fold_assignment(self):
return self._parms["fold_assignment"]
@fold_assignment.setter
def fold_assignment(self, value):
self._parms["fold_assignment"] = value
@property
def user_points(self):
return self._parms["user_points"]
@user_points.setter
def user_points(self, value):
self._parms["user_points"] = value
@property
def ignored_columns(self):
return self._parms["ignored_columns"]
@ignored_columns.setter
def ignored_columns(self, value):
self._parms["ignored_columns"] = value
@property
def score_each_iteration(self):
return self._parms["score_each_iteration"]
@score_each_iteration.setter
def score_each_iteration(self, value):
self._parms["score_each_iteration"] = value
@property
def keep_cross_validation_predictions(self):
return self._parms["keep_cross_validation_predictions"]
@keep_cross_validation_predictions.setter
def keep_cross_validation_predictions(self, value):
self._parms["keep_cross_validation_predictions"] = value
@property
def ignore_const_cols(self):
return self._parms["ignore_const_cols"]
@ignore_const_cols.setter
def ignore_const_cols(self, value):
self._parms["ignore_const_cols"] = value
@property
def checkpoint(self):
return self._parms["checkpoint"]
@checkpoint.setter
def checkpoint(self, value):
self._parms["checkpoint"] = value
|
pchmieli/h2o-3
|
h2o-py/h2o/estimators/kmeans.py
|
Python
|
apache-2.0
| 4,684
|
# ===============================================================================
# Copyright 2016 ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= standard library imports ========================
from __future__ import absolute_import
from traits.api import List, Str, Dict, Button, Int, String, Event
from pychron.core.fuzzyfinder import fuzzyfinder
from pychron.envisage.browser.record_views import ProjectRecordView
from pychron.loggable import Loggable
class ProjectManager(Loggable):
oitems = List
items = List
filter_str = Str(enter_set=True, auto_set=False)
filter_attrs = Dict(
{
"name": "Name",
"unique_id": "UniqueID",
"principal_investigator": "Principal Investigator",
"lab_contact": "Lab Contact",
}
)
filter_attr = Str
# add_button = Button
# ir = Str
# institution = Str
# comment = String
# pi = Str
# lab_contact = Str
# pis = List
# lab_contacts = List
scroll_to_row = Int
project_name = String
comment = String
selected = List
save_button = Button
refresh = Event
def activated(self):
with self.dvc.session_ctx(use_parent_session=False):
self.items = self.oitems = [
ProjectRecordView(pr) for pr in self.dvc.get_projects()
]
# self._filter()
# self.pis = self.dvc.get_principal_investigator_names()
# self.lab_contacts = self.dvc.get_usernames()
# def prepare_destroy(self):
# self.dvc.close_session()
# private
# def _add(self):
# self.dvc.add_ir(self.pi, self.lab_contact,
# ir=self.ir,
# comment=self.comment,
# institution=self.institution)
#
# self.oitems = self.dvc.get_irs()
# self._filter()
def _project_name_changed(self, new):
if self.selected:
if len(self.selected) == 1:
p = self.selected[0]
p.name = new
if new != p.db_name:
p.dirty = True
else:
p.dirty = False
self.refresh = True
else:
self.warning_dialog("Can only edit the name of one project at a time")
def _comment_changed(self, new):
if self.selected:
for i in self.selected:
                i.comment = new
                if new != i.db_comment:
                    i.dirty = True
                else:
                    i.dirty = False
self.refresh = True
def _save_button_fired(self):
self.debug("Apply changes")
dvc = self.dvc
with dvc.session_ctx(use_parent_session=False):
commit = False
for item in self.oitems:
if item.dirty:
pr = dvc.get_project_by_id(item.unique_id)
pr.name = item.name
pr.comment = item.comment
item.db_comment = item.comment
item.db_name = item.name
item.dirty = False
commit = True
if commit:
dvc.commit()
def _filter(self):
if self.filter_str:
self.items = fuzzyfinder(self.filter_str, self.oitems, self.filter_attr)
else:
self.items = self.oitems
self.scroll_to_row = len(self.items) - 1
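    # Illustrative sketch (hypothetical values, not part of the original class):
    # with filter_attr = 'name' and filter_str = 'irr', fuzzyfinder() keeps only
    # the ProjectRecordViews whose name fuzzily matches, e.g. 'Irradiation2016'.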
def _filter_str_changed(self):
self._filter()
def _filter_attr_changed(self):
self._filter()
# def _add_button_fired(self):
# self._add()
# ============= EOF =============================================
|
USGSDenverPychron/pychron
|
pychron/entry/tasks/project/project_manager.py
|
Python
|
apache-2.0
| 4,367
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" Component tests for VPC Internal Load Balancer functionality with
Nuage VSP SDN plugin
"""
# Import Local Modules
from nuageTestCase import nuageTestCase
from marvin.lib.base import (Account,
ApplicationLoadBalancer,
Network,
Router)
from marvin.cloudstackAPI import (listInternalLoadBalancerVMs,
stopInternalLoadBalancerVM,
startInternalLoadBalancerVM)
# Import System Modules
from nose.plugins.attrib import attr
import copy
import time
class TestNuageInternalLb(nuageTestCase):
"""Test VPC Internal LB functionality with Nuage VSP SDN plugin
"""
@classmethod
def setUpClass(cls):
super(TestNuageInternalLb, cls).setUpClass()
return
def setUp(self):
# Create an account
self.account = Account.create(self.api_client,
self.test_data["account"],
admin=True,
domainid=self.domain.id
)
self.cleanup = [self.account]
return
# create_Internal_LB_Rule - Creates Internal LB rule in the given
# VPC network
def create_Internal_LB_Rule(self, network, vm_array=None, services=None,
source_ip=None):
self.debug("Creating Internal LB rule in VPC network with ID - %s" %
network.id)
if not services:
services = self.test_data["internal_lbrule"]
int_lb_rule = ApplicationLoadBalancer.create(
self.api_client,
services=services,
sourcenetworkid=network.id,
networkid=network.id,
sourceipaddress=source_ip
)
self.debug("Created Internal LB rule")
# Assigning VMs to the created Internal Load Balancer rule
if vm_array:
self.debug("Assigning virtual machines - %s to the created "
"Internal LB rule" % vm_array)
int_lb_rule.assign(self.api_client, vms=vm_array)
self.debug("Assigned VMs to the created Internal LB rule")
return int_lb_rule
# validate_Internal_LB_Rule - Validates the given Internal LB rule,
# matches the given Internal LB rule name and state against the list of
# Internal LB rules fetched
def validate_Internal_LB_Rule(self, int_lb_rule, state=None,
vm_array=None):
"""Validates the Internal LB Rule"""
self.debug("Check if the Internal LB Rule is created successfully ?")
int_lb_rules = ApplicationLoadBalancer.list(self.api_client,
id=int_lb_rule.id
)
self.assertEqual(isinstance(int_lb_rules, list), True,
"List Internal LB Rule should return a valid list"
)
self.assertEqual(int_lb_rule.name, int_lb_rules[0].name,
"Name of the Internal LB Rule should match with the "
"returned list data"
)
if state:
self.assertEqual(int_lb_rules[0].loadbalancerrule[0].state, state,
"Internal LB Rule state should be '%s'" % state
)
if vm_array:
instance_ids = [instance.id for instance in
int_lb_rules[0].loadbalancerinstance]
for vm in vm_array:
self.assertEqual(vm.id in instance_ids, True,
"Internal LB instance list should have the "
"VM with ID - %s" % vm.id
)
self.debug("Internal LB Rule creation successfully validated for %s" %
int_lb_rule.name)
# list_InternalLbVms - Lists deployed Internal LB VM instances
def list_InternalLbVms(self, network_id=None, source_ip=None):
listInternalLoadBalancerVMsCmd = \
listInternalLoadBalancerVMs.listInternalLoadBalancerVMsCmd()
listInternalLoadBalancerVMsCmd.account = self.account.name
listInternalLoadBalancerVMsCmd.domainid = self.account.domainid
if network_id:
listInternalLoadBalancerVMsCmd.networkid = network_id
internal_lb_vms = self.api_client.listInternalLoadBalancerVMs(
listInternalLoadBalancerVMsCmd)
if source_ip:
return [internal_lb_vm for internal_lb_vm in internal_lb_vms
if str(internal_lb_vm.guestipaddress) == source_ip]
else:
return internal_lb_vms
# get_InternalLbVm - Returns Internal LB VM instance for the given VPC
# network and source ip
def get_InternalLbVm(self, network, source_ip):
self.debug("Finding the InternalLbVm for network with ID - %s and "
"source IP address - %s" % (network.id, source_ip))
internal_lb_vms = self.list_InternalLbVms(network.id, source_ip)
self.assertEqual(isinstance(internal_lb_vms, list), True,
"List InternalLbVms should return a valid list"
)
return internal_lb_vms[0]
# stop_InternalLbVm - Stops the given Internal LB VM instance
def stop_InternalLbVm(self, int_lb_vm, force=False):
self.debug("Stopping InternalLbVm with ID - %s" % int_lb_vm.id)
cmd = stopInternalLoadBalancerVM.stopInternalLoadBalancerVMCmd()
cmd.id = int_lb_vm.id
if force:
cmd.forced = force
self.api_client.stopInternalLoadBalancerVM(cmd)
# start_InternalLbVm - Starts the given Internal LB VM instance
def start_InternalLbVm(self, int_lb_vm):
self.debug("Starting InternalLbVm with ID - %s" % int_lb_vm.id)
cmd = startInternalLoadBalancerVM.startInternalLoadBalancerVMCmd()
cmd.id = int_lb_vm.id
self.api_client.startInternalLoadBalancerVM(cmd)
# check_InternalLbVm_state - Checks if the Internal LB VM instance of the
    # given VPC network and source IP is in the expected state from the list of
# fetched Internal LB VM instances
def check_InternalLbVm_state(self, network, source_ip, state=None):
self.debug("Check if the InternalLbVm is in state - %s" % state)
internal_lb_vms = self.list_InternalLbVms(network.id, source_ip)
self.assertEqual(isinstance(internal_lb_vms, list), True,
"List InternalLbVm should return a valid list"
)
if state:
self.assertEqual(internal_lb_vms[0].state, state,
"InternalLbVm is not in the expected state"
)
self.debug("InternalLbVm instance - %s is in the expected state - %s" %
(internal_lb_vms[0].name, state))
# verify_vpc_vm_ingress_traffic - Verifies ingress traffic to the given VM
# (SSH into VM) via a created Static NAT rule in the given VPC network
def verify_vpc_vm_ingress_traffic(self, vm, network, vpc):
self.debug("Verifying ingress traffic to the VM (SSH into VM) - %s "
"via a created Static NAT rule in the VPC network - %s" %
(vm, network))
# Creating Static NAT rule for the given VM in the given VPC network
self.debug("Creating Static NAT Rule...")
test_public_ip = self.acquire_PublicIPAddress(network, vpc)
self.validate_PublicIPAddress(test_public_ip, network)
self.create_StaticNatRule_For_VM(vm, test_public_ip, network)
self.validate_PublicIPAddress(
test_public_ip, network, static_nat=True, vm=vm)
# VSD verification
self.verify_vsd_floating_ip(network, vm, test_public_ip.ipaddress, vpc)
# Adding Network ACL rule in the given VPC network
self.debug("Creating Network ACL rule ...")
test_public_ssh_rule = self.create_NetworkAclRule(
self.test_data["ingress_rule"], network=network)
# VSD verification
self.verify_vsd_firewall_rule(test_public_ssh_rule)
# SSH into VM
self.debug("Verifying VM ingress traffic (SSH into VM)...")
self.ssh_into_VM(vm, test_public_ip)
# Removing Network ACL rule in the given VPC network
self.debug("Removing the created Network ACL rule...")
test_public_ssh_rule.delete(self.api_client)
# VSD verification
with self.assertRaises(Exception):
self.verify_vsd_firewall_rule(test_public_ssh_rule)
self.debug("Network ACL rule successfully deleted in VSD")
# Deleting Static NAT Rule
self.debug("Deleting the created Static NAT Rule...")
self.delete_StaticNatRule_For_VM(test_public_ip)
with self.assertRaises(Exception):
self.validate_PublicIPAddress(
test_public_ip, network, static_nat=True, vm=vm)
self.debug("Static NAT Rule successfully deleted in CloudStack")
# VSD verification
with self.assertRaises(Exception):
self.verify_vsd_floating_ip(
network, vm, test_public_ip.ipaddress, vpc=vpc)
self.debug("Floating IP successfully deleted in VSD")
# Releasing acquired public IP
self.debug("Releasing the acquired public IP...")
test_public_ip.delete(self.api_client)
with self.assertRaises(Exception):
self.validate_PublicIPAddress(test_public_ip, network)
self.debug("Acquired public IP in the network successfully released "
"in CloudStack")
self.debug("Successfully verified ingress traffic to the VM "
"(SSH into VM) - %s via a created Static NAT rule in the "
"VPC network - %s" % (vm, network))
# wget_from_vm_cmd - From within the given VM (ssh client),
# fetches index.html file of web server running with the given public IP
def wget_from_vm_cmd(self, ssh_client, ip_address, port):
wget_file = ""
cmd = "rm -rf index.html*"
self.execute_cmd(ssh_client, cmd)
cmd = "wget --no-cache -t 1 http://" + ip_address + ":" + str(port) + \
"/"
response = self.execute_cmd(ssh_client, cmd)
if "200 OK" in response:
self.debug("wget from a VM with http server IP address "
"- %s is successful" % ip_address)
# Reading the wget file
cmd = "cat index.html"
wget_file = self.execute_cmd(ssh_client, cmd)
# Removing the wget file
cmd = "rm -rf index.html*"
self.execute_cmd(ssh_client, cmd)
else:
self.debug("Failed to wget from a VM with http server IP address "
"- %s" % ip_address)
return wget_file
# verify_lb_wget_file - Verifies that the given wget file (index.html)
# belongs to the given Internal LB rule
# assigned VMs (vm array)
def verify_lb_wget_file(self, wget_file, vm_array):
wget_server_ip = None
for vm in vm_array:
for nic in vm.nic:
if str(nic.ipaddress) in str(wget_file):
wget_server_ip = str(nic.ipaddress)
if wget_server_ip:
self.debug("Verified wget file from Internal Load Balanced VMs - "
"%s" % vm_array)
else:
self.fail("Failed to verify wget file from Internal Load Balanced "
"VMs - %s" % vm_array)
return wget_server_ip
# validate_internallb_algorithm_traffic - Validates Internal LB algorithms
# by performing multiple wget traffic tests against the given Internal LB
# VM instance (source port)
def validate_internallb_algorithm_traffic(self, ssh_client, source_ip,
port, vm_array, algorithm):
# Internal LB (wget) traffic tests
iterations = 2 * len(vm_array)
wget_files = []
for i in range(iterations):
wget_files.append(
self.wget_from_vm_cmd(ssh_client, source_ip, port))
# Verifying Internal LB (wget) traffic tests
wget_servers_ip_list = []
for i in range(iterations):
wget_servers_ip_list.append(
self.verify_lb_wget_file(wget_files[i], vm_array))
# Validating Internal LB algorithm
if algorithm == "roundrobin" or algorithm == "leastconn":
for i in range(iterations):
                if wget_servers_ip_list.count(wget_servers_ip_list[i]) != 2:
self.fail("Round Robin Internal LB algorithm validation "
"failed - %s" % wget_servers_ip_list)
self.debug("Successfully validated Round Robin/Least connections "
"Internal LB algorithm - %s" % wget_servers_ip_list)
if algorithm == "source":
for i in range(iterations):
                if wget_servers_ip_list.count(wget_servers_ip_list[i]) \
                        != iterations:
self.fail("Source Internal LB algorithm validation failed "
"- %s" % wget_servers_ip_list)
self.debug("Successfully validated Source Internal LB algorithm - "
"%s" % wget_servers_ip_list)
@attr(tags=["advanced", "nuagevsp"], required_hardware="false")
def test_01_nuage_internallb_vpc_Offering(self):
"""Test Nuage VSP VPC Offering with different combinations of LB
service providers
"""
# 1. Verify that the network service providers supported by Nuage VSP
# for VPC Internal LB functionality are all successfully created and
# enabled.
# 2. Create Nuage VSP VPC offering with LB service provider as
# "InternalLbVm", check if it is successfully created and enabled.
# Verify that the VPC creation succeeds with this VPC offering.
# 3. Create Nuage VSP VPC offering with LB service provider as
# "VpcVirtualRouter", check if it is successfully created and
# enabled. Verify that the VPC creation fails with this VPC offering
# as Nuage VSP does not support provider "VpcVirtualRouter" for
# service LB.
# 4. Create Nuage VSP VPC offering with LB service provider as
# "Netscaler", check if it is successfully created and enabled.
# Verify that the VPC creation fails with this VPC offering as Nuage
# VSP does not support provider "Netscaler" for service LB.
# 5. Delete all the created objects (cleanup).
self.debug("Validating network service providers supported by Nuage "
"VSP for VPC Internal LB functionality")
providers = ["NuageVsp", "VpcVirtualRouter", "InternalLbVm"]
for provider in providers:
self.validate_NetworkServiceProvider(provider, state="Enabled")
# Creating VPC offerings
self.debug("Creating Nuage VSP VPC offering with LB service provider "
"as InternalLbVm...")
vpc_off_1 = self.create_VpcOffering(
self.test_data["nuagevsp"]["vpc_offering_lb"])
self.validate_VpcOffering(vpc_off_1, state="Enabled")
self.debug("Creating Nuage VSP VPC offering with LB service provider "
"as VpcVirtualRouter...")
vpc_offering_lb = copy.deepcopy(
self.test_data["nuagevsp"]["vpc_offering_lb"])
vpc_offering_lb["serviceProviderList"]["Lb"] = "VpcVirtualRouter"
vpc_off_2 = self.create_VpcOffering(vpc_offering_lb)
self.validate_VpcOffering(vpc_off_2, state="Enabled")
self.debug("Creating Nuage VSP VPC offering with LB service provider "
"as Netscaler...")
vpc_offering_lb["serviceProviderList"]["Lb"] = "Netscaler"
vpc_off_3 = self.create_VpcOffering(vpc_offering_lb)
self.validate_VpcOffering(vpc_off_3, state="Enabled")
self.debug("Creating Nuage VSP VPC offering without LB service...")
vpc_off_4 = self.create_VpcOffering(
self.test_data["nuagevsp"]["vpc_offering"])
self.validate_VpcOffering(vpc_off_4, state="Enabled")
# Creating VPCs
self.debug("Creating a VPC with LB service provider as "
"InternalLbVm...")
vpc_1 = self.create_Vpc(vpc_off_1, cidr='10.1.0.0/16')
self.validate_Vpc(vpc_1, state="Enabled")
self.debug("Creating a VPC with LB service provider as "
"VpcVirtualRouter...")
with self.assertRaises(Exception):
self.create_Vpc(vpc_off_2, cidr='10.1.0.0/16')
self.debug("Nuage VSP does not support provider VpcVirtualRouter for "
"service LB for VPCs")
self.debug("Creating a VPC with LB service provider as Netscaler...")
with self.assertRaises(Exception):
self.create_Vpc(vpc_off_3, cidr='10.1.0.0/16')
self.debug("Nuage VSP does not support provider Netscaler for service "
"LB for VPCs")
self.debug("Creating a VPC without LB service...")
vpc_2 = self.create_Vpc(vpc_off_4, cidr='10.1.0.0/16')
self.validate_Vpc(vpc_2, state="Enabled")
@attr(tags=["advanced", "nuagevsp"], required_hardware="false")
def test_02_nuage_internallb_vpc_network_offering(self):
"""Test Nuage VSP VPC Network Offering with and without Internal LB
service
"""
# 1. Create Nuage VSP VPC Network offering with LB Service Provider as
# "InternalLbVm" and LB Service Capability "lbSchemes" as
# "internal", check if it is successfully created and enabled.
# Verify that the VPC network creation succeeds with this Network
# offering.
# 2. Recreate above Network offering with ispersistent False, check if
        #    it is successfully created and enabled. Verify that the VPC network
# creation fails with this Network offering as Nuage VSP does not
# support non persistent VPC networks.
# 3. Recreate above Network offering with conserve mode On, check if
# the network offering creation failed as only networks with
# conserve mode Off can belong to VPC.
# 4. Create Nuage VSP VPC Network offering with LB Service Provider as
# "InternalLbVm" and LB Service Capability "lbSchemes" as "public",
# check if the network offering creation failed as "public" lbScheme
# is not supported for LB Service Provider "InternalLbVm".
# 5. Create Nuage VSP VPC Network offering without Internal LB Service,
# check if it is successfully created and enabled. Verify that the
# VPC network creation succeeds with this Network offering.
# 6. Recreate above Network offering with ispersistent False, check if
# it is successfully created and enabled. Verify that the VPC
# network creation fails with this Network offering as Nuage VSP
# does not support non persistent VPC networks.
# 7. Recreate the above Network offering with conserve mode On, check
# if the network offering creation failed as only networks with
# conserve mode Off can belong to VPC.
# 8. Delete all the created objects (cleanup).
# Creating VPC offering
self.debug("Creating Nuage VSP VPC offering with Internal LB "
"service...")
vpc_off = self.create_VpcOffering(
self.test_data["nuagevsp"]["vpc_offering_lb"])
self.validate_VpcOffering(vpc_off, state="Enabled")
# Creating VPC
self.debug("Creating a VPC with Internal LB service...")
vpc = self.create_Vpc(vpc_off, cidr='10.1.0.0/16')
self.validate_Vpc(vpc, state="Enabled")
# Creating network offerings
self.debug("Creating Nuage VSP VPC Network offering with LB Service "
"Provider as InternalLbVm and LB Service Capability "
"lbSchemes as internal...")
net_off_1 = self.create_NetworkOffering(
self.test_data["nuagevsp"]["vpc_network_offering_internal_lb"])
self.validate_NetworkOffering(net_off_1, state="Enabled")
self.debug("Recreating above Network offering with ispersistent "
"False...")
vpc_net_off_lb_non_persistent = copy.deepcopy(
self.test_data["nuagevsp"]["vpc_network_offering_internal_lb"])
vpc_net_off_lb_non_persistent["ispersistent"] = "False"
net_off_2 = self.create_NetworkOffering(vpc_net_off_lb_non_persistent)
self.validate_NetworkOffering(net_off_2, state="Enabled")
self.debug("Recreating above Network offering with conserve mode "
"On...")
with self.assertRaises(Exception):
self.create_NetworkOffering(
self.test_data["nuagevsp"]["vpc_network_offering_internal_lb"],
conserve_mode=True)
self.debug("Network offering creation failed as only networks with "
"conserve mode Off can belong to VPC")
self.debug("Creating Nuage VSP VPC Network offering with LB Service "
"Provider as InternalLbVm and LB Service Capability "
"lbSchemes as public...")
network_offering_internal_lb = copy.deepcopy(
self.test_data["nuagevsp"]["vpc_network_offering_internal_lb"])
service_list = network_offering_internal_lb["serviceCapabilityList"]
service_list["Lb"]["lbSchemes"] = "public"
network_offering_internal_lb["serviceCapabilityList"] = service_list
with self.assertRaises(Exception):
self.create_NetworkOffering(network_offering_internal_lb)
self.debug("Network offering creation failed as public lbScheme is "
"not supported for LB Service Provider InternalLbVm")
self.debug("Creating Nuage VSP VPC Network offering without Internal "
"LB service...")
net_off_3 = self.create_NetworkOffering(
self.test_data["nuagevsp"]["vpc_network_offering"])
self.validate_NetworkOffering(net_off_3, state="Enabled")
self.debug("Recreating above Network offering with ispersistent "
"False...")
vpc_net_off_non_persistent = copy.deepcopy(
self.test_data["nuagevsp"]["vpc_network_offering"])
vpc_net_off_non_persistent["ispersistent"] = "False"
net_off_4 = self.create_NetworkOffering(vpc_net_off_non_persistent)
self.validate_NetworkOffering(net_off_4, state="Enabled")
self.debug("Recreating above Network offering with conserve mode "
"On...")
with self.assertRaises(Exception):
self.create_NetworkOffering(
self.test_data["nuagevsp"]["vpc_network_offering"],
conserve_mode=True)
self.debug("Network offering creation failed as only networks with "
"conserve mode Off can belong to VPC")
# Creating VPC networks in the VPC
self.debug("Creating a persistent VPC network with Internal LB "
"service...")
internal_tier = self.create_Network(
net_off_1, gateway='10.1.1.1', vpc=vpc)
self.validate_Network(internal_tier, state="Implemented")
vr = self.get_Router(internal_tier)
self.check_Router_state(vr, state="Running")
# VSD verification
self.verify_vsd_network(self.domain.id, internal_tier, vpc)
self.verify_vsd_router(vr)
self.debug("Creating a non persistent VPC network with Internal LB "
"service...")
with self.assertRaises(Exception):
self.create_Network(net_off_2, gateway='10.1.2.1', vpc=vpc)
self.debug("Nuage VSP does not support non persistent VPC networks")
self.debug("Creating a persistent VPC network without Internal LB "
"service...")
public_tier = self.create_Network(
net_off_3, gateway='10.1.3.1', vpc=vpc)
self.validate_Network(public_tier, state="Implemented")
vr = self.get_Router(public_tier)
self.check_Router_state(vr, state="Running")
# VSD verification
self.verify_vsd_network(self.domain.id, public_tier, vpc)
self.verify_vsd_router(vr)
self.debug("Creating a non persistent VPC network without Internal LB "
"service...")
with self.assertRaises(Exception):
self.create_Network(net_off_4, gateway='10.1.4.1', vpc=vpc)
self.debug("Nuage VSP does not support non persistent VPC networks")
@attr(tags=["advanced", "nuagevsp"], required_hardware="false")
def test_03_nuage_internallb_vpc_networks(self):
"""Test Nuage VSP VPC Networks with and without Internal LB service
"""
# 1. Create Nuage VSP VPC offering with Internal LB service, check if
# it is successfully created and enabled.
# 2. Create Nuage VSP VPC offering without Internal LB service, check
# if it is successfully created and enabled.
# 3. Create a VPC "vpc_1" with Internal LB service, check if it is
# successfully created and enabled.
# 4. Create a VPC "vpc_2" without Internal LB service, check if it is
# successfully created and enabled.
# 5. Create Nuage VSP VPC Network offering with Internal LB service,
# check if it is successfully created and enabled.
# 6. Create Nuage VSP VPC Network offering without Internal LB service,
# check if it is successfully created and enabled.
# 7. Create a VPC network in vpc_1 with Internal LB service and spawn a
# VM, check if the tier is added to the VPC VR, and the VM is
# deployed successfully in the tier.
# 8. Create one more VPC network in vpc_1 with Internal LB service and
# spawn a VM, check if the tier is added to the VPC VR, and the VM
# is deployed successfully in the tier.
# 9. Create a VPC network in vpc_2 with Internal LB service, check if
# the tier creation failed.
# 10. Create a VPC network in vpc_1 without Internal LB service and
# spawn a VM, check if the tier is added to the VPC VR, and the VM
# is deployed successfully in the tier.
# 11. Create a VPC network in vpc_2 without Internal LB service and
# spawn a VM, check if the tier is added to the VPC VR, and the VM
# is deployed successfully in the tier.
# 12. Upgrade the VPC network with Internal LB service to one with no
# Internal LB service and vice-versa, check if the VPC Network
# offering upgrade passed in both directions.
# 13. Delete the VPC network with Internal LB service, check if the
# tier is successfully deleted.
# 14. Recreate the VPC network with Internal LB service, check if the
# tier is successfully re-created.
# 15. Delete all the created objects (cleanup).
# Creating VPC offerings
self.debug("Creating Nuage VSP VPC offering with Internal LB "
"service...")
vpc_off_1 = self.create_VpcOffering(
self.test_data["nuagevsp"]["vpc_offering_lb"])
self.validate_VpcOffering(vpc_off_1, state="Enabled")
self.debug("Creating Nuage VSP VPC offering without Internal LB "
"service...")
vpc_off_2 = self.create_VpcOffering(
self.test_data["nuagevsp"]["vpc_offering"])
self.validate_VpcOffering(vpc_off_2, state="Enabled")
# Creating VPCs
self.debug("Creating a VPC with Internal LB service...")
vpc_1 = self.create_Vpc(vpc_off_1, cidr='10.1.0.0/16')
self.validate_Vpc(vpc_1, state="Enabled")
self.debug("Creating a VPC without Internal LB service...")
vpc_2 = self.create_Vpc(vpc_off_2, cidr='10.1.0.0/16')
self.validate_Vpc(vpc_2, state="Enabled")
# Creating network offerings
self.debug("Creating Nuage VSP VPC Network offering with Internal LB "
"service...")
net_off_1 = self.create_NetworkOffering(
self.test_data["nuagevsp"]["vpc_network_offering_internal_lb"])
self.validate_NetworkOffering(net_off_1, state="Enabled")
self.debug("Creating Nuage VSP VPC Network offering without Internal "
"LB service...")
net_off_2 = self.create_NetworkOffering(
self.test_data["nuagevsp"]["vpc_network_offering"])
self.validate_NetworkOffering(net_off_2, state="Enabled")
# Creating VPC networks in VPCs, and deploying VMs
self.debug("Creating a VPC network in vpc_1 with Internal LB "
"service...")
internal_tier_1 = self.create_Network(
net_off_1, gateway='10.1.1.1', vpc=vpc_1)
self.validate_Network(internal_tier_1, state="Implemented")
vr_1 = self.get_Router(internal_tier_1)
self.check_Router_state(vr_1, state="Running")
self.debug("Deploying a VM in the created VPC network...")
internal_vm_1 = self.create_VM(internal_tier_1)
self.check_VM_state(internal_vm_1, state="Running")
# VSD verification
self.verify_vsd_network(self.domain.id, internal_tier_1, vpc_1)
self.verify_vsd_router(vr_1)
self.verify_vsd_vm(internal_vm_1)
self.debug("Creating one more VPC network in vpc_1 with Internal LB "
"service...")
internal_tier_2 = self.create_Network(
net_off_1, gateway='10.1.2.1', vpc=vpc_1)
self.validate_Network(internal_tier_2, state="Implemented")
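        # Both tiers in vpc_1 share the same VPC virtual router (VR)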
vr_1 = self.get_Router(internal_tier_2)
self.check_Router_state(vr_1, state="Running")
self.debug("Deploying a VM in the created VPC network...")
internal_vm_2 = self.create_VM(internal_tier_2)
self.check_VM_state(internal_vm_2, state="Running")
# VSD verification
self.verify_vsd_network(self.domain.id, internal_tier_2, vpc_1)
self.verify_vsd_router(vr_1)
self.verify_vsd_vm(internal_vm_2)
self.debug("Creating a VPC network in vpc_2 with Internal LB "
"service...")
with self.assertRaises(Exception):
self.create_Network(net_off_1, gateway='10.1.1.1', vpc=vpc_2)
self.debug("VPC Network creation failed as vpc_2 does not support "
"Internal Lb service")
self.debug("Creating a VPC network in vpc_1 without Internal LB "
"service...")
public_tier_1 = self.create_Network(
net_off_2, gateway='10.1.3.1', vpc=vpc_1)
self.validate_Network(public_tier_1, state="Implemented")
vr_1 = self.get_Router(public_tier_1)
self.check_Router_state(vr_1, state="Running")
self.debug("Deploying a VM in the created VPC network...")
public_vm_1 = self.create_VM(public_tier_1)
self.check_VM_state(public_vm_1, state="Running")
# VSD verification
self.verify_vsd_network(self.domain.id, public_tier_1, vpc_1)
self.verify_vsd_router(vr_1)
self.verify_vsd_vm(public_vm_1)
self.debug("Creating a VPC network in vpc_2 without Internal LB "
"service...")
public_tier_2 = self.create_Network(
net_off_2, gateway='10.1.1.1', vpc=vpc_2)
self.validate_Network(public_tier_2, state="Implemented")
vr_2 = self.get_Router(public_tier_2)
self.check_Router_state(vr_2, state="Running")
self.debug("Deploying a VM in the created VPC network...")
public_vm_2 = self.create_VM(public_tier_2)
self.check_VM_state(public_vm_2, state="Running")
# VSD verification
self.verify_vsd_network(self.domain.id, public_tier_2, vpc_2)
self.verify_vsd_router(vr_2)
self.verify_vsd_vm(public_vm_2)
# Upgrading a VPC network
self.debug("Upgrading a VPC network with Internal LB Service to one "
"without Internal LB Service...")
self.upgrade_Network(net_off_2, internal_tier_2)
self.validate_Network(internal_tier_2, state="Implemented")
vr_1 = self.get_Router(internal_tier_2)
self.check_Router_state(vr_1, state="Running")
self.check_VM_state(internal_vm_2, state="Running")
# VSD verification
self.verify_vsd_network(self.domain.id, internal_tier_2, vpc_1)
self.verify_vsd_router(vr_1)
self.verify_vsd_vm(internal_vm_2)
self.debug("Upgrading a VPC network without Internal LB Service to "
"one with Internal LB Service...")
self.upgrade_Network(net_off_1, internal_tier_2)
self.validate_Network(internal_tier_2, state="Implemented")
vr_1 = self.get_Router(internal_tier_2)
self.check_Router_state(vr_1, state="Running")
self.check_VM_state(internal_vm_2, state="Running")
# VSD verification
self.verify_vsd_network(self.domain.id, internal_tier_2, vpc_1)
self.verify_vsd_router(vr_1)
self.verify_vsd_vm(internal_vm_2)
# Deleting and re-creating a VPC network
self.debug("Deleting a VPC network with Internal LB Service...")
self.delete_VM(internal_vm_2)
self.delete_Network(internal_tier_2)
with self.assertRaises(Exception):
self.validate_Network(internal_tier_2)
self.debug("VPC network successfully deleted in CloudStack")
# VSD verification
with self.assertRaises(Exception):
self.verify_vsd_network(self.domain.id, internal_tier_2, vpc_1)
self.debug("VPC network successfully deleted in VSD")
self.debug("Recreating a VPC network with Internal LB Service...")
internal_tier_2 = self.create_Network(
net_off_1, gateway='10.1.2.1', vpc=vpc_1)
internal_vm_2 = self.create_VM(internal_tier_2)
self.validate_Network(internal_tier_2, state="Implemented")
vr_1 = self.get_Router(internal_tier_2)
self.check_Router_state(vr_1, state="Running")
self.check_VM_state(internal_vm_2, state="Running")
# VSD verification
self.verify_vsd_network(self.domain.id, internal_tier_2, vpc_1)
self.verify_vsd_router(vr_1)
self.verify_vsd_vm(internal_vm_2)
@attr(tags=["advanced", "nuagevsp"], required_hardware="false")
def test_04_nuage_internallb_rules(self):
"""Test Nuage VSP VPC Internal LB functionality with different
combinations of Internal LB rules
"""
# 1. Create an Internal LB Rule with source IP Address specified, check
# if the Internal LB Rule is successfully created.
# 2. Create an Internal LB Rule without source IP Address specified,
# check if the Internal LB Rule is successfully created.
# 3. Create an Internal LB Rule when the specified source IP Address is
# outside the VPC network (tier) CIDR range, check if the Internal
# LB Rule creation failed as the requested source IP is not in the
# network's CIDR subnet.
# 4. Create an Internal LB Rule when the specified source IP Address is
# outside the VPC super CIDR range, check if the Internal LB Rule
# creation failed as the requested source IP is not in the network's
# CIDR subnet.
# 5. Create an Internal LB Rule in the tier with LB service provider as
# VpcInlineLbVm, check if the Internal LB Rule creation failed as
# Scheme Internal is not supported by this network offering.
# 6. Create multiple Internal LB Rules using different Load Balancing
# source IP Addresses, check if the Internal LB Rules are
# successfully created.
# 7. Create multiple Internal LB Rules with different ports but using
# the same Load Balancing source IP Address, check if the Internal
# LB Rules are successfully created.
# 8. Create multiple Internal LB Rules with same ports and using the
# same Load Balancing source IP Address, check if the second
# Internal LB Rule creation failed as it conflicts with the first
# Internal LB rule.
# 9. Attach a VM to the above created Internal LB Rules, check if the
# VM is successfully attached to the Internal LB Rules.
# 10. Verify the InternalLbVm deployment after successfully creating
# the first Internal LB Rule and attaching a VM to it.
# 11. Verify the failure of attaching a VM from a different tier to an
# Internal LB Rule created on a tier.
# 12. Delete the above created Internal LB Rules, check if the Internal
# LB Rules are successfully deleted.
# 13. Delete all the created objects (cleanup).
# Creating a VPC offering
self.debug("Creating Nuage VSP VPC offering with Internal LB "
"service...")
vpc_off = self.create_VpcOffering(
self.test_data["nuagevsp"]["vpc_offering_lb"])
self.validate_VpcOffering(vpc_off, state="Enabled")
# Creating a VPC
self.debug("Creating a VPC with Internal LB service...")
vpc = self.create_Vpc(vpc_off, cidr='10.1.0.0/16')
self.validate_Vpc(vpc, state="Enabled")
# Creating network offerings
self.debug("Creating Nuage VSP VPC Network offering with Internal LB "
"service...")
net_off_1 = self.create_NetworkOffering(
self.test_data["nuagevsp"]["vpc_network_offering_internal_lb"])
self.validate_NetworkOffering(net_off_1, state="Enabled")
self.debug("Creating Nuage VSP VPC Network offering without Internal "
"LB service...")
net_off_2 = self.create_NetworkOffering(
self.test_data["nuagevsp"]["vpc_network_offering"])
self.validate_NetworkOffering(net_off_2, state="Enabled")
# Creating VPC networks in the VPC, and deploying VMs
self.debug("Creating a VPC network with Internal LB service...")
internal_tier = self.create_Network(
net_off_1, gateway='10.1.1.1', vpc=vpc)
self.validate_Network(internal_tier, state="Implemented")
vr = self.get_Router(internal_tier)
self.check_Router_state(vr, state="Running")
self.debug("Deploying a VM in the created VPC network...")
internal_vm = self.create_VM(internal_tier)
self.check_VM_state(internal_vm, state="Running")
# VSD verification
self.verify_vsd_network(self.domain.id, internal_tier, vpc)
self.verify_vsd_router(vr)
self.verify_vsd_vm(internal_vm)
self.debug("Creating a VPC network without Internal LB service...")
public_tier = self.create_Network(
net_off_2, gateway='10.1.2.1', vpc=vpc)
self.validate_Network(public_tier, state="Implemented")
vr = self.get_Router(public_tier)
self.check_Router_state(vr, state="Running")
self.debug("Deploying a VM in the created VPC network...")
public_vm = self.create_VM(public_tier)
self.check_VM_state(public_vm, state="Running")
# VSD verification
self.verify_vsd_network(self.domain.id, public_tier, vpc)
self.verify_vsd_router(vr)
self.verify_vsd_vm(public_vm)
# Creating Internal LB Rules
self.debug("Creating an Internal LB Rule without source IP Address "
"specified...")
int_lb_rule = self.create_Internal_LB_Rule(internal_tier)
self.validate_Internal_LB_Rule(int_lb_rule, state="Add")
# Validating InternalLbVm deployment
with self.assertRaises(Exception):
self.check_InternalLbVm_state(
internal_tier, int_lb_rule.sourceipaddress)
self.debug("InternalLbVm is not deployed in the network as there are "
"no VMs assigned to this Internal LB Rule")
self.debug('Deleting the Internal LB Rule - %s' % int_lb_rule.name)
int_lb_rule.delete(self.api_client)
with self.assertRaises(Exception):
self.validate_Internal_LB_Rule(int_lb_rule)
self.debug("Internal LB Rule successfully deleted in CloudStack")
free_source_ip = int_lb_rule.sourceipaddress
self.debug("Creating an Internal LB Rule with source IP Address "
"specified...")
int_lb_rule = self.create_Internal_LB_Rule(
internal_tier, source_ip=free_source_ip)
self.validate_Internal_LB_Rule(int_lb_rule, state="Add")
# Validating InternalLbVm deployment
with self.assertRaises(Exception):
self.check_InternalLbVm_state(
internal_tier, int_lb_rule.sourceipaddress)
self.debug("InternalLbVm is not deployed in the network as there are "
"no VMs assigned to this Internal LB Rule")
self.debug('Deleting the Internal LB Rule - %s' % int_lb_rule.name)
int_lb_rule.delete(self.api_client)
with self.assertRaises(Exception):
self.validate_Internal_LB_Rule(int_lb_rule)
self.debug("Internal LB Rule successfully deleted in CloudStack")
self.debug("Creating an Internal LB Rule when the specified source IP "
"Address is outside the VPC network CIDR range...")
with self.assertRaises(Exception):
self.create_Internal_LB_Rule(internal_tier, source_ip="10.1.1.256")
self.debug("Internal LB Rule creation failed as the requested IP is "
"not in the network's CIDR subnet")
self.debug("Creating an Internal LB Rule when the specified source IP "
"Address is outside the VPC super CIDR range...")
with self.assertRaises(Exception):
self.create_Internal_LB_Rule(internal_tier, source_ip="10.2.1.256")
self.debug("Internal LB Rule creation failed as the requested IP is "
"not in the network's CIDR subnet")
self.debug("Creating an Internal LB Rule in a VPC network without "
"Internal Lb service...")
with self.assertRaises(Exception):
self.create_Internal_LB_Rule(public_tier)
self.debug("Internal LB Rule creation failed as Scheme Internal is "
"not supported by this network offering")
self.debug("Creating multiple Internal LB Rules using different Load "
"Balancing source IP Addresses...")
int_lb_rule_1 = self.create_Internal_LB_Rule(
internal_tier, vm_array=[internal_vm])
self.validate_Internal_LB_Rule(
int_lb_rule_1, state="Active", vm_array=[internal_vm])
int_lb_rule_2 = self.create_Internal_LB_Rule(
internal_tier, vm_array=[internal_vm])
self.validate_Internal_LB_Rule(
int_lb_rule_2, state="Active", vm_array=[internal_vm])
# Validating InternalLbVms deployment and state
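        # Each distinct LB source IP gets its own InternalLbVm appliance, so
        # two appliances are expected here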
int_lb_vm_1 = self.get_InternalLbVm(
internal_tier, int_lb_rule_1.sourceipaddress)
self.check_InternalLbVm_state(
internal_tier, int_lb_rule_1.sourceipaddress, state="Running")
int_lb_vm_2 = self.get_InternalLbVm(
internal_tier, int_lb_rule_2.sourceipaddress)
self.check_InternalLbVm_state(
internal_tier, int_lb_rule_2.sourceipaddress, state="Running")
# VSD Verification
self.verify_vsd_lb_device(int_lb_vm_1)
self.verify_vsd_lb_device(int_lb_vm_2)
self.debug('Removing VMs from the Internal LB Rules - %s, %s' %
(int_lb_rule_1.name, int_lb_rule_2.name))
int_lb_rule_1.remove(self.api_client, vms=[internal_vm])
with self.assertRaises(Exception):
self.validate_Internal_LB_Rule(
int_lb_rule_1, vm_array=[internal_vm])
self.debug("VMs successfully removed from the Internal LB Rule in "
"CloudStack")
int_lb_rule_2.remove(self.api_client, vms=[internal_vm])
with self.assertRaises(Exception):
self.validate_Internal_LB_Rule(
int_lb_rule_2, vm_array=[internal_vm])
self.debug("VMs successfully removed from the Internal LB Rule in "
"CloudStack")
# Validating InternalLbVms state
self.check_InternalLbVm_state(
internal_tier, int_lb_rule_1.sourceipaddress, state="Running")
self.check_InternalLbVm_state(
internal_tier, int_lb_rule_2.sourceipaddress, state="Running")
# VSD Verification
self.verify_vsd_lb_device(int_lb_vm_1)
self.verify_vsd_lb_device(int_lb_vm_2)
self.debug('Deleting the Internal LB Rules - %s, %s' %
(int_lb_rule_1.name, int_lb_rule_2.name))
int_lb_rule_1.delete(self.api_client)
with self.assertRaises(Exception):
self.validate_Internal_LB_Rule(int_lb_rule_1)
self.debug("Internal LB Rule successfully deleted in CloudStack")
int_lb_rule_2.delete(self.api_client)
with self.assertRaises(Exception):
self.validate_Internal_LB_Rule(int_lb_rule_2)
self.debug("Internal LB Rule successfully deleted in CloudStack")
# Validating InternalLbVms un-deployment
with self.assertRaises(Exception):
self.check_InternalLbVm_state(
internal_tier, int_lb_rule_1.sourceipaddress)
self.debug("InternalLbVm successfully destroyed in CloudStack")
with self.assertRaises(Exception):
self.check_InternalLbVm_state(
internal_tier, int_lb_rule_2.sourceipaddress)
self.debug("InternalLbVm successfully destroyed in CloudStack")
# VSD Verification
with self.assertRaises(Exception):
self.verify_vsd_lb_device(int_lb_vm_1)
self.debug("InternalLbVm successfully destroyed in VSD")
with self.assertRaises(Exception):
self.verify_vsd_lb_device(int_lb_vm_2)
self.debug("InternalLbVm successfully destroyed in VSD")
self.debug("Creating multiple Internal LB Rules with different ports "
"but using the same Load Balancing source IP Address...")
int_lb_rule_1 = self.create_Internal_LB_Rule(
internal_tier, vm_array=[internal_vm])
self.validate_Internal_LB_Rule(
int_lb_rule_1, state="Active", vm_array=[internal_vm])
int_lb_rule_2 = self.create_Internal_LB_Rule(
internal_tier, vm_array=[internal_vm],
services=self.test_data["internal_lbrule_http"],
source_ip=int_lb_rule_1.sourceipaddress)
self.validate_Internal_LB_Rule(
int_lb_rule_2, state="Active", vm_array=[internal_vm])
# Validating InternalLbVm deployment and state
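        # Rules sharing the same LB source IP are served by a single
        # InternalLbVm appliance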
int_lb_vm = self.get_InternalLbVm(
internal_tier, int_lb_rule_1.sourceipaddress)
self.check_InternalLbVm_state(
internal_tier, int_lb_rule_1.sourceipaddress, state="Running")
# VSD Verification
self.verify_vsd_lb_device(int_lb_vm)
self.debug('Removing VMs from the Internal LB Rules - %s, %s' %
(int_lb_rule_1.name, int_lb_rule_2.name))
int_lb_rule_1.remove(self.api_client, vms=[internal_vm])
with self.assertRaises(Exception):
self.validate_Internal_LB_Rule(
int_lb_rule_1, vm_array=[internal_vm])
self.debug("VMs successfully removed from the Internal LB Rule in "
"CloudStack")
int_lb_rule_2.remove(self.api_client, vms=[internal_vm])
with self.assertRaises(Exception):
self.validate_Internal_LB_Rule(
int_lb_rule_2, vm_array=[internal_vm])
self.debug("VMs successfully removed from the Internal LB Rule in "
"CloudStack")
# Validating InternalLbVm state
self.check_InternalLbVm_state(
internal_tier, int_lb_rule_1.sourceipaddress, state="Running")
# VSD Verification
self.verify_vsd_lb_device(int_lb_vm)
self.debug('Deleting the Internal LB Rules - %s, %s' %
(int_lb_rule_1.name, int_lb_rule_2.name))
int_lb_rule_1.delete(self.api_client)
with self.assertRaises(Exception):
self.validate_Internal_LB_Rule(int_lb_rule_1)
self.debug("Internal LB Rule successfully deleted in CloudStack")
int_lb_rule_2.delete(self.api_client)
with self.assertRaises(Exception):
self.validate_Internal_LB_Rule(int_lb_rule_2)
self.debug("Internal LB Rule successfully deleted in CloudStack")
# Validating InternalLbVm un-deployment
with self.assertRaises(Exception):
self.check_InternalLbVm_state(
internal_tier, int_lb_rule_1.sourceipaddress)
self.debug("InternalLbVm successfully destroyed in CloudStack")
# VSD Verification
with self.assertRaises(Exception):
self.verify_vsd_lb_device(int_lb_vm)
self.debug("InternalLbVm successfully destroyed in VSD")
self.debug("Creating multiple Internal LB Rules with same ports and "
"using the same Load Balancing source IP Address...")
int_lb_rule = self.create_Internal_LB_Rule(
internal_tier, vm_array=[internal_vm])
self.validate_Internal_LB_Rule(
int_lb_rule, state="Active", vm_array=[internal_vm])
with self.assertRaises(Exception):
self.create_Internal_LB_Rule(
internal_tier, vm_array=[internal_vm],
source_ip=int_lb_rule.sourceipaddress)
self.debug("Internal LB Rule creation failed as it conflicts with the "
"existing rule")
# Validating InternalLbVm deployment and state
int_lb_vm = self.get_InternalLbVm(
internal_tier, int_lb_rule.sourceipaddress)
self.check_InternalLbVm_state(
internal_tier, int_lb_rule.sourceipaddress, state="Running")
# VSD Verification
self.verify_vsd_lb_device(int_lb_vm)
self.debug('Removing VMs from the Internal LB Rule - %s' %
int_lb_rule.name)
int_lb_rule.remove(self.api_client, vms=[internal_vm])
with self.assertRaises(Exception):
self.validate_Internal_LB_Rule(int_lb_rule, vm_array=[internal_vm])
self.debug("VMs successfully removed from the Internal LB Rule in "
"CloudStack")
# Validating InternalLbVm state
self.check_InternalLbVm_state(
internal_tier, int_lb_rule.sourceipaddress, state="Running")
# VSD Verification
self.verify_vsd_lb_device(int_lb_vm)
self.debug('Deleting the Internal LB Rule - %s' % int_lb_rule.name)
int_lb_rule.delete(self.api_client)
with self.assertRaises(Exception):
self.validate_Internal_LB_Rule(int_lb_rule)
self.debug("Internal LB Rule successfully deleted in CloudStack")
# Validating InternalLbVm un-deployment
with self.assertRaises(Exception):
self.check_InternalLbVm_state(
internal_tier, int_lb_rule.sourceipaddress)
self.debug("InternalLbVm successfully destroyed in CloudStack")
# VSD Verification
with self.assertRaises(Exception):
self.verify_vsd_lb_device(int_lb_vm)
self.debug("InternalLbVm successfully destroyed in VSD")
self.debug("Attaching a VM from a different tier to an Internal LB "
"Rule created on a tier...")
with self.assertRaises(Exception):
self.create_Internal_LB_Rule(internal_tier, vm_array=[public_vm])
self.debug("Internal LB Rule creation failed as the VM belongs to a "
"different network")
@attr(tags=["advanced", "nuagevsp"], required_hardware="true")
def test_05_nuage_internallb_traffic(self):
"""Test Nuage VSP VPC Internal LB functionality by performing (wget)
traffic tests within a VPC
"""
# 1. Create three different Internal LB Rules with a single source IP
# Address specified on the Internal tier, check if the Internal LB
# Rules are created successfully.
# 2. Attach a VM to the above created Internal LB Rules, check if the
# InternalLbVm is successfully deployed in the Internal tier.
# 3. Deploy two more VMs in the Internal tier, check if the VMs are
# successfully deployed.
# 4. Attach the newly deployed VMs to the above created Internal LB
# Rules, verify the validity of the above created Internal LB Rules
# over three Load Balanced VMs in the Internal tier.
# 5. Create the corresponding Network ACL rules to make the created
# Internal LB rules (SSH & HTTP) accessible, check if the Network
# ACL rules are successfully added to the internal tier.
# 6. Validate the Internal LB functionality by performing (wget)
# traffic tests from a VM in the Public tier to the Internal load
# balanced guest VMs in the Internal tier, using Static NAT
# functionality to access (ssh) the VM on the Public tier.
# 7. Verify that the InternalLbVm gets destroyed when the last Internal
# LB rule is removed from the Internal tier.
# 8. Repeat the above steps for one more Internal tier as well,
# validate the Internal LB functionality.
# 9. Delete all the created objects (cleanup).
# Creating a VPC offering
self.debug("Creating Nuage VSP VPC offering with Internal LB "
"service...")
vpc_off = self.create_VpcOffering(
self.test_data["nuagevsp"]["vpc_offering_lb"])
self.validate_VpcOffering(vpc_off, state="Enabled")
# Creating a VPC
self.debug("Creating a VPC with Internal LB service...")
vpc = self.create_Vpc(vpc_off, cidr='10.1.0.0/16')
self.validate_Vpc(vpc, state="Enabled")
# Creating network offerings
self.debug("Creating Nuage VSP VPC Network offering with Internal LB "
"service...")
net_off_1 = self.create_NetworkOffering(
self.test_data["nuagevsp"]["vpc_network_offering_internal_lb"])
self.validate_NetworkOffering(net_off_1, state="Enabled")
self.debug("Creating Nuage VSP VPC Network offering without Internal "
"LB service...")
net_off_2 = self.create_NetworkOffering(
self.test_data["nuagevsp"]["vpc_network_offering"])
self.validate_NetworkOffering(net_off_2, state="Enabled")
# Creating VPC networks in the VPC, and deploying VMs
self.debug("Creating a VPC network with Internal LB service...")
internal_tier_1 = self.create_Network(
net_off_1, gateway='10.1.1.1', vpc=vpc)
self.validate_Network(internal_tier_1, state="Implemented")
vr = self.get_Router(internal_tier_1)
self.check_Router_state(vr, state="Running")
self.debug("Deploying a VM in the created VPC network...")
internal_vm_1 = self.create_VM(internal_tier_1)
self.check_VM_state(internal_vm_1, state="Running")
# VSD verification
self.verify_vsd_network(self.domain.id, internal_tier_1, vpc)
self.verify_vsd_router(vr)
self.verify_vsd_vm(internal_vm_1)
self.debug("Creating one more VPC network with Internal LB service...")
internal_tier_2 = self.create_Network(
net_off_1, gateway='10.1.2.1', vpc=vpc)
self.validate_Network(internal_tier_2, state="Implemented")
vr = self.get_Router(internal_tier_2)
self.check_Router_state(vr, state="Running")
self.debug("Deploying a VM in the created VPC network...")
internal_vm_2 = self.create_VM(internal_tier_2)
self.check_VM_state(internal_vm_2, state="Running")
# VSD verification
self.verify_vsd_network(self.domain.id, internal_tier_2, vpc)
self.verify_vsd_router(vr)
self.verify_vsd_vm(internal_vm_2)
self.debug("Creating a VPC network without Internal LB service...")
public_tier = self.create_Network(
net_off_2, gateway='10.1.3.1', vpc=vpc)
self.validate_Network(public_tier, state="Implemented")
vr = self.get_Router(public_tier)
self.check_Router_state(vr, state="Running")
self.debug("Deploying a VM in the created VPC network...")
public_vm = self.create_VM(public_tier)
self.check_VM_state(public_vm, state="Running")
# VSD verification
self.verify_vsd_network(self.domain.id, public_tier, vpc)
self.verify_vsd_router(vr)
self.verify_vsd_vm(public_vm)
# Creating Internal LB Rules in the Internal tiers
self.debug("Creating three Internal LB Rules (SSH & HTTP) using the "
"same Load Balancing source IP Address...")
int_lb_rule_1 = self.create_Internal_LB_Rule(
internal_tier_1, vm_array=[internal_vm_1])
self.validate_Internal_LB_Rule(
int_lb_rule_1, state="Active", vm_array=[internal_vm_1])
int_lb_rule_2 = self.create_Internal_LB_Rule(
internal_tier_1, vm_array=[internal_vm_1],
services=self.test_data["internal_lbrule_http"],
source_ip=int_lb_rule_1.sourceipaddress)
self.validate_Internal_LB_Rule(
int_lb_rule_2, state="Active", vm_array=[internal_vm_1])
internal_lbrule_http = copy.deepcopy(
self.test_data["internal_lbrule_http"])
internal_lbrule_http["sourceport"] = 8080
internal_lbrule_http["instanceport"] = 8080
int_lb_rule_3 = self.create_Internal_LB_Rule(
internal_tier_1,
vm_array=[internal_vm_1],
services=internal_lbrule_http,
source_ip=int_lb_rule_1.sourceipaddress)
self.validate_Internal_LB_Rule(
int_lb_rule_3, state="Active", vm_array=[internal_vm_1])
# Validating InternalLbVm deployment and state
int_lb_vm_1 = self.get_InternalLbVm(
internal_tier_1, int_lb_rule_1.sourceipaddress)
self.check_InternalLbVm_state(
internal_tier_1, int_lb_rule_1.sourceipaddress, state="Running")
# VSD Verification
self.verify_vsd_lb_device(int_lb_vm_1)
# Deploying more VMs in the Internal tier
self.debug("Deploying two more VMs in network - %s" %
internal_tier_1.name)
internal_vm_1_1 = self.create_VM(internal_tier_1)
internal_vm_1_2 = self.create_VM(internal_tier_1)
# VSD verification
self.verify_vsd_vm(internal_vm_1_1)
self.verify_vsd_vm(internal_vm_1_2)
# Adding newly deployed VMs to the created Internal LB rules
self.debug("Adding two more virtual machines to the created Internal "
"LB rules...")
int_lb_rule_1.assign(
self.api_client, [internal_vm_1_1, internal_vm_1_2])
self.validate_Internal_LB_Rule(
int_lb_rule_1, state="Active",
vm_array=[internal_vm_1, internal_vm_1_1, internal_vm_1_2])
int_lb_rule_2.assign(
self.api_client, [internal_vm_1_1, internal_vm_1_2])
self.validate_Internal_LB_Rule(
int_lb_rule_2, state="Active",
vm_array=[internal_vm_1, internal_vm_1_1, internal_vm_1_2])
int_lb_rule_3.assign(
self.api_client, [internal_vm_1_1, internal_vm_1_2])
self.validate_Internal_LB_Rule(
int_lb_rule_3, state="Active",
vm_array=[internal_vm_1, internal_vm_1_1, internal_vm_1_2])
# Validating InternalLbVm state
self.check_InternalLbVm_state(
internal_tier_1, int_lb_rule_1.sourceipaddress, state="Running")
# VSD Verification
self.verify_vsd_lb_device(int_lb_vm_1)
# Adding Network ACL rules in the Internal tier
self.debug("Adding Network ACL rules to make the created Internal LB "
"rules (HTTP) accessible...")
http_rule_1 = self.create_NetworkAclRule(
self.test_data["http_rule"], network=internal_tier_1)
http_rule = copy.deepcopy(self.test_data["http_rule"])
http_rule["privateport"] = 8080
http_rule["publicport"] = 8080
http_rule["startport"] = 8080
http_rule["endport"] = 8080
http_rule_2 = self.create_NetworkAclRule(
http_rule, network=internal_tier_1)
# VSD verification
self.verify_vsd_firewall_rule(http_rule_1)
self.verify_vsd_firewall_rule(http_rule_2)
# Creating Internal LB Rules in the Internal tier
self.debug("Creating three Internal LB Rules (SSH & HTTP) using the "
"same Load Balancing source IP Address...")
int_lb_rule_4 = self.create_Internal_LB_Rule(
internal_tier_2, vm_array=[internal_vm_2])
self.validate_Internal_LB_Rule(
int_lb_rule_4, state="Active", vm_array=[internal_vm_2])
int_lb_rule_5 = self.create_Internal_LB_Rule(
internal_tier_2,
vm_array=[internal_vm_2],
services=self.test_data["internal_lbrule_http"],
source_ip=int_lb_rule_4.sourceipaddress)
self.validate_Internal_LB_Rule(
int_lb_rule_5, state="Active", vm_array=[internal_vm_2])
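        # Reuse the port-8080 HTTP LB rule test data cloned earlier for the
        # third rule in this tier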
int_lb_rule_6 = self.create_Internal_LB_Rule(
internal_tier_2,
vm_array=[internal_vm_2],
services=internal_lbrule_http,
source_ip=int_lb_rule_4.sourceipaddress)
self.validate_Internal_LB_Rule(
int_lb_rule_6, state="Active", vm_array=[internal_vm_2])
# Validating InternalLbVm deployment and state
int_lb_vm_2 = self.get_InternalLbVm(
internal_tier_2, int_lb_rule_4.sourceipaddress)
self.check_InternalLbVm_state(
internal_tier_2, int_lb_rule_4.sourceipaddress, state="Running")
# VSD Verification
self.verify_vsd_lb_device(int_lb_vm_2)
# Deploying more VMs in the Internal tier
self.debug("Deploying two more VMs in network - %s" %
internal_tier_2.name)
internal_vm_2_1 = self.create_VM(internal_tier_2)
internal_vm_2_2 = self.create_VM(internal_tier_2)
# VSD verification
self.verify_vsd_vm(internal_vm_2_1)
self.verify_vsd_vm(internal_vm_2_2)
# Adding newly deployed VMs to the created Internal LB rules
self.debug("Adding two more virtual machines to the created Internal "
"LB rules...")
int_lb_rule_4.assign(
self.api_client, [internal_vm_2_1, internal_vm_2_2])
self.validate_Internal_LB_Rule(
int_lb_rule_4, state="Active",
vm_array=[internal_vm_2, internal_vm_2_1, internal_vm_2_2])
int_lb_rule_5.assign(
self.api_client, [internal_vm_2_1, internal_vm_2_2])
self.validate_Internal_LB_Rule(
int_lb_rule_5, state="Active",
vm_array=[internal_vm_2, internal_vm_2_1, internal_vm_2_2])
int_lb_rule_6.assign(
self.api_client, [internal_vm_2_1, internal_vm_2_2])
self.validate_Internal_LB_Rule(
int_lb_rule_6, state="Active",
vm_array=[internal_vm_2, internal_vm_2_1, internal_vm_2_2])
# Validating InternalLbVm state
self.check_InternalLbVm_state(
internal_tier_2, int_lb_rule_4.sourceipaddress, state="Running")
# VSD Verification
self.verify_vsd_lb_device(int_lb_vm_2)
# Adding Network ACL rules in the Internal tier
self.debug("Adding Network ACL rules to make the created Internal LB "
"rules (HTTP) accessible...")
http_rule_1 = self.create_NetworkAclRule(
self.test_data["http_rule"], network=internal_tier_2)
http_rule_2 = self.create_NetworkAclRule(
http_rule, network=internal_tier_2)
# VSD verification
self.verify_vsd_firewall_rule(http_rule_1)
self.verify_vsd_firewall_rule(http_rule_2)
# Verifying Internal Load Balanced VMs ingress traffic
# (SSH into VM via Static NAT rule)
self.debug("Verifying Internal Load Balanced VMs ingress traffic "
"(SSH into VM via Static NAT rule)...")
self.verify_vpc_vm_ingress_traffic(
internal_vm_1, internal_tier_1, vpc)
self.verify_vpc_vm_ingress_traffic(
internal_vm_1_1, internal_tier_1, vpc)
self.verify_vpc_vm_ingress_traffic(
internal_vm_1_2, internal_tier_1, vpc)
self.verify_vpc_vm_ingress_traffic(
internal_vm_2, internal_tier_2, vpc)
self.verify_vpc_vm_ingress_traffic(
internal_vm_2_1, internal_tier_2, vpc)
self.verify_vpc_vm_ingress_traffic(
internal_vm_2_2, internal_tier_2, vpc)
# Creating Static NAT rule for the VM in the Public tier
public_ip = self.acquire_PublicIPAddress(public_tier, vpc)
self.validate_PublicIPAddress(public_ip, public_tier)
self.create_StaticNatRule_For_VM(public_vm, public_ip, public_tier)
self.validate_PublicIPAddress(
public_ip, public_tier, static_nat=True, vm=public_vm)
# VSD verification
self.verify_vsd_floating_ip(
public_tier, public_vm, public_ip.ipaddress, vpc)
# Adding Network ACL rule in the Public tier
self.debug("Adding Network ACL rule to make the created NAT rule "
"(SSH) accessible...")
public_ssh_rule = self.create_NetworkAclRule(
self.test_data["ingress_rule"], network=public_tier)
# VSD verification
self.verify_vsd_firewall_rule(public_ssh_rule)
# Internal LB (wget) traffic tests
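        # Re-establish the SSH session into the public VM before each wget test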
ssh_client = self.ssh_into_VM(public_vm, public_ip)
wget_file_1 = self.wget_from_vm_cmd(
ssh_client,
int_lb_rule_1.sourceipaddress,
self.test_data["http_rule"]["publicport"])
ssh_client = self.ssh_into_VM(public_vm, public_ip)
wget_file_2 = self.wget_from_vm_cmd(
ssh_client,
int_lb_rule_1.sourceipaddress,
http_rule["publicport"])
ssh_client = self.ssh_into_VM(public_vm, public_ip)
wget_file_3 = self.wget_from_vm_cmd(
ssh_client,
int_lb_rule_4.sourceipaddress,
self.test_data["http_rule"]["publicport"])
ssh_client = self.ssh_into_VM(public_vm, public_ip)
wget_file_4 = self.wget_from_vm_cmd(
ssh_client,
int_lb_rule_4.sourceipaddress,
http_rule["publicport"])
# Verifying Internal LB (wget) traffic tests
self.verify_lb_wget_file(
wget_file_1, [internal_vm_1, internal_vm_1_1, internal_vm_1_2])
self.verify_lb_wget_file(
wget_file_2, [internal_vm_1, internal_vm_1_1, internal_vm_1_2])
self.verify_lb_wget_file(
wget_file_3, [internal_vm_2, internal_vm_2_1, internal_vm_2_2])
self.verify_lb_wget_file(
wget_file_4, [internal_vm_2, internal_vm_2_1, internal_vm_2_2])
@attr(tags=["advanced", "nuagevsp"], required_hardware="true")
def test_06_nuage_internallb_algorithms_traffic(self):
"""Test Nuage VSP VPC Internal LB functionality with different LB
algorithms by performing (wget) traffic tests within a VPC
"""
# Repeat the tests in the testcase "test_05_nuage_internallb_traffic"
# with different Internal LB algorithms:
# 1. Round Robin
# 2. Least connections
# 3. Source
# Verify the above Internal LB algorithms by performing multiple (wget)
# traffic tests within a VPC.
# Delete all the created objects (cleanup).
# Creating a VPC offering
self.debug("Creating Nuage VSP VPC offering with Internal LB "
"service...")
vpc_off = self.create_VpcOffering(
self.test_data["nuagevsp"]["vpc_offering_lb"])
self.validate_VpcOffering(vpc_off, state="Enabled")
# Creating a VPC
self.debug("Creating a VPC with Internal LB service...")
vpc = self.create_Vpc(vpc_off, cidr='10.1.0.0/16')
self.validate_Vpc(vpc, state="Enabled")
# Creating network offerings
self.debug("Creating Nuage VSP VPC Network offering with Internal LB "
"service...")
net_off_1 = self.create_NetworkOffering(
self.test_data["nuagevsp"]["vpc_network_offering_internal_lb"])
self.validate_NetworkOffering(net_off_1, state="Enabled")
self.debug("Creating Nuage VSP VPC Network offering without Internal "
"LB service...")
net_off_2 = self.create_NetworkOffering(
self.test_data["nuagevsp"]["vpc_network_offering"])
self.validate_NetworkOffering(net_off_2, state="Enabled")
# Creating VPC networks in the VPC, and deploying VMs
self.debug("Creating a VPC network with Internal LB service...")
internal_tier = self.create_Network(
net_off_1, gateway='10.1.1.1', vpc=vpc)
self.validate_Network(internal_tier, state="Implemented")
vr = self.get_Router(internal_tier)
self.check_Router_state(vr, state="Running")
self.debug("Deploying a VM in the created VPC network...")
internal_vm = self.create_VM(internal_tier)
self.check_VM_state(internal_vm, state="Running")
# VSD verification
self.verify_vsd_network(self.domain.id, internal_tier, vpc)
self.verify_vsd_router(vr)
self.verify_vsd_vm(internal_vm)
self.debug("Creating a VPC network without Internal LB service...")
public_tier = self.create_Network(
net_off_2, gateway='10.1.2.1', vpc=vpc)
self.validate_Network(public_tier, state="Implemented")
vr = self.get_Router(public_tier)
self.check_Router_state(vr, state="Running")
self.debug("Deploying a VM in the created VPC network...")
public_vm = self.create_VM(public_tier)
self.check_VM_state(public_vm, state="Running")
# VSD verification
self.verify_vsd_network(self.domain.id, public_tier, vpc)
self.verify_vsd_router(vr)
self.verify_vsd_vm(public_vm)
# Creating Internal LB Rules in the Internal tier with Round Robin
# Algorithm
self.debug("Creating two Internal LB Rules (SSH & HTTP) with Round "
"Robin Algorithm...")
int_lb_rule_1 = self.create_Internal_LB_Rule(
internal_tier, vm_array=[internal_vm])
self.validate_Internal_LB_Rule(
int_lb_rule_1, state="Active", vm_array=[internal_vm])
int_lb_rule_2 = self.create_Internal_LB_Rule(
internal_tier, vm_array=[internal_vm],
services=self.test_data["internal_lbrule_http"],
source_ip=int_lb_rule_1.sourceipaddress)
self.validate_Internal_LB_Rule(
int_lb_rule_2, state="Active", vm_array=[internal_vm])
# Validating InternalLbVm deployment and state
int_lb_vm_1 = self.get_InternalLbVm(
internal_tier, int_lb_rule_1.sourceipaddress)
self.check_InternalLbVm_state(
internal_tier, int_lb_rule_1.sourceipaddress, state="Running")
# VSD Verification
self.verify_vsd_lb_device(int_lb_vm_1)
# Deploying more VMs in the Internal tier
self.debug("Deploying two more VMs in network - %s" %
internal_tier.name)
internal_vm_1 = self.create_VM(internal_tier)
internal_vm_2 = self.create_VM(internal_tier)
# VSD verification
self.verify_vsd_vm(internal_vm_1)
self.verify_vsd_vm(internal_vm_2)
# Adding newly deployed VMs to the created Internal LB rules
self.debug("Adding two more virtual machines to the created Internal "
"LB rules...")
int_lb_rule_1.assign(self.api_client, [internal_vm_1, internal_vm_2])
self.validate_Internal_LB_Rule(
int_lb_rule_1, state="Active",
vm_array=[internal_vm, internal_vm_1, internal_vm_2])
int_lb_rule_2.assign(self.api_client, [internal_vm_1, internal_vm_2])
self.validate_Internal_LB_Rule(
int_lb_rule_2, state="Active",
vm_array=[internal_vm, internal_vm_1, internal_vm_2])
# Validating InternalLbVm state
self.check_InternalLbVm_state(
internal_tier, int_lb_rule_1.sourceipaddress, state="Running")
# VSD Verification
self.verify_vsd_lb_device(int_lb_vm_1)
# Creating Internal LB Rules in the Internal tier with Least
# connections Algorithm
self.debug("Creating two Internal LB Rules (SSH & HTTP) with Least "
"connections Algorithm...")
self.test_data["internal_lbrule"]["algorithm"] = "leastconn"
int_lb_rule_3 = self.create_Internal_LB_Rule(
internal_tier,
vm_array=[internal_vm, internal_vm_1, internal_vm_2],
services=self.test_data["internal_lbrule"])
self.validate_Internal_LB_Rule(
int_lb_rule_3, state="Active",
vm_array=[internal_vm, internal_vm_1, internal_vm_2])
self.test_data["internal_lbrule_http"]["algorithm"] = "leastconn"
int_lb_rule_4 = self.create_Internal_LB_Rule(
internal_tier,
vm_array=[internal_vm, internal_vm_1, internal_vm_2],
services=self.test_data["internal_lbrule_http"],
source_ip=int_lb_rule_3.sourceipaddress)
self.validate_Internal_LB_Rule(
int_lb_rule_4, state="Active",
vm_array=[internal_vm, internal_vm_1, internal_vm_2])
# Validating InternalLbVm deployment and state
int_lb_vm_2 = self.get_InternalLbVm(
internal_tier, int_lb_rule_3.sourceipaddress)
self.check_InternalLbVm_state(
internal_tier, int_lb_rule_3.sourceipaddress, state="Running")
# VSD Verification
self.verify_vsd_lb_device(int_lb_vm_2)
# Creating Internal LB Rules in the Internal tier with Source Algorithm
self.debug("Creating two Internal LB Rules (SSH & HTTP) with Source "
"Algorithm...")
self.test_data["internal_lbrule"]["algorithm"] = "source"
int_lb_rule_5 = self.create_Internal_LB_Rule(
internal_tier,
vm_array=[internal_vm, internal_vm_1, internal_vm_2],
services=self.test_data["internal_lbrule"])
self.validate_Internal_LB_Rule(
int_lb_rule_5, state="Active",
vm_array=[internal_vm, internal_vm_1, internal_vm_2])
self.test_data["internal_lbrule_http"]["algorithm"] = "source"
int_lb_rule_6 = self.create_Internal_LB_Rule(
internal_tier,
vm_array=[internal_vm, internal_vm_1, internal_vm_2],
services=self.test_data["internal_lbrule_http"],
source_ip=int_lb_rule_5.sourceipaddress)
self.validate_Internal_LB_Rule(
int_lb_rule_6, state="Active",
vm_array=[internal_vm, internal_vm_1, internal_vm_2])
# Validating InternalLbVm deployment and state
int_lb_vm_3 = self.get_InternalLbVm(
internal_tier, int_lb_rule_5.sourceipaddress)
self.check_InternalLbVm_state(
internal_tier, int_lb_rule_5.sourceipaddress, state="Running")
# VSD Verification
self.verify_vsd_lb_device(int_lb_vm_3)
# Adding Network ACL rules in the Internal tier
self.debug("Adding Network ACL rules to make the created Internal LB "
"rules (HTTP) accessible...")
http_rule = self.create_NetworkAclRule(
self.test_data["http_rule"], network=internal_tier)
# VSD verification
self.verify_vsd_firewall_rule(http_rule)
# Verifying Internal Load Balanced VMs ingress traffic
# (SSH into VM via Static NAT rule)
self.debug("Verifying Internal Load Balanced VMs ingress traffic "
"(SSH into VM via Static NAT rule)...")
self.verify_vpc_vm_ingress_traffic(internal_vm, internal_tier, vpc)
self.verify_vpc_vm_ingress_traffic(internal_vm_1, internal_tier, vpc)
self.verify_vpc_vm_ingress_traffic(internal_vm_2, internal_tier, vpc)
# Creating Static NAT rule for the VM in the Public tier
public_ip = self.acquire_PublicIPAddress(public_tier, vpc)
self.validate_PublicIPAddress(public_ip, public_tier)
self.create_StaticNatRule_For_VM(public_vm, public_ip, public_tier)
self.validate_PublicIPAddress(
public_ip, public_tier, static_nat=True, vm=public_vm)
# VSD verification
self.verify_vsd_floating_ip(
public_tier, public_vm, public_ip.ipaddress, vpc)
# Adding Network ACL rule in the Public tier
self.debug("Adding Network ACL rule to make the created NAT rule "
"(SSH) accessible...")
public_ssh_rule = self.create_NetworkAclRule(
self.test_data["ingress_rule"], network=public_tier)
# VSD verification
self.verify_vsd_firewall_rule(public_ssh_rule)
# Internal LB (wget) traffic tests with Round Robin Algorithm
ssh_client = self.ssh_into_VM(public_vm, public_ip)
self.validate_internallb_algorithm_traffic(
ssh_client, int_lb_rule_1.sourceipaddress,
self.test_data["http_rule"]["publicport"],
[internal_vm, internal_vm_1, internal_vm_2], "roundrobin")
# Internal LB (wget) traffic tests with Least connections Algorithm
ssh_client = self.ssh_into_VM(public_vm, public_ip)
self.validate_internallb_algorithm_traffic(
ssh_client, int_lb_rule_3.sourceipaddress,
self.test_data["http_rule"]["publicport"],
[internal_vm, internal_vm_1, internal_vm_2], "leastconn")
# Internal LB (wget) traffic tests with Source Algorithm
ssh_client = self.ssh_into_VM(public_vm, public_ip)
self.validate_internallb_algorithm_traffic(
ssh_client, int_lb_rule_5.sourceipaddress,
self.test_data["http_rule"]["publicport"],
[internal_vm, internal_vm_1, internal_vm_2], "source")
@attr(tags=["advanced", "nuagevsp"], required_hardware="true")
def test_07_nuage_internallb_vpc_network_restarts_traffic(self):
"""Test Nuage VSP VPC Internal LB functionality with restarts of VPC
network components by performing (wget) traffic tests within a VPC
"""
# Repeat the tests in the testcase "test_05_nuage_internallb_traffic"
# with restarts of VPC networks (tiers):
# 1. Restart tier with InternalLbVm (cleanup = false), verify that the
# InternalLbVm gets destroyed and deployed again in the Internal
# tier.
# 2. Restart tier with InternalLbVm (cleanup = true), verify that the
# InternalLbVm gets destroyed and deployed again in the Internal
# tier.
# 3. Restart tier without InternalLbVm (cleanup = false), verify that
# this restart has no effect on the InternalLbVm functionality.
# 4. Restart tier without InternalLbVm (cleanup = true), verify that
# this restart has no effect on the InternalLbVm functionality.
# 5. Stop all the VMs configured with InternalLbVm, verify that the
# InternalLbVm gets destroyed in the Internal tier.
# 6. Start all the VMs configured with InternalLbVm, verify that the
# InternalLbVm gets deployed again in the Internal tier.
# 7. Restart VPC (cleanup = false), verify that the VPC VR gets
# rebooted and this restart has no effect on the InternalLbVm
# functionality.
        # 8. Restart VPC (cleanup = true), verify that the VPC VR gets rebooted
# and this restart has no effect on the InternalLbVm functionality.
# Verify the above restarts of VPC networks (tiers) by performing
# (wget) traffic tests within a VPC.
# Delete all the created objects (cleanup).
# Creating a VPC offering
self.debug("Creating Nuage VSP VPC offering with Internal LB "
"service...")
vpc_off = self.create_VpcOffering(
self.test_data["nuagevsp"]["vpc_offering_lb"])
self.validate_VpcOffering(vpc_off, state="Enabled")
# Creating a VPC
self.debug("Creating a VPC with Internal LB service...")
vpc = self.create_Vpc(vpc_off, cidr='10.1.0.0/16')
self.validate_Vpc(vpc, state="Enabled")
# Creating network offerings
self.debug("Creating Nuage VSP VPC Network offering with Internal LB "
"service...")
net_off_1 = self.create_NetworkOffering(
self.test_data["nuagevsp"]["vpc_network_offering_internal_lb"])
self.validate_NetworkOffering(net_off_1, state="Enabled")
self.debug("Creating Nuage VSP VPC Network offering without Internal "
"LB service...")
net_off_2 = self.create_NetworkOffering(
self.test_data["nuagevsp"]["vpc_network_offering"])
self.validate_NetworkOffering(net_off_2, state="Enabled")
# Creating VPC networks in the VPC, and deploying VMs
self.debug("Creating a VPC network with Internal LB service...")
internal_tier = self.create_Network(
net_off_1, gateway='10.1.1.1', vpc=vpc)
self.validate_Network(internal_tier, state="Implemented")
vr = self.get_Router(internal_tier)
self.check_Router_state(vr, state="Running")
self.debug("Deploying a VM in the created VPC network...")
internal_vm = self.create_VM(internal_tier)
self.check_VM_state(internal_vm, state="Running")
# VSD verification
self.verify_vsd_network(self.domain.id, internal_tier, vpc)
self.verify_vsd_router(vr)
self.verify_vsd_vm(internal_vm)
self.debug("Creating a VPC network without Internal LB service...")
public_tier = self.create_Network(
net_off_2, gateway='10.1.2.1', vpc=vpc)
self.validate_Network(public_tier, state="Implemented")
vr = self.get_Router(public_tier)
self.check_Router_state(vr, state="Running")
self.debug("Deploying a VM in the created VPC network...")
public_vm = self.create_VM(public_tier)
self.check_VM_state(public_vm, state="Running")
# VSD verification
self.verify_vsd_network(self.domain.id, public_tier, vpc)
self.verify_vsd_router(vr)
self.verify_vsd_vm(public_vm)
# Creating Internal LB Rules in the Internal tier
self.debug("Creating two Internal LB Rules (SSH & HTTP) using the "
"same Load Balancing source IP Address...")
int_lb_rule_1 = self.create_Internal_LB_Rule(
internal_tier, vm_array=[internal_vm])
self.validate_Internal_LB_Rule(
int_lb_rule_1, state="Active", vm_array=[internal_vm])
int_lb_rule_2 = self.create_Internal_LB_Rule(
internal_tier, vm_array=[internal_vm],
services=self.test_data["internal_lbrule_http"],
source_ip=int_lb_rule_1.sourceipaddress)
self.validate_Internal_LB_Rule(
int_lb_rule_2, state="Active", vm_array=[internal_vm])
# Validating InternalLbVm deployment and state
int_lb_vm = self.get_InternalLbVm(
internal_tier, int_lb_rule_1.sourceipaddress)
self.check_InternalLbVm_state(
internal_tier, int_lb_rule_1.sourceipaddress, state="Running")
# VSD Verification
self.verify_vsd_lb_device(int_lb_vm)
# Deploying more VMs in the Internal tier
self.debug("Deploying two more VMs in network - %s" %
internal_tier.name)
internal_vm_1 = self.create_VM(internal_tier)
internal_vm_2 = self.create_VM(internal_tier)
# VSD verification
self.verify_vsd_vm(internal_vm_1)
self.verify_vsd_vm(internal_vm_2)
# Adding newly deployed VMs to the created Internal LB rules
self.debug("Adding two more virtual machines to the created Internal "
"LB rules...")
int_lb_rule_1.assign(self.api_client, [internal_vm_1, internal_vm_2])
self.validate_Internal_LB_Rule(
int_lb_rule_1, state="Active",
vm_array=[internal_vm, internal_vm_1, internal_vm_2])
int_lb_rule_2.assign(self.api_client, [internal_vm_1, internal_vm_2])
self.validate_Internal_LB_Rule(
int_lb_rule_2, state="Active",
vm_array=[internal_vm, internal_vm_1, internal_vm_2])
# Validating InternalLbVm state
self.check_InternalLbVm_state(
internal_tier, int_lb_rule_1.sourceipaddress, state="Running")
# VSD Verification
self.verify_vsd_lb_device(int_lb_vm)
# Adding Network ACL rules in the Internal tier
self.debug("Adding Network ACL rules to make the created Internal LB "
"rules (HTTP) accessible...")
http_rule = self.create_NetworkAclRule(
self.test_data["http_rule"], network=internal_tier)
# VSD verification
self.verify_vsd_firewall_rule(http_rule)
# Verifying Internal Load Balanced VMs ingress traffic
# (SSH into VM via Static NAT rule)
self.debug("Verifying Internal Load Balanced VMs ingress traffic "
"(SSH into VM via Static NAT rule)...")
self.verify_vpc_vm_ingress_traffic(internal_vm, internal_tier, vpc)
self.verify_vpc_vm_ingress_traffic(internal_vm_1, internal_tier, vpc)
self.verify_vpc_vm_ingress_traffic(internal_vm_2, internal_tier, vpc)
# Creating Static NAT rule for the VM in the Public tier
public_ip = self.acquire_PublicIPAddress(public_tier, vpc)
self.validate_PublicIPAddress(public_ip, public_tier)
self.create_StaticNatRule_For_VM(public_vm, public_ip, public_tier)
self.validate_PublicIPAddress(
public_ip, public_tier, static_nat=True, vm=public_vm)
# VSD verification
self.verify_vsd_floating_ip(
public_tier, public_vm, public_ip.ipaddress, vpc)
# Adding Network ACL rule in the Public tier
self.debug("Adding Network ACL rule to make the created NAT rule "
"(SSH) accessible...")
public_ssh_rule = self.create_NetworkAclRule(
self.test_data["ingress_rule"], network=public_tier)
# VSD verification
self.verify_vsd_firewall_rule(public_ssh_rule)
# Internal LB (wget) traffic test
ssh_client = self.ssh_into_VM(public_vm, public_ip)
wget_file = self.wget_from_vm_cmd(
ssh_client, int_lb_rule_1.sourceipaddress,
self.test_data["http_rule"]["publicport"])
# Verifying Internal LB (wget) traffic test
self.verify_lb_wget_file(
wget_file, [internal_vm, internal_vm_1, internal_vm_2])
# Restart Internal tier (cleanup = false)
# InternalLbVm gets destroyed and deployed again in the Internal tier
self.debug("Restarting the Internal tier without cleanup...")
Network.restart(internal_tier, self.api_client, cleanup=False)
self.validate_Network(internal_tier, state="Implemented")
self.check_Router_state(vr, state="Running")
self.check_VM_state(internal_vm, state="Running")
self.check_VM_state(internal_vm_1, state="Running")
self.check_VM_state(internal_vm_2, state="Running")
# VSD verification
self.verify_vsd_network(self.domain.id, internal_tier, vpc)
self.verify_vsd_router(vr)
self.verify_vsd_vm(internal_vm)
self.verify_vsd_vm(internal_vm_1)
self.verify_vsd_vm(internal_vm_2)
self.verify_vsd_firewall_rule(http_rule)
# Validating InternalLbVm state
# InternalLbVm gets destroyed and deployed again in the Internal tier
int_lb_vm = self.get_InternalLbVm(
internal_tier, int_lb_rule_1.sourceipaddress)
self.check_InternalLbVm_state(
internal_tier, int_lb_rule_1.sourceipaddress, state="Running")
# VSD Verification
self.verify_vsd_lb_device(int_lb_vm)
# Verifying Internal Load Balanced VMs ingress traffic
# (SSH into VM via Static NAT rule)
self.debug("Verifying Internal Load Balanced VMs ingress traffic "
"(SSH into VM via Static NAT rule)...")
self.verify_vpc_vm_ingress_traffic(internal_vm, internal_tier, vpc)
self.verify_vpc_vm_ingress_traffic(internal_vm_1, internal_tier, vpc)
self.verify_vpc_vm_ingress_traffic(internal_vm_2, internal_tier, vpc)
# Internal LB (wget) traffic test
ssh_client = self.ssh_into_VM(public_vm, public_ip)
wget_file = self.wget_from_vm_cmd(
ssh_client, int_lb_rule_1.sourceipaddress,
self.test_data["http_rule"]["publicport"])
# Verifying Internal LB (wget) traffic test
self.verify_lb_wget_file(
wget_file, [internal_vm, internal_vm_1, internal_vm_2])
# Restart Internal tier (cleanup = true)
# InternalLbVm gets destroyed and deployed again in the Internal tier
self.debug("Restarting the Internal tier with cleanup...")
Network.restart(internal_tier, self.api_client, cleanup=True)
self.validate_Network(internal_tier, state="Implemented")
self.check_Router_state(vr, state="Running")
self.check_VM_state(internal_vm, state="Running")
self.check_VM_state(internal_vm_1, state="Running")
self.check_VM_state(internal_vm_2, state="Running")
# VSD verification
self.verify_vsd_network(self.domain.id, internal_tier, vpc)
self.verify_vsd_router(vr)
self.verify_vsd_vm(internal_vm)
self.verify_vsd_vm(internal_vm_1)
self.verify_vsd_vm(internal_vm_2)
self.verify_vsd_firewall_rule(http_rule)
# Validating InternalLbVm state
# InternalLbVm gets destroyed and deployed again in the Internal tier
int_lb_vm = self.get_InternalLbVm(
internal_tier, int_lb_rule_1.sourceipaddress)
self.check_InternalLbVm_state(
internal_tier, int_lb_rule_1.sourceipaddress, state="Running")
# VSD Verification
self.verify_vsd_lb_device(int_lb_vm)
# Verifying Internal Load Balanced VMs ingress traffic
# (SSH into VM via Static NAT rule)
self.debug("Verifying Internal Load Balanced VMs ingress traffic "
"(SSH into VM via Static NAT rule)...")
self.verify_vpc_vm_ingress_traffic(internal_vm, internal_tier, vpc)
self.verify_vpc_vm_ingress_traffic(internal_vm_1, internal_tier, vpc)
self.verify_vpc_vm_ingress_traffic(internal_vm_2, internal_tier, vpc)
# Internal LB (wget) traffic test
ssh_client = self.ssh_into_VM(public_vm, public_ip)
wget_file = self.wget_from_vm_cmd(
ssh_client, int_lb_rule_1.sourceipaddress,
self.test_data["http_rule"]["publicport"])
# Verifying Internal LB (wget) traffic test
self.verify_lb_wget_file(
wget_file, [internal_vm, internal_vm_1, internal_vm_2])
# Restart Public tier (cleanup = false)
# This restart has no effect on the InternalLbVm functionality
self.debug("Restarting the Public tier without cleanup...")
Network.restart(public_tier, self.api_client, cleanup=False)
self.validate_Network(public_tier, state="Implemented")
self.check_Router_state(vr, state="Running")
self.check_VM_state(public_vm, state="Running")
self.validate_PublicIPAddress(
public_ip, public_tier, static_nat=True, vm=public_vm)
# VSD verification
self.verify_vsd_network(self.domain.id, public_tier, vpc)
self.verify_vsd_router(vr)
self.verify_vsd_vm(public_vm)
self.verify_vsd_floating_ip(
public_tier, public_vm, public_ip.ipaddress, vpc)
self.verify_vsd_firewall_rule(public_ssh_rule)
# Validating InternalLbVm state
self.check_InternalLbVm_state(
internal_tier, int_lb_rule_1.sourceipaddress, state="Running")
# VSD Verification
self.verify_vsd_lb_device(int_lb_vm)
# Verifying Internal Load Balanced VMs ingress traffic
# (SSH into VM via Static NAT rule)
self.debug("Verifying Internal Load Balanced VMs ingress traffic "
"(SSH into VM via Static NAT rule)...")
self.verify_vpc_vm_ingress_traffic(internal_vm, internal_tier, vpc)
self.verify_vpc_vm_ingress_traffic(internal_vm_1, internal_tier, vpc)
self.verify_vpc_vm_ingress_traffic(internal_vm_2, internal_tier, vpc)
# Internal LB (wget) traffic test
ssh_client = self.ssh_into_VM(public_vm, public_ip)
wget_file = self.wget_from_vm_cmd(
ssh_client, int_lb_rule_1.sourceipaddress,
self.test_data["http_rule"]["publicport"])
# Verifying Internal LB (wget) traffic test
self.verify_lb_wget_file(
wget_file, [internal_vm, internal_vm_1, internal_vm_2])
# Restart Public tier (cleanup = true)
# This restart has no effect on the InternalLbVm functionality
self.debug("Restarting the Public tier with cleanup...")
Network.restart(public_tier, self.api_client, cleanup=True)
self.validate_Network(public_tier, state="Implemented")
self.check_Router_state(vr, state="Running")
self.check_VM_state(public_vm, state="Running")
self.validate_PublicIPAddress(
public_ip, public_tier, static_nat=True, vm=public_vm)
# VSD verification
self.verify_vsd_network(self.domain.id, public_tier, vpc)
self.verify_vsd_router(vr)
self.verify_vsd_vm(public_vm)
self.verify_vsd_floating_ip(
public_tier, public_vm, public_ip.ipaddress, vpc)
self.verify_vsd_firewall_rule(public_ssh_rule)
# Validating InternalLbVm state
self.check_InternalLbVm_state(
internal_tier, int_lb_rule_1.sourceipaddress, state="Running")
# VSD Verification
self.verify_vsd_lb_device(int_lb_vm)
# Verifying Internal Load Balanced VMs ingress traffic
# (SSH into VM via Static NAT rule)
self.debug("Verifying Internal Load Balanced VMs ingress traffic "
"(SSH into VM via Static NAT rule)...")
self.verify_vpc_vm_ingress_traffic(internal_vm, internal_tier, vpc)
self.verify_vpc_vm_ingress_traffic(internal_vm_1, internal_tier, vpc)
self.verify_vpc_vm_ingress_traffic(internal_vm_2, internal_tier, vpc)
# Internal LB (wget) traffic test
ssh_client = self.ssh_into_VM(public_vm, public_ip)
wget_file = self.wget_from_vm_cmd(
ssh_client, int_lb_rule_1.sourceipaddress,
self.test_data["http_rule"]["publicport"])
# Verifying Internal LB (wget) traffic test
self.verify_lb_wget_file(
wget_file, [internal_vm, internal_vm_1, internal_vm_2])
# Stopping VMs in the Internal tier
# wget traffic test fails as all the VMs in the Internal tier are in
# stopped state
self.debug("Stopping all the VMs in the Internal tier...")
internal_vm.stop(self.api_client)
internal_vm_1.stop(self.api_client)
internal_vm_2.stop(self.api_client)
self.validate_Network(internal_tier, state="Implemented")
self.check_Router_state(vr, state="Running")
self.check_VM_state(internal_vm, state="Stopped")
self.check_VM_state(internal_vm_1, state="Stopped")
self.check_VM_state(internal_vm_2, state="Stopped")
# VSD verification
self.verify_vsd_network(self.domain.id, internal_tier, vpc)
self.verify_vsd_router(vr)
self.verify_vsd_vm(internal_vm, stopped=True)
self.verify_vsd_vm(internal_vm_1, stopped=True)
self.verify_vsd_vm(internal_vm_2, stopped=True)
self.verify_vsd_firewall_rule(http_rule)
# Validating InternalLbVm state
self.check_InternalLbVm_state(
internal_tier, int_lb_rule_1.sourceipaddress, state="Running")
# VSD Verification
self.verify_vsd_lb_device(int_lb_vm)
# Internal LB (wget) traffic test
ssh_client = self.ssh_into_VM(public_vm, public_ip)
wget_file = self.wget_from_vm_cmd(
ssh_client, int_lb_rule_1.sourceipaddress,
self.test_data["http_rule"]["publicport"])
# Verifying Internal LB (wget) traffic test
with self.assertRaises(Exception):
self.verify_lb_wget_file(
wget_file, [internal_vm, internal_vm_1, internal_vm_2])
self.debug("Failed to wget file as all the VMs in the Internal tier "
"are in stopped state")
# Starting VMs in the Internal tier
# wget traffic test succeeds as all the VMs in the Internal tier are
# back in running state
self.debug("Starting all the VMs in the Internal tier...")
internal_vm.start(self.api_client)
internal_vm_1.start(self.api_client)
internal_vm_2.start(self.api_client)
self.validate_Network(internal_tier, state="Implemented")
self.check_Router_state(vr, state="Running")
self.check_VM_state(internal_vm, state="Running")
self.check_VM_state(internal_vm_1, state="Running")
self.check_VM_state(internal_vm_2, state="Running")
# VSD verification
self.verify_vsd_network(self.domain.id, internal_tier, vpc)
self.verify_vsd_router(vr)
self.verify_vsd_vm(internal_vm)
self.verify_vsd_vm(internal_vm_1)
self.verify_vsd_vm(internal_vm_2)
self.verify_vsd_firewall_rule(http_rule)
# Validating InternalLbVm state
self.check_InternalLbVm_state(
internal_tier, int_lb_rule_1.sourceipaddress, state="Running")
# VSD Verification
self.verify_vsd_lb_device(int_lb_vm)
# Verifying Internal Load Balanced VMs ingress traffic
# (SSH into VM via Static NAT rule)
self.debug("Verifying Internal Load Balanced VMs ingress traffic "
"(SSH into VM via Static NAT rule)...")
self.verify_vpc_vm_ingress_traffic(internal_vm, internal_tier, vpc)
self.verify_vpc_vm_ingress_traffic(internal_vm_1, internal_tier, vpc)
self.verify_vpc_vm_ingress_traffic(internal_vm_2, internal_tier, vpc)
# Internal LB (wget) traffic test
ssh_client = self.ssh_into_VM(public_vm, public_ip)
tries = 0
while tries < 25:
wget_file = self.wget_from_vm_cmd(
ssh_client, int_lb_rule_1.sourceipaddress,
self.test_data["http_rule"]["publicport"])
if wget_file != "":
break
self.debug("Waiting for the InternalLbVm and all the VMs in the "
"Internal tier to be fully resolved for (wget) traffic "
"test...")
time.sleep(60)
tries += 1
# Verifying Internal LB (wget) traffic test
self.verify_lb_wget_file(
wget_file, [internal_vm, internal_vm_1, internal_vm_2])
# Restarting VPC (cleanup = false)
# VPC VR gets destroyed and deployed again in the VPC
# This restart has no effect on the InternalLbVm functionality
self.debug("Restarting the VPC without cleanup...")
self.restart_Vpc(vpc, cleanup=False)
self.validate_Network(public_tier, state="Implemented")
self.validate_Network(internal_tier, state="Implemented")
vr = self.get_Router(public_tier)
self.check_Router_state(vr, state="Running")
self.check_VM_state(public_vm, state="Running")
self.check_VM_state(internal_vm, state="Running")
self.check_VM_state(internal_vm_1, state="Running")
self.check_VM_state(internal_vm_2, state="Running")
self.validate_PublicIPAddress(
public_ip, public_tier, static_nat=True, vm=public_vm)
# VSD verification
self.verify_vsd_network(self.domain.id, public_tier, vpc)
self.verify_vsd_network(self.domain.id, internal_tier, vpc)
self.verify_vsd_router(vr)
self.verify_vsd_vm(public_vm)
self.verify_vsd_vm(internal_vm)
self.verify_vsd_vm(internal_vm_1)
self.verify_vsd_vm(internal_vm_2)
self.verify_vsd_floating_ip(
public_tier, public_vm, public_ip.ipaddress, vpc)
self.verify_vsd_firewall_rule(public_ssh_rule)
self.verify_vsd_firewall_rule(http_rule)
# Validating InternalLbVm state
self.check_InternalLbVm_state(
internal_tier, int_lb_rule_1.sourceipaddress, state="Running")
# VSD Verification
self.verify_vsd_lb_device(int_lb_vm)
# Verifying Internal Load Balanced VMs ingress traffic
# (SSH into VM via Static NAT rule)
self.debug("Verifying Internal Load Balanced VMs ingress traffic "
"(SSH into VM via Static NAT rule)...")
self.verify_vpc_vm_ingress_traffic(internal_vm, internal_tier, vpc)
self.verify_vpc_vm_ingress_traffic(internal_vm_1, internal_tier, vpc)
self.verify_vpc_vm_ingress_traffic(internal_vm_2, internal_tier, vpc)
# Internal LB (wget) traffic test
ssh_client = self.ssh_into_VM(public_vm, public_ip)
wget_file = self.wget_from_vm_cmd(
ssh_client, int_lb_rule_1.sourceipaddress,
self.test_data["http_rule"]["publicport"])
# Verifying Internal LB (wget) traffic test
self.verify_lb_wget_file(
wget_file, [internal_vm, internal_vm_1, internal_vm_2])
# Restarting VPC (cleanup = true)
# VPC VR gets destroyed and deployed again in the VPC
# This restart has no effect on the InternalLbVm functionality
self.debug("Restarting the VPC with cleanup...")
self.restart_Vpc(vpc, cleanup=True)
self.validate_Network(public_tier, state="Implemented")
self.validate_Network(internal_tier, state="Implemented")
vr = self.get_Router(public_tier)
self.check_Router_state(vr, state="Running")
self.check_VM_state(public_vm, state="Running")
self.check_VM_state(internal_vm, state="Running")
self.check_VM_state(internal_vm_1, state="Running")
self.check_VM_state(internal_vm_2, state="Running")
self.validate_PublicIPAddress(
public_ip, public_tier, static_nat=True, vm=public_vm)
# VSD verification
self.verify_vsd_network(self.domain.id, public_tier, vpc)
self.verify_vsd_network(self.domain.id, internal_tier, vpc)
self.verify_vsd_router(vr)
self.verify_vsd_vm(public_vm)
self.verify_vsd_vm(internal_vm)
self.verify_vsd_vm(internal_vm_1)
self.verify_vsd_vm(internal_vm_2)
self.verify_vsd_floating_ip(
public_tier, public_vm, public_ip.ipaddress, vpc)
self.verify_vsd_firewall_rule(public_ssh_rule)
self.verify_vsd_firewall_rule(http_rule)
# Validating InternalLbVm state
self.check_InternalLbVm_state(
internal_tier, int_lb_rule_1.sourceipaddress, state="Running")
# VSD Verification
self.verify_vsd_lb_device(int_lb_vm)
# Verifying Internal Load Balanced VMs ingress traffic
# (SSH into VM via Static NAT rule)
self.debug("Verifying Internal Load Balanced VMs ingress traffic "
"(SSH into VM via Static NAT rule)...")
self.verify_vpc_vm_ingress_traffic(internal_vm, internal_tier, vpc)
self.verify_vpc_vm_ingress_traffic(internal_vm_1, internal_tier, vpc)
self.verify_vpc_vm_ingress_traffic(internal_vm_2, internal_tier, vpc)
# Internal LB (wget) traffic test
ssh_client = self.ssh_into_VM(public_vm, public_ip)
wget_file = self.wget_from_vm_cmd(
ssh_client, int_lb_rule_1.sourceipaddress,
self.test_data["http_rule"]["publicport"])
# Verifying Internal LB (wget) traffic test
self.verify_lb_wget_file(
wget_file, [internal_vm, internal_vm_1, internal_vm_2])
@attr(tags=["advanced", "nuagevsp"], required_hardware="true")
def test_08_nuage_internallb_appliance_operations_traffic(self):
"""Test Nuage VSP VPC Internal LB functionality with InternalLbVm
appliance operations by performing (wget) traffic tests within a VPC
"""
# Repeat the tests in the testcase "test_05_nuage_internallb_traffic"
# with InternalLbVm appliance operations:
# 1. Verify the InternalLbVm deployment by creating the Internal LB
# Rules when the VPC VR is in Stopped state; the VPC VR has no effect
# on the InternalLbVm functionality.
# 2. Stop the InternalLbVm when the VPC VR is in Stopped State
# 3. Start the InternalLbVm when the VPC VR is in Stopped state
# 4. Stop the InternalLbVm when the VPC VR is in Running State
# 5. Start the InternalLbVm when the VPC VR is in Running state
# 6. Force stop the InternalLbVm when the VPC VR is in Running State
# 7. Start the InternalLbVm when the VPC VR is in Running state
# Verify the above InternalLbVm operations by performing (wget)
# traffic tests within the VPC.
# Delete all the created objects (cleanup).
# Creating a VPC offering
self.debug("Creating Nuage VSP VPC offering with Internal LB "
"service...")
vpc_off = self.create_VpcOffering(
self.test_data["nuagevsp"]["vpc_offering_lb"])
self.validate_VpcOffering(vpc_off, state="Enabled")
# Creating a VPC
self.debug("Creating a VPC with Internal LB service...")
vpc = self.create_Vpc(vpc_off, cidr='10.1.0.0/16')
self.validate_Vpc(vpc, state="Enabled")
# Creating network offerings
self.debug("Creating Nuage VSP VPC Network offering with Internal LB "
"service...")
net_off_1 = self.create_NetworkOffering(
self.test_data["nuagevsp"]["vpc_network_offering_internal_lb"])
self.validate_NetworkOffering(net_off_1, state="Enabled")
self.debug("Creating Nuage VSP VPC Network offering without Internal "
"LB service...")
net_off_2 = self.create_NetworkOffering(
self.test_data["nuagevsp"]["vpc_network_offering"])
self.validate_NetworkOffering(net_off_2, state="Enabled")
# Creating VPC networks in the VPC, and deploying VMs
self.debug("Creating a VPC network with Internal LB service...")
internal_tier = self.create_Network(
net_off_1, gateway='10.1.1.1', vpc=vpc)
self.validate_Network(internal_tier, state="Implemented")
vr = self.get_Router(internal_tier)
self.check_Router_state(vr, state="Running")
self.debug("Deploying a VM in the created VPC network...")
internal_vm = self.create_VM(internal_tier)
self.check_VM_state(internal_vm, state="Running")
# VSD verification
self.verify_vsd_network(self.domain.id, internal_tier, vpc)
self.verify_vsd_router(vr)
self.verify_vsd_vm(internal_vm)
self.debug("Creating a VPC network without Internal LB service...")
public_tier = self.create_Network(
net_off_2, gateway='10.1.2.1', vpc=vpc)
self.validate_Network(public_tier, state="Implemented")
vr = self.get_Router(public_tier)
self.check_Router_state(vr, state="Running")
self.debug("Deploying a VM in the created VPC network...")
public_vm = self.create_VM(public_tier)
self.check_VM_state(public_vm, state="Running")
# VSD verification
self.verify_vsd_network(self.domain.id, public_tier, vpc)
self.verify_vsd_router(vr)
self.verify_vsd_vm(public_vm)
# Stopping the VPC VR
# VPC VR has no effect on the InternalLbVm functionality
Router.stop(self.api_client, id=vr.id)
self.check_Router_state(vr, state="Stopped")
self.validate_Network(public_tier, state="Implemented")
self.validate_Network(internal_tier, state="Implemented")
# VSD verification
self.verify_vsd_router(vr, stopped=True)
self.verify_vsd_network(self.domain.id, public_tier, vpc)
self.verify_vsd_network(self.domain.id, internal_tier, vpc)
# Creating Internal LB Rules in the Internal tier
self.debug("Creating two Internal LB Rules (SSH & HTTP) using the "
"same Load Balancing source IP Address...")
int_lb_rule_1 = self.create_Internal_LB_Rule(
internal_tier, vm_array=[internal_vm])
self.validate_Internal_LB_Rule(
int_lb_rule_1, state="Active", vm_array=[internal_vm])
int_lb_rule_2 = self.create_Internal_LB_Rule(
internal_tier, vm_array=[internal_vm],
services=self.test_data["internal_lbrule_http"],
source_ip=int_lb_rule_1.sourceipaddress)
self.validate_Internal_LB_Rule(
int_lb_rule_2, state="Active", vm_array=[internal_vm])
# Validating InternalLbVm deployment and state
int_lb_vm = self.get_InternalLbVm(
internal_tier, int_lb_rule_1.sourceipaddress)
self.check_InternalLbVm_state(
internal_tier, int_lb_rule_1.sourceipaddress, state="Running")
# VSD Verification
self.verify_vsd_lb_device(int_lb_vm)
# Deploying more VMs in the Internal tier
self.debug("Deploying two more VMs in network - %s" %
internal_tier.name)
internal_vm_1 = self.create_VM(internal_tier)
internal_vm_2 = self.create_VM(internal_tier)
# VSD verification
self.verify_vsd_vm(internal_vm_1)
self.verify_vsd_vm(internal_vm_2)
# Adding newly deployed VMs to the created Internal LB rules
self.debug("Adding two more virtual machines to the created Internal "
"LB rules...")
int_lb_rule_1.assign(self.api_client, [internal_vm_1, internal_vm_2])
self.validate_Internal_LB_Rule(
int_lb_rule_1, state="Active",
vm_array=[internal_vm, internal_vm_1, internal_vm_2])
int_lb_rule_2.assign(self.api_client, [internal_vm_1, internal_vm_2])
self.validate_Internal_LB_Rule(
int_lb_rule_2, state="Active",
vm_array=[internal_vm, internal_vm_1, internal_vm_2])
# Validating InternalLbVm state
self.check_InternalLbVm_state(
internal_tier, int_lb_rule_1.sourceipaddress, state="Running")
# VSD Verification
self.verify_vsd_lb_device(int_lb_vm)
# Adding Network ACL rules in the Internal tier
self.debug("Adding Network ACL rules to make the created Internal LB "
"rules (HTTP) accessible...")
http_rule = self.create_NetworkAclRule(
self.test_data["http_rule"], network=internal_tier)
# VSD verification
self.verify_vsd_firewall_rule(http_rule)
# Verifying Internal Load Balanced VMs ingress traffic
# (SSH into VM via Static NAT rule)
self.debug("Verifying Internal Load Balanced VMs ingress traffic "
"(SSH into VM via Static NAT rule)...")
self.verify_vpc_vm_ingress_traffic(internal_vm, internal_tier, vpc)
self.verify_vpc_vm_ingress_traffic(internal_vm_1, internal_tier, vpc)
self.verify_vpc_vm_ingress_traffic(internal_vm_2, internal_tier, vpc)
# Creating Static NAT rule for the VM in the Public tier
public_ip = self.acquire_PublicIPAddress(public_tier, vpc)
self.validate_PublicIPAddress(public_ip, public_tier)
self.create_StaticNatRule_For_VM(public_vm, public_ip, public_tier)
self.validate_PublicIPAddress(
public_ip, public_tier, static_nat=True, vm=public_vm)
# VSD verification
self.verify_vsd_floating_ip(
public_tier, public_vm, public_ip.ipaddress, vpc)
# Adding Network ACL rule in the Public tier
self.debug("Adding Network ACL rule to make the created NAT rule "
"(SSH) accessible...")
public_ssh_rule = self.create_NetworkAclRule(
self.test_data["ingress_rule"], network=public_tier)
# VSD verification
self.verify_vsd_firewall_rule(public_ssh_rule)
# Internal LB (wget) traffic test
ssh_client = self.ssh_into_VM(public_vm, public_ip)
wget_file = self.wget_from_vm_cmd(
ssh_client, int_lb_rule_1.sourceipaddress,
self.test_data["http_rule"]["publicport"])
# Verifying Internal LB (wget) traffic test
self.verify_lb_wget_file(
wget_file, [internal_vm, internal_vm_1, internal_vm_2])
# Stopping the InternalLbVm when the VPC VR is in Stopped state
self.stop_InternalLbVm(int_lb_vm)
self.check_InternalLbVm_state(
internal_tier, int_lb_rule_1.sourceipaddress, state="Stopped")
# VSD Verification
self.verify_vsd_lb_device(int_lb_vm, stopped=True)
# Verifying Internal Load Balanced VMs ingress traffic
# (SSH into VM via Static NAT rule)
self.debug("Verifying Internal Load Balanced VMs ingress traffic "
"(SSH into VM via Static NAT rule)...")
self.verify_vpc_vm_ingress_traffic(internal_vm, internal_tier, vpc)
self.verify_vpc_vm_ingress_traffic(internal_vm_1, internal_tier, vpc)
self.verify_vpc_vm_ingress_traffic(internal_vm_2, internal_tier, vpc)
# Internal LB (wget) traffic test
ssh_client = self.ssh_into_VM(public_vm, public_ip)
wget_file = self.wget_from_vm_cmd(
ssh_client, int_lb_rule_1.sourceipaddress,
self.test_data["http_rule"]["publicport"])
# Verifying Internal LB (wget) traffic test
with self.assertRaises(Exception):
self.verify_lb_wget_file(
wget_file, [internal_vm, internal_vm_1, internal_vm_2])
self.debug("Failed to wget file as the InternalLbVm is in stopped"
" state")
# Starting the InternalLbVm when the VPC VR is in Stopped state
self.start_InternalLbVm(int_lb_vm)
self.check_InternalLbVm_state(
internal_tier, int_lb_rule_1.sourceipaddress, state="Running")
# VSD Verification
self.verify_vsd_lb_device(int_lb_vm)
# Verifying Internal Load Balanced VMs ingress traffic
# (SSH into VM via Static NAT rule)
self.debug("Verifying Internal Load Balanced VMs ingress traffic "
"(SSH into VM via Static NAT rule)...")
self.verify_vpc_vm_ingress_traffic(internal_vm, internal_tier, vpc)
self.verify_vpc_vm_ingress_traffic(internal_vm_1, internal_tier, vpc)
self.verify_vpc_vm_ingress_traffic(internal_vm_2, internal_tier, vpc)
# Internal LB (wget) traffic test
ssh_client = self.ssh_into_VM(public_vm, public_ip)
wget_file = self.wget_from_vm_cmd(
ssh_client, int_lb_rule_1.sourceipaddress,
self.test_data["http_rule"]["publicport"])
# Verifying Internal LB (wget) traffic test
self.verify_lb_wget_file(
wget_file, [internal_vm, internal_vm_1, internal_vm_2])
# Starting the VPC VR
# VPC VR has no effect on the InternalLbVm functionality
Router.start(self.api_client, id=vr.id)
self.check_Router_state(vr)
self.validate_Network(public_tier, state="Implemented")
self.validate_Network(internal_tier, state="Implemented")
# VSD verification
self.verify_vsd_router(vr)
self.verify_vsd_network(self.domain.id, public_tier, vpc)
self.verify_vsd_network(self.domain.id, internal_tier, vpc)
# Stopping the InternalLbVm when the VPC VR is in Running state
self.stop_InternalLbVm(int_lb_vm)
self.check_InternalLbVm_state(
internal_tier, int_lb_rule_1.sourceipaddress, state="Stopped")
# VSD Verification
self.verify_vsd_lb_device(int_lb_vm, stopped=True)
# Verifying Internal Load Balanced VMs ingress traffic
# (SSH into VM via Static NAT rule)
self.debug("Verifying Internal Load Balanced VMs ingress traffic "
"(SSH into VM via Static NAT rule)...")
self.verify_vpc_vm_ingress_traffic(internal_vm, internal_tier, vpc)
self.verify_vpc_vm_ingress_traffic(internal_vm_1, internal_tier, vpc)
self.verify_vpc_vm_ingress_traffic(internal_vm_2, internal_tier, vpc)
# Internal LB (wget) traffic test
ssh_client = self.ssh_into_VM(public_vm, public_ip)
wget_file = self.wget_from_vm_cmd(
ssh_client, int_lb_rule_1.sourceipaddress,
self.test_data["http_rule"]["publicport"])
# Verifying Internal LB (wget) traffic test
with self.assertRaises(Exception):
self.verify_lb_wget_file(
wget_file, [internal_vm, internal_vm_1, internal_vm_2])
self.debug("Failed to wget file as the InternalLbVm is in stopped"
" state")
# Starting the InternalLbVm when the VPC VR is in Running state
self.start_InternalLbVm(int_lb_vm)
self.check_InternalLbVm_state(
internal_tier, int_lb_rule_1.sourceipaddress, state="Running")
# VSD Verification
self.verify_vsd_lb_device(int_lb_vm)
# Verifying Internal Load Balanced VMs ingress traffic
# (SSH into VM via Static NAT rule)
self.debug("Verifying Internal Load Balanced VMs ingress traffic "
"(SSH into VM via Static NAT rule)...")
self.verify_vpc_vm_ingress_traffic(internal_vm, internal_tier, vpc)
self.verify_vpc_vm_ingress_traffic(internal_vm_1, internal_tier, vpc)
self.verify_vpc_vm_ingress_traffic(internal_vm_2, internal_tier, vpc)
# Internal LB (wget) traffic test
ssh_client = self.ssh_into_VM(public_vm, public_ip)
wget_file = self.wget_from_vm_cmd(
ssh_client, int_lb_rule_1.sourceipaddress,
self.test_data["http_rule"]["publicport"])
# Verifying Internal LB (wget) traffic test
self.verify_lb_wget_file(
wget_file, [internal_vm, internal_vm_1, internal_vm_2])
# Force stopping the InternalLbVm when the VPC VR is in Running state
self.stop_InternalLbVm(int_lb_vm, force=True)
self.check_InternalLbVm_state(
internal_tier, int_lb_rule_1.sourceipaddress, state="Stopped")
# VSD Verification
self.verify_vsd_lb_device(int_lb_vm, stopped=True)
# Verifying Internal Load Balanced VMs ingress traffic
# (SSH into VM via Static NAT rule)
self.debug("Verifying Internal Load Balanced VMs ingress traffic "
"(SSH into VM via Static NAT rule)...")
self.verify_vpc_vm_ingress_traffic(internal_vm, internal_tier, vpc)
self.verify_vpc_vm_ingress_traffic(internal_vm_1, internal_tier, vpc)
self.verify_vpc_vm_ingress_traffic(internal_vm_2, internal_tier, vpc)
# Internal LB (wget) traffic test
ssh_client = self.ssh_into_VM(public_vm, public_ip)
wget_file = self.wget_from_vm_cmd(
ssh_client, int_lb_rule_1.sourceipaddress,
self.test_data["http_rule"]["publicport"])
# Verifying Internal LB (wget) traffic test
with self.assertRaises(Exception):
self.verify_lb_wget_file(
wget_file, [internal_vm, internal_vm_1, internal_vm_2])
self.debug("Failed to wget file as the InternalLbVm is in stopped"
" state")
# Starting the InternalLbVm when the VPC VR is in Running state
self.start_InternalLbVm(int_lb_vm)
self.check_InternalLbVm_state(
internal_tier, int_lb_rule_1.sourceipaddress, state="Running")
# VSD Verification
self.verify_vsd_lb_device(int_lb_vm)
# Verifying Internal Load Balanced VMs ingress traffic
# (SSH into VM via Static NAT rule)
self.debug("Verifying Internal Load Balanced VMs ingress traffic "
"(SSH into VM via Static NAT rule)...")
self.verify_vpc_vm_ingress_traffic(internal_vm, internal_tier, vpc)
self.verify_vpc_vm_ingress_traffic(internal_vm_1, internal_tier, vpc)
self.verify_vpc_vm_ingress_traffic(internal_vm_2, internal_tier, vpc)
# Internal LB (wget) traffic test
ssh_client = self.ssh_into_VM(public_vm, public_ip)
wget_file = self.wget_from_vm_cmd(
ssh_client, int_lb_rule_1.sourceipaddress,
self.test_data["http_rule"]["publicport"])
# Verifying Internal LB (wget) traffic test
self.verify_lb_wget_file(
wget_file, [internal_vm, internal_vm_1, internal_vm_2])
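# The wget retry loop used above (poll up to 25 times with a 60-second pause)
# is repeated verbatim across several test cases in this suite. Below is a
# minimal sketch of how it could be factored into a standalone helper; the
# helper name and its parameters are illustrative assumptions and are not
# part of the original test suite.
def retry_wget(fetch, attempts=25, delay=60, log=None):
    """Call fetch() until it returns non-empty output or attempts run out.
    fetch is expected to be a zero-argument callable wrapping
    wget_from_vm_cmd() with its ssh client, LB source IP and port already
    bound (e.g. via functools.partial).
    """
    import time
    for attempt in range(attempts):
        output = fetch()
        if output != "":
            return output
        if log:
            log("wget attempt %d returned no data, retrying in %d seconds..."
                % (attempt + 1, delay))
        time.sleep(delay)
    return ""
# Hypothetical usage inside a test method:
#   fetch = functools.partial(self.wget_from_vm_cmd, ssh_client,
#                             int_lb_rule_1.sourceipaddress,
#                             self.test_data["http_rule"]["publicport"])
#   wget_file = retry_wget(fetch, log=self.debug)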
|
resmo/cloudstack
|
test/integration/plugins/nuagevsp/test_nuage_vpc_internal_lb.py
|
Python
|
apache-2.0
| 123,514
|
#!/usr/bin/env python3
# Copyright 2020 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
import sys
file_names = set()
def parse_input(input_file):
global file_names
while True:
line_buffer = input_file.readline()
if not line_buffer:
break
line_match = re.search(r"^\s*---\s+([^\s@]+)[\s@]+", line_buffer)
if not line_match:
line_match = re.search(r"^\s*\+\+\+\s+([^\s@]+)[\s@]+",
line_buffer)
if line_match:
curr_file_name = line_match.group(1)
# trim off 'a/' and 'b/' that you will normally see in git output
#
if len(curr_file_name) > 2 and curr_file_name[1] == '/' and (
curr_file_name[0] == 'a' or curr_file_name[0] == 'b'):
curr_file_name = curr_file_name[2:]
file_names.add(curr_file_name)
def prune_unwanted_names():
global file_names
unwanted_names = set(['/dev/null'])
for curr_file_name in file_names:
# ignore files that end in '.orig' as long as non-.orig exists
line_match = re.search(r"^(.+)\.[oO][Rr][iI][gG]$", curr_file_name)
if line_match and line_match.group(1) in file_names:
unwanted_names.add(curr_file_name)
continue
file_names -= unwanted_names
def print_file_names():
for name in sorted(file_names):
print(name)
if __name__ == '__main__':
if len(sys.argv) == 1:
parse_input(sys.stdin)
else:
for curr_input_name in sys.argv[1:]:
try:
with open(curr_input_name, 'r') as curr_input_file:
parse_input(curr_input_file)
except IOError as e_str:
sys.stderr.write(
"Cannot open {}: {}\n".format(curr_input_name, e_str))
sys.exit(255)
prune_unwanted_names()
print_file_names()
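# A minimal usage sketch (illustrative only, not part of the original tool):
# the same parse/prune/print pipeline can be driven from an in-memory diff.
# The function below is never called by the script, and the sample diff text
# and the function name are made up for demonstration.
def _demo_in_memory_diff():
    import io
    sample_diff = (
        "--- a/neutron/agent/l3/agent.py\n"
        "+++ b/neutron/agent/l3/agent.py\n"
        "--- a/neutron/agent/l3/agent.py.orig\n"
        "+++ /dev/null\n"
    )
    parse_input(io.StringIO(sample_diff))
    prune_unwanted_names()
    # /dev/null and the .orig variant are pruned, so only
    # neutron/agent/l3/agent.py is printed.
    print_file_names()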
|
openstack/neutron
|
tools/files_in_patch.py
|
Python
|
apache-2.0
| 2,508
|
import re
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request, FormRequest, HtmlResponse
from scrapy.utils.response import get_base_url
from scrapy.utils.url import urljoin_rfc
from productloader import load_product
class CartridgeSave(BaseSpider):
name = 'cartridgesave.co.uk'
allowed_domains = ['cartridgesave.co.uk', 'www.cartridgesave.co.uk']
start_urls = ('http://www.cartridgesave.co.uk',)
def __init__(self, *args, **kwargs):
super(CartridgeSave, self).__init__(*args, **kwargs)
self.URL_BASE = 'http://www.cartridgesave.co.uk'
self.product_name_re = re.compile('.*/(.*?)\.html')
def parse_product(self, response):
if not isinstance(response, HtmlResponse):
return
hxs = HtmlXPathSelector(response)
res = {}
try:
# name = hxs.select('//div[@id="specification"]/ul/li[position()=1]').re('.* \((.*)\)')[0]
url = response.url
name = self.product_name_re.search(url).groups()[0]
price = hxs.select('.//span[@class="ex_vat_price"]/text()').re('\xa3(.*)')[0]
res['url'] = url
res['description'] = name
res['price'] = price
res['sku'] = res['description']
yield load_product(res, response)
except IndexError:
return
def parse(self, response):
if not isinstance(response, HtmlResponse):
return
#categories
hxs = HtmlXPathSelector(response)
# printer brands
printers_brands = hxs.select('//div[@id="manufacturers"]//li/a/@href').extract()
for url in printers_brands:
url = urljoin_rfc(self.URL_BASE, url)
yield Request(url)
# printer list
printers_list = hxs.select('//ul[@class="printer_list"]//li/a/@href').extract()
for url in printers_list:
url = urljoin_rfc(self.URL_BASE, url)
yield Request(url)
# next page
# next_page =
# if next_page:
# url = urljoin_rfc(URL_BASE, next_page[0])
# yield Request(url)
# products
products = hxs.select('//div[@class="group_products"]//li/a[not(@class="lowest_price info")]/@href').extract()
for product in products:
product = urljoin_rfc(self.URL_BASE, product)
yield Request(product, callback=self.parse_product)
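# Illustrative sketch (not part of the original spider): parse_product derives
# the product name from the last path segment of the product URL. The helper
# and the sample URL below are made up to show the same regex in isolation.
def _demo_product_name_extraction():
    product_name_re = re.compile(r'.*/(.*?)\.html')
    url = 'http://www.cartridgesave.co.uk/hp-301-black-ink-cartridge.html'
    # returns 'hp-301-black-ink-cartridge', which parse_product uses as both
    # the description and the sku
    return product_name_re.search(url).groups()[0]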
|
0--key/lib
|
portfolio/Python/scrapy/inkshop/cartridgesavecouk.py
|
Python
|
apache-2.0
| 2,521
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
class PSDispatcher(object):
"""
PSDispatcher is the base class for dispatching vars
into different pserver instances.
You need to implement the `dispatch` interface.
"""
def __init__(self, pserver_endpoints):
self._eps = pserver_endpoints
self._step = 0
@property
def eps(self):
return self._eps
def reset(self):
"""
reset the step counter, set it zero.
"""
self._step = 0
def dispatch(self, varlist):
"""
Args:
varlist(list): a list of Variables
Returns:
a map of pserver endpoint -> varname
"""
raise NotImplementedError("Interface has not been implemented.")
class HashName(PSDispatcher):
"""
Hash variable names to several endpoints using python
"hash()" function.
Args:
pserver_endpoints (list): list of endpoint(ip:port).
Examples:
.. code-block:: python
pserver_endpoints = ["127.0.0.1:6007", "127.0.0.1:6008"]
vars = ["var1","var2","var3","var4","var5"]
hash_name = HashName(pserver_endpoints)
hash_name.dispatch(vars)
"""
def __init__(self, pserver_endpoints):
super(HashName, self).__init__(pserver_endpoints)
def _hash_block(self, block_str, total):
return hash(block_str) % total
def dispatch(self, varlist):
"""
Use the `HashName` method to dispatch variables to parameter server endpoints.
Args:
varlist (list): a list of Variables
"""
eplist = []
for var in varlist:
server_id = self._hash_block(var.name(), len(self._eps))
server_for_param = self._eps[server_id]
eplist.append(server_for_param)
return eplist
class RoundRobin(PSDispatcher):
"""
Distribute variables to several endpoints using
RoundRobin<https://en.wikipedia.org/wiki/Round-robin_scheduling> method.
Args:
pserver_endpoints (list): list of endpoint(ip:port).
Examples:
.. code-block:: python
pserver_endpoints = ["127.0.0.1:6007", "127.0.0.1:6008"]
vars = ["var1","var2","var3","var4","var5"]
rr = RoundRobin(pserver_endpoints)
rr.dispatch(vars)
"""
def __init__(self, pserver_endpoints):
super(RoundRobin, self).__init__(pserver_endpoints)
def dispatch(self, varlist):
"""
Use the `RoundRobin` method to dispatch variables to parameter server endpoints in round-robin order.
Args:
varlist (list): a list of Variables
"""
eplist = []
for var in varlist:
server_for_param = self._eps[self._step]
eplist.append(server_for_param)
self._step += 1
if self._step >= len(self._eps):
self._step = 0
return eplist
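# A minimal usage sketch (illustrative, not part of the module): HashName
# calls var.name() on each element, so plain strings only work with
# RoundRobin. The tiny stub class below stands in for a framework Variable;
# both it and the demo function are made-up names for demonstration only.
def _demo_dispatchers():
    class _FakeVar(object):
        def __init__(self, name):
            self._name = name
        def name(self):
            return self._name
    endpoints = ["127.0.0.1:6007", "127.0.0.1:6008"]
    varlist = [_FakeVar("var%d" % i) for i in range(5)]
    # one endpoint per variable, chosen by hashing the variable name
    hashed = HashName(endpoints).dispatch(varlist)
    # alternates 6007, 6008, 6007, 6008, 6007
    round_robin = RoundRobin(endpoints).dispatch(varlist)
    return hashed, round_robin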
|
luotao1/Paddle
|
python/paddle/fluid/incubate/fleet/parameter_server/ir/ps_dispatcher.py
|
Python
|
apache-2.0
| 3,500
|
# (c) Copyright 2014 Cisco Systems Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""
Script to push the zone configuration to Cisco SAN switches.
"""
import random
import re
from eventlet import greenthread
from oslo_concurrency import processutils
from oslo_log import log as logging
from oslo_utils import excutils
import six
from cinder import exception
from cinder.i18n import _
from cinder import ssh_utils
from cinder import utils
from cinder.zonemanager.drivers.cisco import exception as c_exception
import cinder.zonemanager.drivers.cisco.fc_zone_constants as ZoneConstant
LOG = logging.getLogger(__name__)
class CiscoFCZoneClientCLI(object):
"""Cisco FC zone client cli implementation.
OpenStack Fibre Channel zone client cli connector
to manage FC zoning in Cisco SAN fabrics.
Version history:
1.0 - Initial Cisco FC zone client cli
"""
switch_ip = None
switch_port = '22'
switch_user = 'admin'
switch_pwd = 'none'
def __init__(self, ipaddress, username, password, port, vsan):
"""initializing the client."""
self.switch_ip = ipaddress
self.switch_port = port
self.switch_user = username
self.switch_pwd = password
self.fabric_vsan = vsan
self.sshpool = None
def get_active_zone_set(self):
"""Return the active zone configuration.
Return active zoneset from fabric. When none of the configurations
are active then it will return empty map.
:returns: Map -- active zone set map in the following format
.. code-block:: python
{
'zones':
{'openstack50060b0000c26604201900051ee8e329':
['50060b0000c26604', '201900051ee8e329']
},
'active_zone_config': 'OpenStack_Cfg'
}
"""
zone_set = {}
zone = {}
zone_member = None
zone_name = None
switch_data = None
zone_set_name = None
try:
switch_data = self._get_switch_info(
[ZoneConstant.GET_ACTIVE_ZONE_CFG, self.fabric_vsan,
' | no-more'])
except c_exception.CiscoZoningCliException:
with excutils.save_and_reraise_exception():
LOG.error("Failed getting active zone set "
"from fabric %s", self.switch_ip)
try:
for line in switch_data:
# Split on non-word characters,
line_split = re.split(r'[\s\[\]]+', line)
if ZoneConstant.CFG_ZONESET in line_split:
# zoneset name [name] vsan [vsan]
zone_set_name = \
line_split[line_split.index(ZoneConstant.CFG_ZONESET)
+ 2]
continue
if ZoneConstant.CFG_ZONE in line_split:
# zone name [name] vsan [vsan]
zone_name = \
line_split[line_split.index(ZoneConstant.CFG_ZONE) + 2]
zone[zone_name] = list()
continue
if ZoneConstant.CFG_ZONE_MEMBER in line_split:
# Examples:
# pwwn c0:50:76:05:15:9f:00:12
# * fcid 0x1e01c0 [pwwn 50:05:07:68:02:20:48:04] [V7K_N1P2]
zone_member = \
line_split[
line_split.index(ZoneConstant.CFG_ZONE_MEMBER) + 1]
zone_member_list = zone.get(zone_name)
zone_member_list.append(zone_member)
zone_set[ZoneConstant.CFG_ZONES] = zone
zone_set[ZoneConstant.ACTIVE_ZONE_CONFIG] = zone_set_name
except Exception as ex:
# In case of parsing error here, it should be malformed cli output.
msg = _("Malformed zone configuration: (switch=%(switch)s "
"zone_config=%(zone_config)s)."
) % {'switch': self.switch_ip,
'zone_config': switch_data}
LOG.error(msg)
exc_msg = _("Exception: %s") % six.text_type(ex)
LOG.error(exc_msg)
raise exception.FCZoneDriverException(reason=msg)
return zone_set
def add_zones(self, zones, activate, fabric_vsan, active_zone_set,
zone_status):
"""Add zone configuration.
This method will add the zone configuration passed by user.
:param zones: Zone names mapped to members and VSANs
Zone members are colon separated but case-insensitive
.. code-block:: python
{ zonename1:[zonemember1, zonemember2,...],
zonename2:[zonemember1, zonemember2,...]...}
e.g:
{
'openstack50060b0000c26604201900051ee8e329':
['50:06:0b:00:00:c2:66:04', '20:19:00:05:1e:e8:e3:29']
}
:param activate: True will activate the zone config.
:param fabric_vsan:
:param active_zone_set: Active zone set dict retrieved from
get_active_zone_set method
:param zone_status: Status of the zone
:raises CiscoZoningCliException:
"""
LOG.debug("Add Zones - Zones passed: %s", zones)
LOG.debug("Active zone set: %s", active_zone_set)
zone_list = active_zone_set[ZoneConstant.CFG_ZONES]
LOG.debug("zone list: %s", zone_list)
LOG.debug("zone status: %s", zone_status)
cfg_name = active_zone_set[ZoneConstant.ACTIVE_ZONE_CONFIG]
zone_cmds = [['conf'],
['zoneset', 'name', cfg_name, 'vsan', fabric_vsan]]
for zone in zones.keys():
zone_cmds.append(['zone', 'name', zone])
for member in zones[zone]:
zone_cmds.append(['member', 'pwwn', member])
zone_cmds.append(['end'])
try:
LOG.debug("Add zones: Config cmd to run: %s", zone_cmds)
self._ssh_execute(zone_cmds, True, 1)
if activate:
self.activate_zoneset(cfg_name, fabric_vsan, zone_status)
self._cfg_save()
except Exception as e:
msg = _("Creating and activating zone set failed: "
"(Zone set=%(zoneset)s error=%(err)s)."
) % {'zoneset': cfg_name, 'err': six.text_type(e)}
LOG.error(msg)
raise c_exception.CiscoZoningCliException(reason=msg)
def update_zones(self, zones, activate, fabric_vsan, operation,
active_zone_set, zone_status):
"""Update the zone configuration.
This method will update the zone configuration passed by user.
:param zones: zone names mapped to members. Zone members
are colon separated but case-insensitive
.. code-block:: python
{ zonename1:[zonemember1, zonemember2,...],
zonename2:[zonemember1, zonemember2,...]...}
e.g:
{
'openstack50060b0000c26604201900051ee8e329':
['50:06:0b:00:00:c2:66:04',
'20:19:00:05:1e:e8:e3:29']
}
:param activate: True will activate the zone config.
:param operation: zone add or zone remove
:param fabric_vsan: Virtual San #
:param active_zone_set: Active zone set dict retrieved from
get_active_zone_set method
:param zone_status: Status of the zone
:raises CiscoZoningCliException:
"""
LOG.debug("Update Zones - Operation: %(op)s - Zones "
"passed: %(zones)s",
{'op': operation, 'zones': zones})
cfg_name = active_zone_set[ZoneConstant.ACTIVE_ZONE_CONFIG]
zone_cmds = [['conf'],
['zoneset', 'name', cfg_name, 'vsan', fabric_vsan]]
zone_mod_cmd = []
if operation == ZoneConstant.ZONE_ADD:
zone_mod_cmd = ['member', 'pwwn']
elif operation == ZoneConstant.ZONE_REMOVE:
zone_mod_cmd = ['no', 'member', 'pwwn']
for zone, zone_members in zones.items():
zone_cmds.append(['zone', 'name', zone])
for member in zone_members:
zone_cmds.append(zone_mod_cmd + [member])
zone_cmds.append(['end'])
try:
LOG.debug("Update zones: Config cmd to run: %s", zone_cmds)
self._ssh_execute(zone_cmds, True, 1)
if activate:
self.activate_zoneset(cfg_name, fabric_vsan, zone_status)
self._cfg_save()
except Exception as e:
msg = (_("Updating and activating zone set failed: "
"(Zone set=%(zoneset)s error=%(err)s).")
% {'zoneset': cfg_name, 'err': six.text_type(e)})
LOG.error(msg)
raise c_exception.CiscoZoningCliException(reason=msg)
def activate_zoneset(self, cfgname, fabric_vsan, zone_status):
"""Method to Activate the zone config. Param cfgname - ZonesetName."""
LOG.debug("zone status: %s", zone_status)
cmd_list = [['conf'],
['zoneset', 'activate', 'name', cfgname, 'vsan',
self.fabric_vsan]]
if zone_status['mode'] == 'enhanced':
cmd_list.append(['zone', 'commit', 'vsan', fabric_vsan])
cmd_list.append(['end'])
return self._ssh_execute(cmd_list, True, 1)
def get_zoning_status(self):
"""Return the zoning mode and session for a zoneset."""
zone_status = {}
try:
switch_data = self._get_switch_info(
[ZoneConstant.GET_ZONE_STATUS, self.fabric_vsan])
except c_exception.CiscoZoningCliException:
with excutils.save_and_reraise_exception():
LOG.error("Failed getting zone status "
"from fabric %s", self.switch_ip)
try:
for line in switch_data:
# Split on non-word characters,
line_split = re.split(r'[\s\[\]]+', line)
if 'mode:' in line_split:
# mode: <enhanced|basic>
zone_status['mode'] = line_split[line_split.index('mode:')
+ 1]
continue
if 'session:' in line_split:
# session: <none|a value other than none>
zone_status['session'] = \
line_split[line_split.index('session:') + 1]
continue
except Exception as ex:
# In case of parsing error here, it should be malformed cli output.
msg = _("Malformed zone status: (switch=%(switch)s "
"zone_config=%(zone_config)s)."
) % {'switch': self.switch_ip,
'zone_status': switch_data}
LOG.error(msg)
exc_msg = _("Exception: %s") % six.text_type(ex)
LOG.error(exc_msg)
raise exception.FCZoneDriverException(reason=msg)
return zone_status
def delete_zones(self, zone_names, activate, fabric_vsan, active_zone_set,
zone_status):
"""Delete zones from fabric.
Method to delete the active zone config zones
params zone_names: zoneNames separated by semicolon
params activate: True/False
"""
LOG.debug("zone_names %s", zone_names)
active_zoneset_name = active_zone_set[ZoneConstant.ACTIVE_ZONE_CONFIG]
cmds = [['conf'],
['zoneset', 'name', active_zoneset_name, 'vsan',
fabric_vsan]]
try:
for zone in set(zone_names.split(';')):
cmds.append(['no', 'zone', 'name', zone])
cmds.append(['end'])
LOG.debug("Delete zones: Config cmd to run: %s", cmds)
self._ssh_execute(cmds, True, 1)
if activate:
self.activate_zoneset(active_zoneset_name, fabric_vsan,
zone_status)
self._cfg_save()
except Exception as e:
msg = _("Deleting zones failed: (command=%(cmd)s error=%(err)s)."
) % {'cmd': cmds, 'err': six.text_type(e)}
LOG.error(msg)
raise c_exception.CiscoZoningCliException(reason=msg)
def get_nameserver_info(self):
"""Get name server data from fabric.
This method will return the connected node port wwn list(local
and remote) for the given switch fabric
show fcns database
"""
cli_output = None
return_list = []
try:
cli_output = self._get_switch_info([ZoneConstant.FCNS_SHOW,
self.fabric_vsan])
except c_exception.CiscoZoningCliException:
with excutils.save_and_reraise_exception():
LOG.error("Failed collecting fcns database "
"info for fabric %s", self.switch_ip)
if (cli_output):
return_list = self._parse_ns_output(cli_output)
LOG.info("Connector returning fcnsinfo-%s", return_list)
return return_list
@utils.retry(processutils.ProcessExecutionError, retries=5)
def _cfg_save(self):
cmd = ['copy', 'running-config', 'startup-config']
self._run_ssh(cmd, True)
def _get_switch_info(self, cmd_list):
stdout, stderr, sw_data = None, None, None
try:
stdout, stderr = self._run_ssh(cmd_list, True)
LOG.debug("CLI output from ssh - output: %s", stdout)
if (stdout):
sw_data = stdout.splitlines()
return sw_data
except processutils.ProcessExecutionError as e:
msg = _("Error while getting data via ssh: (command=%(cmd)s "
"error=%(err)s).") % {'cmd': cmd_list,
'err': six.text_type(e)}
LOG.error(msg)
raise c_exception.CiscoZoningCliException(reason=msg)
def _parse_ns_output(self, switch_data):
"""Parses name server data.
Parses nameserver raw data and adds the device port wwns to the list
:returns: List -- list of device port wwn from ns info
"""
return_list = []
for line in switch_data:
if not(" N " in line):
continue
linesplit = line.split()
if len(linesplit) > 2:
node_port_wwn = linesplit[2]
return_list.append(node_port_wwn)
else:
msg = _("Malformed show fcns database string: %s") % line
LOG.error(msg)
raise exception.InvalidParameterValue(err=msg)
return return_list
def _run_ssh(self, cmd_list, check_exit_code=True):
command = ' '.join(cmd_list)
if not self.sshpool:
self.sshpool = ssh_utils.SSHPool(self.switch_ip,
self.switch_port,
None,
self.switch_user,
self.switch_pwd,
min_size=1,
max_size=5)
try:
with self.sshpool.item() as ssh:
return processutils.ssh_execute(
ssh,
command,
check_exit_code=check_exit_code)
except Exception:
with excutils.save_and_reraise_exception():
LOG.warning("Error running SSH command: %s", command)
def _ssh_execute(self, cmd_list, check_exit_code=True, attempts=1):
"""Execute cli with status update.
Executes CLI commands where status return is expected.
cmd_list is a list of commands, where each command is itself
a list of parameters. We use utils.check_ssh_injection to check each
command, but then join then with " ; " to form a single command.
"""
# Check that each command is secure
for cmd in cmd_list:
utils.check_ssh_injection(cmd)
# Combine into a single command.
command = ' ; '.join(map(lambda x: ' '.join(x), cmd_list))
if not self.sshpool:
self.sshpool = ssh_utils.SSHPool(self.switch_ip,
self.switch_port,
None,
self.switch_user,
self.switch_pwd,
min_size=1,
max_size=5)
stdin, stdout, stderr = None, None, None
LOG.debug("Executing command via ssh: %s", command)
last_exception = None
try:
with self.sshpool.item() as ssh:
while attempts > 0:
attempts -= 1
try:
stdin, stdout, stderr = ssh.exec_command(command)
channel = stdout.channel
exit_status = channel.recv_exit_status()
LOG.debug("Exit Status from ssh: %s", exit_status)
# exit_status == -1 if no exit code was returned
if exit_status != -1:
LOG.debug('Result was %s', exit_status)
if check_exit_code and exit_status != 0:
raise processutils.ProcessExecutionError(
exit_code=exit_status,
stdout=stdout,
stderr=stderr,
cmd=command)
else:
return True
else:
return True
except Exception as e:
LOG.exception('Error executing SSH command.')
last_exception = e
greenthread.sleep(random.randint(20, 500) / 100.0)
LOG.debug("Handling error case after SSH: %s", last_exception)
try:
raise processutils.ProcessExecutionError(
exit_code=last_exception.exit_code,
stdout=last_exception.stdout,
stderr=last_exception.stderr,
cmd=last_exception.cmd)
except AttributeError:
raise processutils.ProcessExecutionError(
exit_code=-1,
stdout="",
stderr="Error running SSH command",
cmd=command)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception("Error executing command via ssh.")
finally:
if stdin:
stdin.flush()
stdin.close()
if stdout:
stdout.close()
if stderr:
stderr.close()
def cleanup(self):
self.sshpool = None
|
openstack/cinder
|
cinder/zonemanager/drivers/cisco/cisco_fc_zone_client_cli.py
|
Python
|
apache-2.0
| 20,088
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.7.4
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v1_event_source import V1EventSource
class TestV1EventSource(unittest.TestCase):
""" V1EventSource unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testV1EventSource(self):
"""
Test V1EventSource
"""
model = kubernetes.client.models.v1_event_source.V1EventSource()
if __name__ == '__main__':
unittest.main()
|
djkonro/client-python
|
kubernetes/test/test_v1_event_source.py
|
Python
|
apache-2.0
| 843
|
# -*- encoding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Unit tests for pyspark.sql; additional tests are implemented as doctests in
individual modules.
"""
import os
import sys
import subprocess
import pydoc
import shutil
import tempfile
import pickle
import functools
import time
import datetime
import array
import ctypes
import py4j
try:
import xmlrunner
except ImportError:
xmlrunner = None
if sys.version_info[:2] <= (2, 6):
try:
import unittest2 as unittest
except ImportError:
sys.stderr.write('Please install unittest2 to test with Python 2.6 or earlier')
sys.exit(1)
else:
import unittest
_have_pandas = False
try:
import pandas
_have_pandas = True
except:
# No Pandas, but that's okay, we'll skip those tests
pass
from pyspark import SparkContext
from pyspark.sql import SparkSession, SQLContext, HiveContext, Column, Row
from pyspark.sql.types import *
from pyspark.sql.types import UserDefinedType, _infer_type, _make_type_verifier
from pyspark.sql.types import _array_signed_int_typecode_ctype_mappings, _array_type_mappings
from pyspark.sql.types import _array_unsigned_int_typecode_ctype_mappings
from pyspark.tests import QuietTest, ReusedPySparkTestCase, SparkSubmitTests
from pyspark.sql.functions import UserDefinedFunction, sha2, lit
from pyspark.sql.window import Window
from pyspark.sql.utils import AnalysisException, ParseException, IllegalArgumentException
_have_arrow = False
try:
import pyarrow
_have_arrow = True
except:
# No Arrow, but that's okay, we'll skip those tests
pass
class UTCOffsetTimezone(datetime.tzinfo):
"""
Specifies timezone in UTC offset
"""
def __init__(self, offset=0):
self.ZERO = datetime.timedelta(hours=offset)
def utcoffset(self, dt):
return self.ZERO
def dst(self, dt):
return self.ZERO
class ExamplePointUDT(UserDefinedType):
"""
User-defined type (UDT) for ExamplePoint.
"""
@classmethod
def sqlType(self):
return ArrayType(DoubleType(), False)
@classmethod
def module(cls):
return 'pyspark.sql.tests'
@classmethod
def scalaUDT(cls):
return 'org.apache.spark.sql.test.ExamplePointUDT'
def serialize(self, obj):
return [obj.x, obj.y]
def deserialize(self, datum):
return ExamplePoint(datum[0], datum[1])
class ExamplePoint:
"""
An example class to demonstrate UDT in Scala, Java, and Python.
"""
__UDT__ = ExamplePointUDT()
def __init__(self, x, y):
self.x = x
self.y = y
def __repr__(self):
return "ExamplePoint(%s,%s)" % (self.x, self.y)
def __str__(self):
return "(%s,%s)" % (self.x, self.y)
def __eq__(self, other):
return isinstance(other, self.__class__) and \
other.x == self.x and other.y == self.y
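# Illustrative round-trip (not part of the test suite; the demo function name
# is made up): the UDT stores a point as its sqlType payload, a two-element
# double array, and rebuilds the object from that payload.
def _demo_example_point_udt_roundtrip():
    udt = ExamplePointUDT()
    payload = udt.serialize(ExamplePoint(1.0, 2.0))  # [1.0, 2.0]
    return udt.deserialize(payload)                  # ExamplePoint(1.0,2.0)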
class PythonOnlyUDT(UserDefinedType):
"""
User-defined type (UDT) for PythonOnlyPoint.
"""
@classmethod
def sqlType(self):
return ArrayType(DoubleType(), False)
@classmethod
def module(cls):
return '__main__'
def serialize(self, obj):
return [obj.x, obj.y]
def deserialize(self, datum):
return PythonOnlyPoint(datum[0], datum[1])
@staticmethod
def foo():
pass
@property
def props(self):
return {}
class PythonOnlyPoint(ExamplePoint):
"""
An example class to demonstrate UDT in only Python
"""
__UDT__ = PythonOnlyUDT()
class MyObject(object):
def __init__(self, key, value):
self.key = key
self.value = value
class DataTypeTests(unittest.TestCase):
# regression test for SPARK-6055
def test_data_type_eq(self):
lt = LongType()
lt2 = pickle.loads(pickle.dumps(LongType()))
self.assertEqual(lt, lt2)
# regression test for SPARK-7978
def test_decimal_type(self):
t1 = DecimalType()
t2 = DecimalType(10, 2)
self.assertTrue(t2 is not t1)
self.assertNotEqual(t1, t2)
t3 = DecimalType(8)
self.assertNotEqual(t2, t3)
# regression test for SPARK-10392
def test_datetype_equal_zero(self):
dt = DateType()
self.assertEqual(dt.fromInternal(0), datetime.date(1970, 1, 1))
# regression test for SPARK-17035
def test_timestamp_microsecond(self):
tst = TimestampType()
self.assertEqual(tst.toInternal(datetime.datetime.max) % 1000000, 999999)
def test_empty_row(self):
row = Row()
self.assertEqual(len(row), 0)
class SQLTests(ReusedPySparkTestCase):
@classmethod
def setUpClass(cls):
ReusedPySparkTestCase.setUpClass()
cls.tempdir = tempfile.NamedTemporaryFile(delete=False)
os.unlink(cls.tempdir.name)
cls.spark = SparkSession(cls.sc)
cls.testData = [Row(key=i, value=str(i)) for i in range(100)]
cls.df = cls.spark.createDataFrame(cls.testData)
@classmethod
def tearDownClass(cls):
ReusedPySparkTestCase.tearDownClass()
cls.spark.stop()
shutil.rmtree(cls.tempdir.name, ignore_errors=True)
def test_sqlcontext_reuses_sparksession(self):
sqlContext1 = SQLContext(self.sc)
sqlContext2 = SQLContext(self.sc)
self.assertTrue(sqlContext1.sparkSession is sqlContext2.sparkSession)
def tearDown(self):
super(SQLTests, self).tearDown()
# tear down test_bucketed_write state
self.spark.sql("DROP TABLE IF EXISTS pyspark_bucket")
def test_row_should_be_read_only(self):
row = Row(a=1, b=2)
self.assertEqual(1, row.a)
def foo():
row.a = 3
self.assertRaises(Exception, foo)
row2 = self.spark.range(10).first()
self.assertEqual(0, row2.id)
def foo2():
row2.id = 2
self.assertRaises(Exception, foo2)
def test_range(self):
self.assertEqual(self.spark.range(1, 1).count(), 0)
self.assertEqual(self.spark.range(1, 0, -1).count(), 1)
self.assertEqual(self.spark.range(0, 1 << 40, 1 << 39).count(), 2)
self.assertEqual(self.spark.range(-2).count(), 0)
self.assertEqual(self.spark.range(3).count(), 3)
def test_duplicated_column_names(self):
df = self.spark.createDataFrame([(1, 2)], ["c", "c"])
row = df.select('*').first()
self.assertEqual(1, row[0])
self.assertEqual(2, row[1])
self.assertEqual("Row(c=1, c=2)", str(row))
# Cannot access columns
self.assertRaises(AnalysisException, lambda: df.select(df[0]).first())
self.assertRaises(AnalysisException, lambda: df.select(df.c).first())
self.assertRaises(AnalysisException, lambda: df.select(df["c"]).first())
def test_column_name_encoding(self):
"""Ensure that created columns has `str` type consistently."""
columns = self.spark.createDataFrame([('Alice', 1)], ['name', u'age']).columns
self.assertEqual(columns, ['name', 'age'])
self.assertTrue(isinstance(columns[0], str))
self.assertTrue(isinstance(columns[1], str))
def test_explode(self):
from pyspark.sql.functions import explode, explode_outer, posexplode_outer
d = [
Row(a=1, intlist=[1, 2, 3], mapfield={"a": "b"}),
Row(a=1, intlist=[], mapfield={}),
Row(a=1, intlist=None, mapfield=None),
]
rdd = self.sc.parallelize(d)
data = self.spark.createDataFrame(rdd)
result = data.select(explode(data.intlist).alias("a")).select("a").collect()
self.assertEqual(result[0][0], 1)
self.assertEqual(result[1][0], 2)
self.assertEqual(result[2][0], 3)
result = data.select(explode(data.mapfield).alias("a", "b")).select("a", "b").collect()
self.assertEqual(result[0][0], "a")
self.assertEqual(result[0][1], "b")
result = [tuple(x) for x in data.select(posexplode_outer("intlist")).collect()]
self.assertEqual(result, [(0, 1), (1, 2), (2, 3), (None, None), (None, None)])
result = [tuple(x) for x in data.select(posexplode_outer("mapfield")).collect()]
self.assertEqual(result, [(0, 'a', 'b'), (None, None, None), (None, None, None)])
result = [x[0] for x in data.select(explode_outer("intlist")).collect()]
self.assertEqual(result, [1, 2, 3, None, None])
result = [tuple(x) for x in data.select(explode_outer("mapfield")).collect()]
self.assertEqual(result, [('a', 'b'), (None, None), (None, None)])
def test_and_in_expression(self):
self.assertEqual(4, self.df.filter((self.df.key <= 10) & (self.df.value <= "2")).count())
self.assertRaises(ValueError, lambda: (self.df.key <= 10) and (self.df.value <= "2"))
self.assertEqual(14, self.df.filter((self.df.key <= 3) | (self.df.value < "2")).count())
self.assertRaises(ValueError, lambda: self.df.key <= 3 or self.df.value < "2")
self.assertEqual(99, self.df.filter(~(self.df.key == 1)).count())
self.assertRaises(ValueError, lambda: not self.df.key == 1)
def test_udf_with_callable(self):
d = [Row(number=i, squared=i**2) for i in range(10)]
rdd = self.sc.parallelize(d)
data = self.spark.createDataFrame(rdd)
class PlusFour:
def __call__(self, col):
if col is not None:
return col + 4
call = PlusFour()
pudf = UserDefinedFunction(call, LongType())
res = data.select(pudf(data['number']).alias('plus_four'))
self.assertEqual(res.agg({'plus_four': 'sum'}).collect()[0][0], 85)
def test_udf_with_partial_function(self):
d = [Row(number=i, squared=i**2) for i in range(10)]
rdd = self.sc.parallelize(d)
data = self.spark.createDataFrame(rdd)
def some_func(col, param):
if col is not None:
return col + param
pfunc = functools.partial(some_func, param=4)
pudf = UserDefinedFunction(pfunc, LongType())
res = data.select(pudf(data['number']).alias('plus_four'))
self.assertEqual(res.agg({'plus_four': 'sum'}).collect()[0][0], 85)
def test_udf(self):
self.spark.catalog.registerFunction("twoArgs", lambda x, y: len(x) + y, IntegerType())
[row] = self.spark.sql("SELECT twoArgs('test', 1)").collect()
self.assertEqual(row[0], 5)
def test_udf2(self):
self.spark.catalog.registerFunction("strlen", lambda string: len(string), IntegerType())
self.spark.createDataFrame(self.sc.parallelize([Row(a="test")]))\
.createOrReplaceTempView("test")
[res] = self.spark.sql("SELECT strlen(a) FROM test WHERE strlen(a) > 1").collect()
self.assertEqual(4, res[0])
def test_chained_udf(self):
self.spark.catalog.registerFunction("double", lambda x: x + x, IntegerType())
[row] = self.spark.sql("SELECT double(1)").collect()
self.assertEqual(row[0], 2)
[row] = self.spark.sql("SELECT double(double(1))").collect()
self.assertEqual(row[0], 4)
[row] = self.spark.sql("SELECT double(double(1) + 1)").collect()
self.assertEqual(row[0], 6)
def test_single_udf_with_repeated_argument(self):
# regression test for SPARK-20685
self.spark.catalog.registerFunction("add", lambda x, y: x + y, IntegerType())
row = self.spark.sql("SELECT add(1, 1)").first()
self.assertEqual(tuple(row), (2, ))
def test_multiple_udfs(self):
self.spark.catalog.registerFunction("double", lambda x: x * 2, IntegerType())
[row] = self.spark.sql("SELECT double(1), double(2)").collect()
self.assertEqual(tuple(row), (2, 4))
[row] = self.spark.sql("SELECT double(double(1)), double(double(2) + 2)").collect()
self.assertEqual(tuple(row), (4, 12))
self.spark.catalog.registerFunction("add", lambda x, y: x + y, IntegerType())
[row] = self.spark.sql("SELECT double(add(1, 2)), add(double(2), 1)").collect()
self.assertEqual(tuple(row), (6, 5))
def test_udf_in_filter_on_top_of_outer_join(self):
from pyspark.sql.functions import udf
left = self.spark.createDataFrame([Row(a=1)])
right = self.spark.createDataFrame([Row(a=1)])
df = left.join(right, on='a', how='left_outer')
df = df.withColumn('b', udf(lambda x: 'x')(df.a))
self.assertEqual(df.filter('b = "x"').collect(), [Row(a=1, b='x')])
def test_udf_in_filter_on_top_of_join(self):
# regression test for SPARK-18589
from pyspark.sql.functions import udf
left = self.spark.createDataFrame([Row(a=1)])
right = self.spark.createDataFrame([Row(b=1)])
f = udf(lambda a, b: a == b, BooleanType())
df = left.crossJoin(right).filter(f("a", "b"))
self.assertEqual(df.collect(), [Row(a=1, b=1)])
def test_udf_without_arguments(self):
self.spark.catalog.registerFunction("foo", lambda: "bar")
[row] = self.spark.sql("SELECT foo()").collect()
self.assertEqual(row[0], "bar")
def test_udf_with_array_type(self):
d = [Row(l=list(range(3)), d={"key": list(range(5))})]
rdd = self.sc.parallelize(d)
self.spark.createDataFrame(rdd).createOrReplaceTempView("test")
self.spark.catalog.registerFunction("copylist", lambda l: list(l), ArrayType(IntegerType()))
self.spark.catalog.registerFunction("maplen", lambda d: len(d), IntegerType())
[(l1, l2)] = self.spark.sql("select copylist(l), maplen(d) from test").collect()
self.assertEqual(list(range(3)), l1)
self.assertEqual(1, l2)
def test_broadcast_in_udf(self):
bar = {"a": "aa", "b": "bb", "c": "abc"}
foo = self.sc.broadcast(bar)
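# The UDF's closure captures the Broadcast object; `foo.value` is resolved on the
# executors, so broadcast variables can be used inside SQL UDFs.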
self.spark.catalog.registerFunction("MYUDF", lambda x: foo.value[x] if x else '')
[res] = self.spark.sql("SELECT MYUDF('c')").collect()
self.assertEqual("abc", res[0])
[res] = self.spark.sql("SELECT MYUDF('')").collect()
self.assertEqual("", res[0])
def test_udf_with_filter_function(self):
df = self.spark.createDataFrame([(1, "1"), (2, "2"), (1, "2"), (1, "2")], ["key", "value"])
from pyspark.sql.functions import udf, col
from pyspark.sql.types import BooleanType
my_filter = udf(lambda a: a < 2, BooleanType())
sel = df.select(col("key"), col("value")).filter((my_filter(col("key"))) & (df.value < "2"))
self.assertEqual(sel.collect(), [Row(key=1, value='1')])
def test_udf_with_aggregate_function(self):
df = self.spark.createDataFrame([(1, "1"), (2, "2"), (1, "2"), (1, "2")], ["key", "value"])
from pyspark.sql.functions import udf, col, sum
from pyspark.sql.types import BooleanType
my_filter = udf(lambda a: a == 1, BooleanType())
sel = df.select(col("key")).distinct().filter(my_filter(col("key")))
self.assertEqual(sel.collect(), [Row(key=1)])
my_copy = udf(lambda x: x, IntegerType())
my_add = udf(lambda a, b: int(a + b), IntegerType())
my_strlen = udf(lambda x: len(x), IntegerType())
sel = df.groupBy(my_copy(col("key")).alias("k"))\
.agg(sum(my_strlen(col("value"))).alias("s"))\
.select(my_add(col("k"), col("s")).alias("t"))
self.assertEqual(sel.collect(), [Row(t=4), Row(t=3)])
def test_udf_in_generate(self):
from pyspark.sql.functions import udf, explode
df = self.spark.range(5)
f = udf(lambda x: list(range(x)), ArrayType(LongType()))
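# `f(*df)` unpacks the DataFrame's columns (here just `id`) as the UDF's arguments.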
row = df.select(explode(f(*df))).groupBy().sum().first()
self.assertEqual(row[0], 10)
df = self.spark.range(3)
res = df.select("id", explode(f(df.id))).collect()
self.assertEqual(res[0][0], 1)
self.assertEqual(res[0][1], 0)
self.assertEqual(res[1][0], 2)
self.assertEqual(res[1][1], 0)
self.assertEqual(res[2][0], 2)
self.assertEqual(res[2][1], 1)
range_udf = udf(lambda value: list(range(value - 1, value + 1)), ArrayType(IntegerType()))
res = df.select("id", explode(range_udf(df.id))).collect()
self.assertEqual(res[0][0], 0)
self.assertEqual(res[0][1], -1)
self.assertEqual(res[1][0], 0)
self.assertEqual(res[1][1], 0)
self.assertEqual(res[2][0], 1)
self.assertEqual(res[2][1], 0)
self.assertEqual(res[3][0], 1)
self.assertEqual(res[3][1], 1)
def test_udf_with_order_by_and_limit(self):
from pyspark.sql.functions import udf
my_copy = udf(lambda x: x, IntegerType())
df = self.spark.range(10).orderBy("id")
res = df.select(df.id, my_copy(df.id).alias("copy")).limit(1)
res.explain(True)
self.assertEqual(res.collect(), [Row(id=0, copy=0)])
def test_udf_registration_returns_udf(self):
df = self.spark.range(10)
add_three = self.spark.udf.register("add_three", lambda x: x + 3, IntegerType())
self.assertListEqual(
df.selectExpr("add_three(id) AS plus_three").collect(),
df.select(add_three("id").alias("plus_three")).collect()
)
def test_non_existed_udf(self):
spark = self.spark
self.assertRaisesRegexp(AnalysisException, "Can not load class non_existed_udf",
lambda: spark.udf.registerJavaFunction("udf1", "non_existed_udf"))
def test_non_existed_udaf(self):
spark = self.spark
self.assertRaisesRegexp(AnalysisException, "Can not load class non_existed_udaf",
lambda: spark.udf.registerJavaUDAF("udaf1", "non_existed_udaf"))
def test_multiLine_json(self):
people1 = self.spark.read.json("python/test_support/sql/people.json")
people_array = self.spark.read.json("python/test_support/sql/people_array.json",
multiLine=True)
self.assertEqual(people1.collect(), people_array.collect())
def test_multiline_csv(self):
ages_newlines = self.spark.read.csv(
"python/test_support/sql/ages_newlines.csv", multiLine=True)
expected = [Row(_c0=u'Joe', _c1=u'20', _c2=u'Hi,\nI am Jeo'),
Row(_c0=u'Tom', _c1=u'30', _c2=u'My name is Tom'),
Row(_c0=u'Hyukjin', _c1=u'25', _c2=u'I am Hyukjin\n\nI love Spark!')]
self.assertEqual(ages_newlines.collect(), expected)
def test_ignorewhitespace_csv(self):
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
self.spark.createDataFrame([[" a", "b ", " c "]]).write.csv(
tmpPath,
ignoreLeadingWhiteSpace=False,
ignoreTrailingWhiteSpace=False)
expected = [Row(value=u' a,b , c ')]
readback = self.spark.read.text(tmpPath)
self.assertEqual(readback.collect(), expected)
shutil.rmtree(tmpPath)
def test_read_multiple_orc_file(self):
df = self.spark.read.orc(["python/test_support/sql/orc_partitioned/b=0/c=0",
"python/test_support/sql/orc_partitioned/b=1/c=1"])
self.assertEqual(2, df.count())
def test_udf_with_input_file_name(self):
from pyspark.sql.functions import udf, input_file_name
from pyspark.sql.types import StringType
sourceFile = udf(lambda path: path, StringType())
filePath = "python/test_support/sql/people1.json"
row = self.spark.read.json(filePath).select(sourceFile(input_file_name())).first()
self.assertTrue(row[0].find("people1.json") != -1)
def test_udf_with_input_file_name_for_hadooprdd(self):
from pyspark.sql.functions import udf, input_file_name
from pyspark.sql.types import StringType
def filename(path):
return path
sameText = udf(filename, StringType())
rdd = self.sc.textFile('python/test_support/sql/people.json')
df = self.spark.read.json(rdd).select(input_file_name().alias('file'))
row = df.select(sameText(df['file'])).first()
self.assertTrue(row[0].find("people.json") != -1)
rdd2 = self.sc.newAPIHadoopFile(
'python/test_support/sql/people.json',
'org.apache.hadoop.mapreduce.lib.input.TextInputFormat',
'org.apache.hadoop.io.LongWritable',
'org.apache.hadoop.io.Text')
df2 = self.spark.read.json(rdd2).select(input_file_name().alias('file'))
row2 = df2.select(sameText(df2['file'])).first()
self.assertTrue(row2[0].find("people.json") != -1)
def test_udf_defers_judf_initialization(self):
# This is kept separate from UDFInitializationTests to avoid
# context initialization when the udf is called.
from pyspark.sql.functions import UserDefinedFunction
f = UserDefinedFunction(lambda x: x, StringType())
self.assertIsNone(
f._judf_placeholder,
"judf should not be initialized before the first call."
)
self.assertIsInstance(f("foo"), Column, "UDF call should return a Column.")
self.assertIsNotNone(
f._judf_placeholder,
"judf should be initialized after UDF has been called."
)
def test_udf_with_string_return_type(self):
from pyspark.sql.functions import UserDefinedFunction
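# The return type can be given as a DDL-formatted string ("integer",
# "struct<x:integer,y:integer>", "array<double>") instead of a DataType instance.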
add_one = UserDefinedFunction(lambda x: x + 1, "integer")
make_pair = UserDefinedFunction(lambda x: (-x, x), "struct<x:integer,y:integer>")
make_array = UserDefinedFunction(
lambda x: [float(x) for x in range(x, x + 3)], "array<double>")
expected = (2, Row(x=-1, y=1), [1.0, 2.0, 3.0])
actual = (self.spark.range(1, 2).toDF("x")
.select(add_one("x"), make_pair("x"), make_array("x"))
.first())
self.assertTupleEqual(expected, actual)
def test_udf_shouldnt_accept_noncallable_object(self):
from pyspark.sql.functions import UserDefinedFunction
from pyspark.sql.types import StringType
non_callable = None
self.assertRaises(TypeError, UserDefinedFunction, non_callable, StringType())
def test_udf_with_decorator(self):
from pyspark.sql.functions import lit, udf
from pyspark.sql.types import IntegerType, DoubleType
@udf(IntegerType())
def add_one(x):
if x is not None:
return x + 1
@udf(returnType=DoubleType())
def add_two(x):
if x is not None:
return float(x + 2)
@udf
def to_upper(x):
if x is not None:
return x.upper()
@udf()
def to_lower(x):
if x is not None:
return x.lower()
@udf
def substr(x, start, end):
if x is not None:
return x[start:end]
@udf("long")
def trunc(x):
return int(x)
@udf(returnType="double")
def as_double(x):
return float(x)
df = (
self.spark
.createDataFrame(
[(1, "Foo", "foobar", 3.0)], ("one", "Foo", "foobar", "float"))
.select(
add_one("one"), add_two("one"),
to_upper("Foo"), to_lower("Foo"),
substr("foobar", lit(0), lit(3)),
trunc("float"), as_double("one")))
self.assertListEqual(
[tpe for _, tpe in df.dtypes],
["int", "double", "string", "string", "string", "bigint", "double"]
)
self.assertListEqual(
list(df.first()),
[2, 3.0, "FOO", "foo", "foo", 3, 1.0]
)
def test_udf_wrapper(self):
from pyspark.sql.functions import udf
from pyspark.sql.types import IntegerType
def f(x):
"""Identity"""
return x
return_type = IntegerType()
f_ = udf(f, return_type)
self.assertTrue(f.__doc__ in f_.__doc__)
self.assertEqual(f, f_.func)
self.assertEqual(return_type, f_.returnType)
class F(object):
"""Identity"""
def __call__(self, x):
return x
f = F()
return_type = IntegerType()
f_ = udf(f, return_type)
self.assertTrue(f.__doc__ in f_.__doc__)
self.assertEqual(f, f_.func)
self.assertEqual(return_type, f_.returnType)
f = functools.partial(f, x=1)
return_type = IntegerType()
f_ = udf(f, return_type)
self.assertTrue(f.__doc__ in f_.__doc__)
self.assertEqual(f, f_.func)
self.assertEqual(return_type, f_.returnType)
def test_basic_functions(self):
rdd = self.sc.parallelize(['{"foo":"bar"}', '{"foo":"baz"}'])
df = self.spark.read.json(rdd)
df.count()
df.collect()
df.schema
# cache and checkpoint
self.assertFalse(df.is_cached)
df.persist()
df.unpersist(True)
df.cache()
self.assertTrue(df.is_cached)
self.assertEqual(2, df.count())
df.createOrReplaceTempView("temp")
df = self.spark.sql("select foo from temp")
df.count()
df.collect()
def test_apply_schema_to_row(self):
df = self.spark.read.json(self.sc.parallelize(["""{"a":2}"""]))
df2 = self.spark.createDataFrame(df.rdd.map(lambda x: x), df.schema)
self.assertEqual(df.collect(), df2.collect())
rdd = self.sc.parallelize(range(10)).map(lambda x: Row(a=x))
df3 = self.spark.createDataFrame(rdd, df.schema)
self.assertEqual(10, df3.count())
def test_infer_schema_to_local(self):
input = [{"a": 1}, {"b": "coffee"}]
rdd = self.sc.parallelize(input)
df = self.spark.createDataFrame(input)
df2 = self.spark.createDataFrame(rdd, samplingRatio=1.0)
self.assertEqual(df.schema, df2.schema)
rdd = self.sc.parallelize(range(10)).map(lambda x: Row(a=x, b=None))
df3 = self.spark.createDataFrame(rdd, df.schema)
self.assertEqual(10, df3.count())
def test_apply_schema_to_dict_and_rows(self):
schema = StructType().add("b", StringType()).add("a", IntegerType())
input = [{"a": 1}, {"b": "coffee"}]
rdd = self.sc.parallelize(input)
for verify in [False, True]:
df = self.spark.createDataFrame(input, schema, verifySchema=verify)
df2 = self.spark.createDataFrame(rdd, schema, verifySchema=verify)
self.assertEqual(df.schema, df2.schema)
rdd = self.sc.parallelize(range(10)).map(lambda x: Row(a=x, b=None))
df3 = self.spark.createDataFrame(rdd, schema, verifySchema=verify)
self.assertEqual(10, df3.count())
input = [Row(a=x, b=str(x)) for x in range(10)]
df4 = self.spark.createDataFrame(input, schema, verifySchema=verify)
self.assertEqual(10, df4.count())
def test_create_dataframe_schema_mismatch(self):
input = [Row(a=1)]
rdd = self.sc.parallelize(range(3)).map(lambda i: Row(a=i))
schema = StructType([StructField("a", IntegerType()), StructField("b", StringType())])
df = self.spark.createDataFrame(rdd, schema)
self.assertRaises(Exception, lambda: df.show())
def test_serialize_nested_array_and_map(self):
d = [Row(l=[Row(a=1, b='s')], d={"key": Row(c=1.0, d="2")})]
rdd = self.sc.parallelize(d)
df = self.spark.createDataFrame(rdd)
row = df.head()
self.assertEqual(1, len(row.l))
self.assertEqual(1, row.l[0].a)
self.assertEqual("2", row.d["key"].d)
l = df.rdd.map(lambda x: x.l).first()
self.assertEqual(1, len(l))
self.assertEqual('s', l[0].b)
d = df.rdd.map(lambda x: x.d).first()
self.assertEqual(1, len(d))
self.assertEqual(1.0, d["key"].c)
row = df.rdd.map(lambda x: x.d["key"]).first()
self.assertEqual(1.0, row.c)
self.assertEqual("2", row.d)
def test_infer_schema(self):
d = [Row(l=[], d={}, s=None),
Row(l=[Row(a=1, b='s')], d={"key": Row(c=1.0, d="2")}, s="")]
rdd = self.sc.parallelize(d)
df = self.spark.createDataFrame(rdd)
self.assertEqual([], df.rdd.map(lambda r: r.l).first())
self.assertEqual([None, ""], df.rdd.map(lambda r: r.s).collect())
df.createOrReplaceTempView("test")
result = self.spark.sql("SELECT l[0].a from test where d['key'].d = '2'")
self.assertEqual(1, result.head()[0])
df2 = self.spark.createDataFrame(rdd, samplingRatio=1.0)
self.assertEqual(df.schema, df2.schema)
self.assertEqual({}, df2.rdd.map(lambda r: r.d).first())
self.assertEqual([None, ""], df2.rdd.map(lambda r: r.s).collect())
df2.createOrReplaceTempView("test2")
result = self.spark.sql("SELECT l[0].a from test2 where d['key'].d = '2'")
self.assertEqual(1, result.head()[0])
def test_infer_nested_schema(self):
NestedRow = Row("f1", "f2")
nestedRdd1 = self.sc.parallelize([NestedRow([1, 2], {"row1": 1.0}),
NestedRow([2, 3], {"row2": 2.0})])
df = self.spark.createDataFrame(nestedRdd1)
self.assertEqual(Row(f1=[1, 2], f2={u'row1': 1.0}), df.collect()[0])
nestedRdd2 = self.sc.parallelize([NestedRow([[1, 2], [2, 3]], [1, 2]),
NestedRow([[2, 3], [3, 4]], [2, 3])])
df = self.spark.createDataFrame(nestedRdd2)
self.assertEqual(Row(f1=[[1, 2], [2, 3]], f2=[1, 2]), df.collect()[0])
from collections import namedtuple
CustomRow = namedtuple('CustomRow', 'field1 field2')
rdd = self.sc.parallelize([CustomRow(field1=1, field2="row1"),
CustomRow(field1=2, field2="row2"),
CustomRow(field1=3, field2="row3")])
df = self.spark.createDataFrame(rdd)
self.assertEqual(Row(field1=1, field2=u'row1'), df.first())
def test_create_dataframe_from_objects(self):
data = [MyObject(1, "1"), MyObject(2, "2")]
df = self.spark.createDataFrame(data)
self.assertEqual(df.dtypes, [("key", "bigint"), ("value", "string")])
self.assertEqual(df.first(), Row(key=1, value="1"))
def test_select_null_literal(self):
df = self.spark.sql("select null as col")
self.assertEqual(Row(col=None), df.first())
def test_apply_schema(self):
from datetime import date, datetime
rdd = self.sc.parallelize([(127, -128, -32768, 32767, 2147483647, 1.0,
date(2010, 1, 1), datetime(2010, 1, 1, 1, 1, 1),
{"a": 1}, (2,), [1, 2, 3], None)])
schema = StructType([
StructField("byte1", ByteType(), False),
StructField("byte2", ByteType(), False),
StructField("short1", ShortType(), False),
StructField("short2", ShortType(), False),
StructField("int1", IntegerType(), False),
StructField("float1", FloatType(), False),
StructField("date1", DateType(), False),
StructField("time1", TimestampType(), False),
StructField("map1", MapType(StringType(), IntegerType(), False), False),
StructField("struct1", StructType([StructField("b", ShortType(), False)]), False),
StructField("list1", ArrayType(ByteType(), False), False),
StructField("null1", DoubleType(), True)])
df = self.spark.createDataFrame(rdd, schema)
results = df.rdd.map(lambda x: (x.byte1, x.byte2, x.short1, x.short2, x.int1, x.float1,
x.date1, x.time1, x.map1["a"], x.struct1.b, x.list1, x.null1))
r = (127, -128, -32768, 32767, 2147483647, 1.0, date(2010, 1, 1),
datetime(2010, 1, 1, 1, 1, 1), 1, 2, [1, 2, 3], None)
self.assertEqual(r, results.first())
df.createOrReplaceTempView("table2")
r = self.spark.sql("SELECT byte1 - 1 AS byte1, byte2 + 1 AS byte2, " +
"short1 + 1 AS short1, short2 - 1 AS short2, int1 - 1 AS int1, " +
"float1 + 1.5 as float1 FROM table2").first()
self.assertEqual((126, -127, -32767, 32766, 2147483646, 2.5), tuple(r))
from pyspark.sql.types import _parse_schema_abstract, _infer_schema_type
rdd = self.sc.parallelize([(127, -32768, 1.0, datetime(2010, 1, 1, 1, 1, 1),
{"a": 1}, (2,), [1, 2, 3])])
abstract = "byte1 short1 float1 time1 map1{} struct1(b) list1[]"
schema = _parse_schema_abstract(abstract)
typedSchema = _infer_schema_type(rdd.first(), schema)
df = self.spark.createDataFrame(rdd, typedSchema)
r = (127, -32768, 1.0, datetime(2010, 1, 1, 1, 1, 1), {"a": 1}, Row(b=2), [1, 2, 3])
self.assertEqual(r, tuple(df.first()))
def test_struct_in_map(self):
d = [Row(m={Row(i=1): Row(s="")})]
df = self.sc.parallelize(d).toDF()
k, v = list(df.head().m.items())[0]
self.assertEqual(1, k.i)
self.assertEqual("", v.s)
def test_convert_row_to_dict(self):
row = Row(l=[Row(a=1, b='s')], d={"key": Row(c=1.0, d="2")})
self.assertEqual(1, row.asDict()['l'][0].a)
df = self.sc.parallelize([row]).toDF()
df.createOrReplaceTempView("test")
row = self.spark.sql("select l, d from test").head()
self.assertEqual(1, row.asDict()["l"][0].a)
self.assertEqual(1.0, row.asDict()['d']['key'].c)
def test_udt(self):
from pyspark.sql.types import _parse_datatype_json_string, _infer_type, _make_type_verifier
from pyspark.sql.tests import ExamplePointUDT, ExamplePoint
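# check_datatype round-trips a type through pickle and through the JVM's JSON
# representation, verifying that equality is preserved both ways.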
def check_datatype(datatype):
pickled = pickle.loads(pickle.dumps(datatype))
assert datatype == pickled
scala_datatype = self.spark._jsparkSession.parseDataType(datatype.json())
python_datatype = _parse_datatype_json_string(scala_datatype.json())
assert datatype == python_datatype
check_datatype(ExamplePointUDT())
structtype_with_udt = StructType([StructField("label", DoubleType(), False),
StructField("point", ExamplePointUDT(), False)])
check_datatype(structtype_with_udt)
p = ExamplePoint(1.0, 2.0)
self.assertEqual(_infer_type(p), ExamplePointUDT())
_make_type_verifier(ExamplePointUDT())(ExamplePoint(1.0, 2.0))
self.assertRaises(ValueError, lambda: _make_type_verifier(ExamplePointUDT())([1.0, 2.0]))
check_datatype(PythonOnlyUDT())
structtype_with_udt = StructType([StructField("label", DoubleType(), False),
StructField("point", PythonOnlyUDT(), False)])
check_datatype(structtype_with_udt)
p = PythonOnlyPoint(1.0, 2.0)
self.assertEqual(_infer_type(p), PythonOnlyUDT())
_make_type_verifier(PythonOnlyUDT())(PythonOnlyPoint(1.0, 2.0))
self.assertRaises(
ValueError,
lambda: _make_type_verifier(PythonOnlyUDT())([1.0, 2.0]))
def test_simple_udt_in_df(self):
schema = StructType().add("key", LongType()).add("val", PythonOnlyUDT())
df = self.spark.createDataFrame(
[(i % 3, PythonOnlyPoint(float(i), float(i))) for i in range(10)],
schema=schema)
df.show()
def test_nested_udt_in_df(self):
schema = StructType().add("key", LongType()).add("val", ArrayType(PythonOnlyUDT()))
df = self.spark.createDataFrame(
[(i % 3, [PythonOnlyPoint(float(i), float(i))]) for i in range(10)],
schema=schema)
df.collect()
schema = StructType().add("key", LongType()).add("val",
MapType(LongType(), PythonOnlyUDT()))
df = self.spark.createDataFrame(
[(i % 3, {i % 3: PythonOnlyPoint(float(i + 1), float(i + 1))}) for i in range(10)],
schema=schema)
df.collect()
def test_complex_nested_udt_in_df(self):
from pyspark.sql.functions import udf
schema = StructType().add("key", LongType()).add("val", PythonOnlyUDT())
df = self.spark.createDataFrame(
[(i % 3, PythonOnlyPoint(float(i), float(i))) for i in range(10)],
schema=schema)
df.collect()
gd = df.groupby("key").agg({"val": "collect_list"})
gd.collect()
point_list_udf = udf(lambda k, v: [(k, v[0])], ArrayType(df.schema))
gd.select(point_list_udf(*gd)).collect()
def test_udt_with_none(self):
df = self.spark.range(0, 10, 1, 1)
def myudf(x):
if x > 0:
return PythonOnlyPoint(float(x), float(x))
self.spark.catalog.registerFunction("udf", myudf, PythonOnlyUDT())
rows = [r[0] for r in df.selectExpr("udf(id)").take(2)]
self.assertEqual(rows, [None, PythonOnlyPoint(1, 1)])
def test_infer_schema_with_udt(self):
from pyspark.sql.tests import ExamplePoint, ExamplePointUDT
row = Row(label=1.0, point=ExamplePoint(1.0, 2.0))
df = self.spark.createDataFrame([row])
schema = df.schema
field = [f for f in schema.fields if f.name == "point"][0]
self.assertEqual(type(field.dataType), ExamplePointUDT)
df.createOrReplaceTempView("labeled_point")
point = self.spark.sql("SELECT point FROM labeled_point").head().point
self.assertEqual(point, ExamplePoint(1.0, 2.0))
row = Row(label=1.0, point=PythonOnlyPoint(1.0, 2.0))
df = self.spark.createDataFrame([row])
schema = df.schema
field = [f for f in schema.fields if f.name == "point"][0]
self.assertEqual(type(field.dataType), PythonOnlyUDT)
df.createOrReplaceTempView("labeled_point")
point = self.spark.sql("SELECT point FROM labeled_point").head().point
self.assertEqual(point, PythonOnlyPoint(1.0, 2.0))
def test_apply_schema_with_udt(self):
from pyspark.sql.tests import ExamplePoint, ExamplePointUDT
row = (1.0, ExamplePoint(1.0, 2.0))
schema = StructType([StructField("label", DoubleType(), False),
StructField("point", ExamplePointUDT(), False)])
df = self.spark.createDataFrame([row], schema)
point = df.head().point
self.assertEqual(point, ExamplePoint(1.0, 2.0))
row = (1.0, PythonOnlyPoint(1.0, 2.0))
schema = StructType([StructField("label", DoubleType(), False),
StructField("point", PythonOnlyUDT(), False)])
df = self.spark.createDataFrame([row], schema)
point = df.head().point
self.assertEqual(point, PythonOnlyPoint(1.0, 2.0))
def test_udf_with_udt(self):
from pyspark.sql.tests import ExamplePoint, ExamplePointUDT
row = Row(label=1.0, point=ExamplePoint(1.0, 2.0))
df = self.spark.createDataFrame([row])
self.assertEqual(1.0, df.rdd.map(lambda r: r.point.x).first())
udf = UserDefinedFunction(lambda p: p.y, DoubleType())
self.assertEqual(2.0, df.select(udf(df.point)).first()[0])
udf2 = UserDefinedFunction(lambda p: ExamplePoint(p.x + 1, p.y + 1), ExamplePointUDT())
self.assertEqual(ExamplePoint(2.0, 3.0), df.select(udf2(df.point)).first()[0])
row = Row(label=1.0, point=PythonOnlyPoint(1.0, 2.0))
df = self.spark.createDataFrame([row])
self.assertEqual(1.0, df.rdd.map(lambda r: r.point.x).first())
udf = UserDefinedFunction(lambda p: p.y, DoubleType())
self.assertEqual(2.0, df.select(udf(df.point)).first()[0])
udf2 = UserDefinedFunction(lambda p: PythonOnlyPoint(p.x + 1, p.y + 1), PythonOnlyUDT())
self.assertEqual(PythonOnlyPoint(2.0, 3.0), df.select(udf2(df.point)).first()[0])
def test_parquet_with_udt(self):
from pyspark.sql.tests import ExamplePoint, ExamplePointUDT
row = Row(label=1.0, point=ExamplePoint(1.0, 2.0))
df0 = self.spark.createDataFrame([row])
output_dir = os.path.join(self.tempdir.name, "labeled_point")
df0.write.parquet(output_dir)
df1 = self.spark.read.parquet(output_dir)
point = df1.head().point
self.assertEqual(point, ExamplePoint(1.0, 2.0))
row = Row(label=1.0, point=PythonOnlyPoint(1.0, 2.0))
df0 = self.spark.createDataFrame([row])
df0.write.parquet(output_dir, mode='overwrite')
df1 = self.spark.read.parquet(output_dir)
point = df1.head().point
self.assertEqual(point, PythonOnlyPoint(1.0, 2.0))
def test_union_with_udt(self):
from pyspark.sql.tests import ExamplePoint, ExamplePointUDT
row1 = (1.0, ExamplePoint(1.0, 2.0))
row2 = (2.0, ExamplePoint(3.0, 4.0))
schema = StructType([StructField("label", DoubleType(), False),
StructField("point", ExamplePointUDT(), False)])
df1 = self.spark.createDataFrame([row1], schema)
df2 = self.spark.createDataFrame([row2], schema)
result = df1.union(df2).orderBy("label").collect()
self.assertEqual(
result,
[
Row(label=1.0, point=ExamplePoint(1.0, 2.0)),
Row(label=2.0, point=ExamplePoint(3.0, 4.0))
]
)
def test_column_operators(self):
ci = self.df.key
cs = self.df.value
c = ci == cs
self.assertTrue(isinstance((- ci - 1 - 2) % 3 * 2.5 / 3.5, Column))
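# Arithmetic with a Python literal on either side of the Column should return a
# Column (reflected operators).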
rcc = (1 + ci), (1 - ci), (1 * ci), (1 / ci), (1 % ci), (1 ** ci), (ci ** 1)
self.assertTrue(all(isinstance(c, Column) for c in rcc))
cb = [ci == 5, ci != 0, ci > 3, ci < 4, ci >= 0, ci <= 7]
self.assertTrue(all(isinstance(c, Column) for c in cb))
cbool = (ci & ci), (ci | ci), (~ci)
self.assertTrue(all(isinstance(c, Column) for c in cbool))
css = cs.contains('a'), cs.like('a'), cs.rlike('a'), cs.asc(), cs.desc(),\
cs.startswith('a'), cs.endswith('a'), ci.eqNullSafe(cs)
self.assertTrue(all(isinstance(c, Column) for c in css))
self.assertTrue(isinstance(ci.cast(LongType()), Column))
self.assertRaisesRegexp(ValueError,
"Cannot apply 'in' operator against a column",
lambda: 1 in cs)
def test_column_getitem(self):
from pyspark.sql.functions import col
self.assertIsInstance(col("foo")[1:3], Column)
self.assertIsInstance(col("foo")[0], Column)
self.assertIsInstance(col("foo")["bar"], Column)
self.assertRaises(ValueError, lambda: col("foo")[0:10:2])
def test_column_select(self):
df = self.df
self.assertEqual(self.testData, df.select("*").collect())
self.assertEqual(self.testData, df.select(df.key, df.value).collect())
self.assertEqual([Row(value='1')], df.where(df.key == 1).select(df.value).collect())
def test_freqItems(self):
vals = [Row(a=1, b=-2.0) if i % 2 == 0 else Row(a=i, b=i * 1.0) for i in range(100)]
df = self.sc.parallelize(vals).toDF()
items = df.stat.freqItems(("a", "b"), 0.4).collect()[0]
self.assertTrue(1 in items[0])
self.assertTrue(-2.0 in items[1])
def test_aggregator(self):
df = self.df
g = df.groupBy()
self.assertEqual([99, 100], sorted(g.agg({'key': 'max', 'value': 'count'}).collect()[0]))
self.assertEqual([Row(**{"AVG(key#0)": 49.5})], g.mean().collect())
from pyspark.sql import functions
self.assertEqual((0, u'99'),
tuple(g.agg(functions.first(df.key), functions.last(df.value)).first()))
self.assertTrue(95 < g.agg(functions.approxCountDistinct(df.key)).first()[0])
self.assertEqual(100, g.agg(functions.countDistinct(df.value)).first()[0])
def test_first_last_ignorenulls(self):
from pyspark.sql import functions
df = self.spark.range(0, 100)
df2 = df.select(functions.when(df.id % 3 == 0, None).otherwise(df.id).alias("id"))
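# The second argument of first()/last() is ignorenulls: when True, null values
# are skipped; otherwise the literal first/last value (possibly null) is taken.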
df3 = df2.select(functions.first(df2.id, False).alias('a'),
functions.first(df2.id, True).alias('b'),
functions.last(df2.id, False).alias('c'),
functions.last(df2.id, True).alias('d'))
self.assertEqual([Row(a=None, b=1, c=None, d=98)], df3.collect())
def test_approxQuantile(self):
df = self.sc.parallelize([Row(a=i, b=i+10) for i in range(10)]).toDF()
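# approxQuantile(col, probabilities, relativeError): the last argument is the
# allowed relative error of the approximation.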
aq = df.stat.approxQuantile("a", [0.1, 0.5, 0.9], 0.1)
self.assertTrue(isinstance(aq, list))
self.assertEqual(len(aq), 3)
self.assertTrue(all(isinstance(q, float) for q in aq))
aqs = df.stat.approxQuantile(["a", "b"], [0.1, 0.5, 0.9], 0.1)
self.assertTrue(isinstance(aqs, list))
self.assertEqual(len(aqs), 2)
self.assertTrue(isinstance(aqs[0], list))
self.assertEqual(len(aqs[0]), 3)
self.assertTrue(all(isinstance(q, float) for q in aqs[0]))
self.assertTrue(isinstance(aqs[1], list))
self.assertEqual(len(aqs[1]), 3)
self.assertTrue(all(isinstance(q, float) for q in aqs[1]))
aqt = df.stat.approxQuantile(("a", "b"), [0.1, 0.5, 0.9], 0.1)
self.assertTrue(isinstance(aqt, list))
self.assertEqual(len(aqt), 2)
self.assertTrue(isinstance(aqt[0], list))
self.assertEqual(len(aqt[0]), 3)
self.assertTrue(all(isinstance(q, float) for q in aqt[0]))
self.assertTrue(isinstance(aqt[1], list))
self.assertEqual(len(aqt[1]), 3)
self.assertTrue(all(isinstance(q, float) for q in aqt[1]))
self.assertRaises(ValueError, lambda: df.stat.approxQuantile(123, [0.1, 0.9], 0.1))
self.assertRaises(ValueError, lambda: df.stat.approxQuantile(("a", 123), [0.1, 0.9], 0.1))
self.assertRaises(ValueError, lambda: df.stat.approxQuantile(["a", 123], [0.1, 0.9], 0.1))
def test_corr(self):
import math
df = self.sc.parallelize([Row(a=i, b=math.sqrt(i)) for i in range(10)]).toDF()
corr = df.stat.corr("a", "b")
self.assertTrue(abs(corr - 0.95734012) < 1e-6)
def test_cov(self):
df = self.sc.parallelize([Row(a=i, b=2 * i) for i in range(10)]).toDF()
cov = df.stat.cov("a", "b")
self.assertTrue(abs(cov - 55.0 / 3) < 1e-6)
def test_crosstab(self):
df = self.sc.parallelize([Row(a=i % 3, b=i % 2) for i in range(1, 7)]).toDF()
ct = df.stat.crosstab("a", "b").collect()
ct = sorted(ct, key=lambda x: x[0])
for i, row in enumerate(ct):
self.assertEqual(row[0], str(i))
self.assertEqual(row[1], 1)
self.assertEqual(row[2], 1)
def test_math_functions(self):
df = self.sc.parallelize([Row(a=i, b=2 * i) for i in range(10)]).toDF()
from pyspark.sql import functions
import math
def get_values(l):
return [j[0] for j in l]
def assert_close(a, b):
c = get_values(b)
diff = [abs(v - c[k]) < 1e-6 for k, v in enumerate(a)]
self.assertEqual(sum(diff), len(a))
assert_close([math.cos(i) for i in range(10)],
df.select(functions.cos(df.a)).collect())
assert_close([math.cos(i) for i in range(10)],
df.select(functions.cos("a")).collect())
assert_close([math.sin(i) for i in range(10)],
df.select(functions.sin(df.a)).collect())
assert_close([math.sin(i) for i in range(10)],
df.select(functions.sin(df['a'])).collect())
assert_close([math.pow(i, 2 * i) for i in range(10)],
df.select(functions.pow(df.a, df.b)).collect())
assert_close([math.pow(i, 2) for i in range(10)],
df.select(functions.pow(df.a, 2)).collect())
assert_close([math.pow(i, 2) for i in range(10)],
df.select(functions.pow(df.a, 2.0)).collect())
assert_close([math.hypot(i, 2 * i) for i in range(10)],
df.select(functions.hypot(df.a, df.b)).collect())
def test_rand_functions(self):
df = self.df
from pyspark.sql import functions
rnd = df.select('key', functions.rand()).collect()
for row in rnd:
assert row[1] >= 0.0 and row[1] <= 1.0, "got: %s" % row[1]
rndn = df.select('key', functions.randn(5)).collect()
for row in rndn:
assert row[1] >= -4.0 and row[1] <= 4.0, "got: %s" % row[1]
# If the specified seed is 0, we should use it.
# https://issues.apache.org/jira/browse/SPARK-9691
rnd1 = df.select('key', functions.rand(0)).collect()
rnd2 = df.select('key', functions.rand(0)).collect()
self.assertEqual(sorted(rnd1), sorted(rnd2))
rndn1 = df.select('key', functions.randn(0)).collect()
rndn2 = df.select('key', functions.randn(0)).collect()
self.assertEqual(sorted(rndn1), sorted(rndn2))
def test_array_contains_function(self):
from pyspark.sql.functions import array_contains
df = self.spark.createDataFrame([(["1", "2", "3"],), ([],)], ['data'])
actual = df.select(array_contains(df.data, 1).alias('b')).collect()
# The value argument can be implicitly cast to the element type of the array.
self.assertEqual([Row(b=True), Row(b=False)], actual)
def test_between_function(self):
df = self.sc.parallelize([
Row(a=1, b=2, c=3),
Row(a=2, b=1, c=3),
Row(a=4, b=1, c=4)]).toDF()
self.assertEqual([Row(a=2, b=1, c=3), Row(a=4, b=1, c=4)],
df.filter(df.a.between(df.b, df.c)).collect())
def test_struct_type(self):
from pyspark.sql.types import StructType, StringType, StructField
struct1 = StructType().add("f1", StringType(), True).add("f2", StringType(), True, None)
struct2 = StructType([StructField("f1", StringType(), True),
StructField("f2", StringType(), True, None)])
self.assertEqual(struct1.fieldNames(), struct2.names)
self.assertEqual(struct1, struct2)
struct1 = StructType().add("f1", StringType(), True).add("f2", StringType(), True, None)
struct2 = StructType([StructField("f1", StringType(), True)])
self.assertNotEqual(struct1.fieldNames(), struct2.names)
self.assertNotEqual(struct1, struct2)
struct1 = (StructType().add(StructField("f1", StringType(), True))
.add(StructField("f2", StringType(), True, None)))
struct2 = StructType([StructField("f1", StringType(), True),
StructField("f2", StringType(), True, None)])
self.assertEqual(struct1.fieldNames(), struct2.names)
self.assertEqual(struct1, struct2)
struct1 = (StructType().add(StructField("f1", StringType(), True))
.add(StructField("f2", StringType(), True, None)))
struct2 = StructType([StructField("f1", StringType(), True)])
self.assertNotEqual(struct1.fieldNames(), struct2.names)
self.assertNotEqual(struct1, struct2)
# Catch exception raised during improper construction
self.assertRaises(ValueError, lambda: StructType().add("name"))
struct1 = StructType().add("f1", StringType(), True).add("f2", StringType(), True, None)
for field in struct1:
self.assertIsInstance(field, StructField)
struct1 = StructType().add("f1", StringType(), True).add("f2", StringType(), True, None)
self.assertEqual(len(struct1), 2)
struct1 = StructType().add("f1", StringType(), True).add("f2", StringType(), True, None)
self.assertIs(struct1["f1"], struct1.fields[0])
self.assertIs(struct1[0], struct1.fields[0])
self.assertEqual(struct1[0:1], StructType(struct1.fields[0:1]))
self.assertRaises(KeyError, lambda: struct1["f9"])
self.assertRaises(IndexError, lambda: struct1[9])
self.assertRaises(TypeError, lambda: struct1[9.9])
def test_parse_datatype_string(self):
from pyspark.sql.types import _all_atomic_types, _parse_datatype_string
for k, t in _all_atomic_types.items():
if t != NullType:
self.assertEqual(t(), _parse_datatype_string(k))
self.assertEqual(IntegerType(), _parse_datatype_string("int"))
self.assertEqual(DecimalType(1, 1), _parse_datatype_string("decimal(1 ,1)"))
self.assertEqual(DecimalType(10, 1), _parse_datatype_string("decimal( 10,1 )"))
self.assertEqual(DecimalType(11, 1), _parse_datatype_string("decimal(11,1)"))
self.assertEqual(
ArrayType(IntegerType()),
_parse_datatype_string("array<int >"))
self.assertEqual(
MapType(IntegerType(), DoubleType()),
_parse_datatype_string("map< int, double >"))
self.assertEqual(
StructType([StructField("a", IntegerType()), StructField("c", DoubleType())]),
_parse_datatype_string("struct<a:int, c:double >"))
self.assertEqual(
StructType([StructField("a", IntegerType()), StructField("c", DoubleType())]),
_parse_datatype_string("a:int, c:double"))
self.assertEqual(
StructType([StructField("a", IntegerType()), StructField("c", DoubleType())]),
_parse_datatype_string("a INT, c DOUBLE"))
def test_metadata_null(self):
from pyspark.sql.types import StructType, StringType, StructField
schema = StructType([StructField("f1", StringType(), True, None),
StructField("f2", StringType(), True, {'a': None})])
rdd = self.sc.parallelize([["a", "b"], ["c", "d"]])
self.spark.createDataFrame(rdd, schema)
def test_save_and_load(self):
df = self.df
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
df.write.json(tmpPath)
actual = self.spark.read.json(tmpPath)
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
schema = StructType([StructField("value", StringType(), True)])
actual = self.spark.read.json(tmpPath, schema)
self.assertEqual(sorted(df.select("value").collect()), sorted(actual.collect()))
df.write.json(tmpPath, "overwrite")
actual = self.spark.read.json(tmpPath)
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
df.write.save(format="json", mode="overwrite", path=tmpPath,
noUse="this options will not be used in save.")
actual = self.spark.read.load(format="json", path=tmpPath,
noUse="this options will not be used in load.")
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
defaultDataSourceName = self.spark.conf.get("spark.sql.sources.default",
"org.apache.spark.sql.parquet")
self.spark.sql("SET spark.sql.sources.default=org.apache.spark.sql.json")
actual = self.spark.read.load(path=tmpPath)
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
self.spark.sql("SET spark.sql.sources.default=" + defaultDataSourceName)
csvpath = os.path.join(tempfile.mkdtemp(), 'data')
df.write.option('quote', None).format('csv').save(csvpath)
shutil.rmtree(tmpPath)
def test_save_and_load_builder(self):
df = self.df
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
df.write.json(tmpPath)
actual = self.spark.read.json(tmpPath)
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
schema = StructType([StructField("value", StringType(), True)])
actual = self.spark.read.json(tmpPath, schema)
self.assertEqual(sorted(df.select("value").collect()), sorted(actual.collect()))
df.write.mode("overwrite").json(tmpPath)
actual = self.spark.read.json(tmpPath)
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
df.write.mode("overwrite").options(noUse="this options will not be used in save.")\
.option("noUse", "this option will not be used in save.")\
.format("json").save(path=tmpPath)
actual =\
self.spark.read.format("json")\
.load(path=tmpPath, noUse="this option will not be used in load.")
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
defaultDataSourceName = self.spark.conf.get("spark.sql.sources.default",
"org.apache.spark.sql.parquet")
self.spark.sql("SET spark.sql.sources.default=org.apache.spark.sql.json")
actual = self.spark.read.load(path=tmpPath)
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
self.spark.sql("SET spark.sql.sources.default=" + defaultDataSourceName)
shutil.rmtree(tmpPath)
def test_stream_trigger(self):
df = self.spark.readStream.format('text').load('python/test_support/sql/streaming')
# Should take at least one arg
try:
df.writeStream.trigger()
self.fail("Should have thrown an exception")
except ValueError:
pass
# Should not take multiple args
try:
df.writeStream.trigger(once=True, processingTime='5 seconds')
self.fail("Should have thrown an exception")
except ValueError:
pass
# Should take only keyword args
try:
df.writeStream.trigger('5 seconds')
self.fail("Should have thrown an exception")
except TypeError:
pass
def test_stream_read_options(self):
schema = StructType([StructField("data", StringType(), False)])
df = self.spark.readStream\
.format('text')\
.option('path', 'python/test_support/sql/streaming')\
.schema(schema)\
.load()
self.assertTrue(df.isStreaming)
self.assertEqual(df.schema.simpleString(), "struct<data:string>")
def test_stream_read_options_overwrite(self):
bad_schema = StructType([StructField("test", IntegerType(), False)])
schema = StructType([StructField("data", StringType(), False)])
df = self.spark.readStream.format('csv').option('path', 'python/test_support/sql/fake') \
.schema(bad_schema)\
.load(path='python/test_support/sql/streaming', schema=schema, format='text')
self.assertTrue(df.isStreaming)
self.assertEqual(df.schema.simpleString(), "struct<data:string>")
def test_stream_save_options(self):
df = self.spark.readStream.format('text').load('python/test_support/sql/streaming') \
.withColumn('id', lit(1))
for q in self.spark._wrapped.streams.active:
q.stop()
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
self.assertTrue(df.isStreaming)
out = os.path.join(tmpPath, 'out')
chk = os.path.join(tmpPath, 'chk')
q = df.writeStream.option('checkpointLocation', chk).queryName('this_query') \
.format('parquet').partitionBy('id').outputMode('append').option('path', out).start()
try:
self.assertEqual(q.name, 'this_query')
self.assertTrue(q.isActive)
q.processAllAvailable()
output_files = []
for _, _, files in os.walk(out):
output_files.extend([f for f in files if not f.startswith('.')])
self.assertTrue(len(output_files) > 0)
self.assertTrue(len(os.listdir(chk)) > 0)
finally:
q.stop()
shutil.rmtree(tmpPath)
def test_stream_save_options_overwrite(self):
df = self.spark.readStream.format('text').load('python/test_support/sql/streaming')
for q in self.spark._wrapped.streams.active:
q.stop()
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
self.assertTrue(df.isStreaming)
out = os.path.join(tmpPath, 'out')
chk = os.path.join(tmpPath, 'chk')
fake1 = os.path.join(tmpPath, 'fake1')
fake2 = os.path.join(tmpPath, 'fake2')
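# Arguments passed to start() should override the options, format and query name
# configured earlier on the writer, so only `out` and `chk` are actually used.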
q = df.writeStream.option('checkpointLocation', fake1)\
.format('memory').option('path', fake2) \
.queryName('fake_query').outputMode('append') \
.start(path=out, format='parquet', queryName='this_query', checkpointLocation=chk)
try:
self.assertEqual(q.name, 'this_query')
self.assertTrue(q.isActive)
q.processAllAvailable()
output_files = []
for _, _, files in os.walk(out):
output_files.extend([f for f in files if not f.startswith('.')])
self.assertTrue(len(output_files) > 0)
self.assertTrue(len(os.listdir(chk)) > 0)
self.assertFalse(os.path.isdir(fake1)) # should not have been created
self.assertFalse(os.path.isdir(fake2)) # should not have been created
finally:
q.stop()
shutil.rmtree(tmpPath)
def test_stream_status_and_progress(self):
df = self.spark.readStream.format('text').load('python/test_support/sql/streaming')
for q in self.spark._wrapped.streams.active:
q.stop()
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
self.assertTrue(df.isStreaming)
out = os.path.join(tmpPath, 'out')
chk = os.path.join(tmpPath, 'chk')
def func(x):
time.sleep(1)
return x
from pyspark.sql.functions import col, udf
sleep_udf = udf(func)
# Use "sleep_udf" to delay the progress update so that we can test `lastProgress` when there
# were no updates.
q = df.select(sleep_udf(col("value")).alias('value')).writeStream \
.start(path=out, format='parquet', queryName='this_query', checkpointLocation=chk)
try:
# "lastProgress" will return None in most cases. However, as it may be flaky when
# Jenkins is very slow, we don't assert it. If there is something wrong, "lastProgress"
# may throw error with a high chance and make this test flaky, so we should still be
# able to detect broken codes.
q.lastProgress
q.processAllAvailable()
lastProgress = q.lastProgress
recentProgress = q.recentProgress
status = q.status
self.assertEqual(lastProgress['name'], q.name)
self.assertEqual(lastProgress['id'], q.id)
self.assertTrue(any(p == lastProgress for p in recentProgress))
self.assertTrue(
"message" in status and
"isDataAvailable" in status and
"isTriggerActive" in status)
finally:
q.stop()
shutil.rmtree(tmpPath)
def test_stream_await_termination(self):
df = self.spark.readStream.format('text').load('python/test_support/sql/streaming')
for q in self.spark._wrapped.streams.active:
q.stop()
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
self.assertTrue(df.isStreaming)
out = os.path.join(tmpPath, 'out')
chk = os.path.join(tmpPath, 'chk')
q = df.writeStream\
.start(path=out, format='parquet', queryName='this_query', checkpointLocation=chk)
try:
self.assertTrue(q.isActive)
try:
q.awaitTermination("hello")
self.fail("Expected a value exception")
except ValueError:
pass
now = time.time()
# test should take at least 2 seconds
res = q.awaitTermination(2.6)
duration = time.time() - now
self.assertTrue(duration >= 2)
self.assertFalse(res)
finally:
q.stop()
shutil.rmtree(tmpPath)
def test_stream_exception(self):
sdf = self.spark.readStream.format('text').load('python/test_support/sql/streaming')
sq = sdf.writeStream.format('memory').queryName('query_explain').start()
try:
sq.processAllAvailable()
self.assertEqual(sq.exception(), None)
finally:
sq.stop()
from pyspark.sql.functions import col, udf
from pyspark.sql.utils import StreamingQueryException
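# A UDF that always raises ZeroDivisionError, used to make the streaming query fail.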
bad_udf = udf(lambda x: 1 / 0)
sq = sdf.select(bad_udf(col("value")))\
.writeStream\
.format('memory')\
.queryName('this_query')\
.start()
try:
# Process some data to fail the query
sq.processAllAvailable()
self.fail("bad udf should fail the query")
except StreamingQueryException as e:
# This is expected
self.assertTrue("ZeroDivisionError" in e.desc)
finally:
sq.stop()
self.assertTrue(type(sq.exception()) is StreamingQueryException)
self.assertTrue("ZeroDivisionError" in sq.exception().desc)
def test_query_manager_await_termination(self):
df = self.spark.readStream.format('text').load('python/test_support/sql/streaming')
for q in self.spark._wrapped.streams.active:
q.stop()
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
self.assertTrue(df.isStreaming)
out = os.path.join(tmpPath, 'out')
chk = os.path.join(tmpPath, 'chk')
q = df.writeStream\
.start(path=out, format='parquet', queryName='this_query', checkpointLocation=chk)
try:
self.assertTrue(q.isActive)
try:
self.spark._wrapped.streams.awaitAnyTermination("hello")
self.fail("Expected a value exception")
except ValueError:
pass
now = time.time()
# test should take at least 2 seconds
res = self.spark._wrapped.streams.awaitAnyTermination(2.6)
duration = time.time() - now
self.assertTrue(duration >= 2)
self.assertFalse(res)
finally:
q.stop()
shutil.rmtree(tmpPath)
def test_help_command(self):
# Regression test for SPARK-5464
rdd = self.sc.parallelize(['{"foo":"bar"}', '{"foo":"baz"}'])
df = self.spark.read.json(rdd)
# render_doc() reproduces the help() exception without printing output
pydoc.render_doc(df)
pydoc.render_doc(df.foo)
pydoc.render_doc(df.take(1))
def test_access_column(self):
df = self.df
self.assertTrue(isinstance(df.key, Column))
self.assertTrue(isinstance(df['key'], Column))
self.assertTrue(isinstance(df[0], Column))
self.assertRaises(IndexError, lambda: df[2])
self.assertRaises(AnalysisException, lambda: df["bad_key"])
self.assertRaises(TypeError, lambda: df[{}])
def test_column_name_with_non_ascii(self):
if sys.version >= '3':
columnName = "数量"
self.assertTrue(isinstance(columnName, str))
else:
columnName = unicode("数量", "utf-8")
self.assertTrue(isinstance(columnName, unicode))
schema = StructType([StructField(columnName, LongType(), True)])
df = self.spark.createDataFrame([(1,)], schema)
self.assertEqual(schema, df.schema)
self.assertEqual("DataFrame[数量: bigint]", str(df))
self.assertEqual([("数量", 'bigint')], df.dtypes)
self.assertEqual(1, df.select("数量").first()[0])
self.assertEqual(1, df.select(df["数量"]).first()[0])
def test_access_nested_types(self):
df = self.sc.parallelize([Row(l=[1], r=Row(a=1, b="b"), d={"k": "v"})]).toDF()
self.assertEqual(1, df.select(df.l[0]).first()[0])
self.assertEqual(1, df.select(df.l.getItem(0)).first()[0])
self.assertEqual(1, df.select(df.r.a).first()[0])
self.assertEqual("b", df.select(df.r.getField("b")).first()[0])
self.assertEqual("v", df.select(df.d["k"]).first()[0])
self.assertEqual("v", df.select(df.d.getItem("k")).first()[0])
def test_field_accessor(self):
df = self.sc.parallelize([Row(l=[1], r=Row(a=1, b="b"), d={"k": "v"})]).toDF()
self.assertEqual(1, df.select(df.l[0]).first()[0])
self.assertEqual(1, df.select(df.r["a"]).first()[0])
self.assertEqual(1, df.select(df["r.a"]).first()[0])
self.assertEqual("b", df.select(df.r["b"]).first()[0])
self.assertEqual("b", df.select(df["r.b"]).first()[0])
self.assertEqual("v", df.select(df.d["k"]).first()[0])
def test_infer_long_type(self):
longrow = [Row(f1='a', f2=100000000000000)]
df = self.sc.parallelize(longrow).toDF()
self.assertEqual(df.schema.fields[1].dataType, LongType())
# saving this as Parquet caused issues as well.
output_dir = os.path.join(self.tempdir.name, "infer_long_type")
df.write.parquet(output_dir)
df1 = self.spark.read.parquet(output_dir)
self.assertEqual('a', df1.first().f1)
self.assertEqual(100000000000000, df1.first().f2)
self.assertEqual(_infer_type(1), LongType())
self.assertEqual(_infer_type(2**10), LongType())
self.assertEqual(_infer_type(2**20), LongType())
self.assertEqual(_infer_type(2**31 - 1), LongType())
self.assertEqual(_infer_type(2**31), LongType())
self.assertEqual(_infer_type(2**61), LongType())
self.assertEqual(_infer_type(2**71), LongType())
def test_filter_with_datetime(self):
time = datetime.datetime(2015, 4, 17, 23, 1, 2, 3000)
date = time.date()
row = Row(date=date, time=time)
df = self.spark.createDataFrame([row])
self.assertEqual(1, df.filter(df.date == date).count())
self.assertEqual(1, df.filter(df.time == time).count())
self.assertEqual(0, df.filter(df.date > date).count())
self.assertEqual(0, df.filter(df.time > time).count())
def test_filter_with_datetime_timezone(self):
dt1 = datetime.datetime(2015, 4, 17, 23, 1, 2, 3000, tzinfo=UTCOffsetTimezone(0))
dt2 = datetime.datetime(2015, 4, 17, 23, 1, 2, 3000, tzinfo=UTCOffsetTimezone(1))
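# dt1 and dt2 have the same wall-clock time but different UTC offsets, so dt1
# (UTC+0) is one hour later than dt2 (UTC+1) in absolute time.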
row = Row(date=dt1)
df = self.spark.createDataFrame([row])
self.assertEqual(0, df.filter(df.date == dt2).count())
self.assertEqual(1, df.filter(df.date > dt2).count())
self.assertEqual(0, df.filter(df.date < dt2).count())
def test_time_with_timezone(self):
day = datetime.date.today()
now = datetime.datetime.now()
ts = time.mktime(now.timetuple())
# classes defined in __main__ are not serializable
from pyspark.sql.tests import UTCOffsetTimezone
utc = UTCOffsetTimezone()
utcnow = datetime.datetime.utcfromtimestamp(ts) # without microseconds
# add microseconds to utcnow (keeping year,month,day,hour,minute,second)
utcnow = datetime.datetime(*(utcnow.timetuple()[:6] + (now.microsecond, utc)))
df = self.spark.createDataFrame([(day, now, utcnow)])
day1, now1, utcnow1 = df.first()
self.assertEqual(day1, day)
self.assertEqual(now, now1)
self.assertEqual(now, utcnow1)
# regression test for SPARK-19561
def test_datetime_at_epoch(self):
epoch = datetime.datetime.fromtimestamp(0)
df = self.spark.createDataFrame([Row(date=epoch)])
first = df.select('date', lit(epoch).alias('lit_date')).first()
self.assertEqual(first['date'], epoch)
self.assertEqual(first['lit_date'], epoch)
def test_decimal(self):
from decimal import Decimal
schema = StructType([StructField("decimal", DecimalType(10, 5))])
df = self.spark.createDataFrame([(Decimal("3.14159"),)], schema)
row = df.select(df.decimal + 1).first()
self.assertEqual(row[0], Decimal("4.14159"))
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
df.write.parquet(tmpPath)
df2 = self.spark.read.parquet(tmpPath)
row = df2.first()
self.assertEqual(row[0], Decimal("3.14159"))
def test_dropna(self):
schema = StructType([
StructField("name", StringType(), True),
StructField("age", IntegerType(), True),
StructField("height", DoubleType(), True)])
# shouldn't drop a non-null row
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', 50, 80.1)], schema).dropna().count(),
1)
# dropping rows with a single null value
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', None, 80.1)], schema).dropna().count(),
0)
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', None, 80.1)], schema).dropna(how='any').count(),
0)
# if how = 'all', only drop rows if all values are null
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', None, 80.1)], schema).dropna(how='all').count(),
1)
self.assertEqual(self.spark.createDataFrame(
[(None, None, None)], schema).dropna(how='all').count(),
0)
# how and subset
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', 50, None)], schema).dropna(how='any', subset=['name', 'age']).count(),
1)
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', None, None)], schema).dropna(how='any', subset=['name', 'age']).count(),
0)
# threshold
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', None, 80.1)], schema).dropna(thresh=2).count(),
1)
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', None, None)], schema).dropna(thresh=2).count(),
0)
# threshold and subset
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', 50, None)], schema).dropna(thresh=2, subset=['name', 'age']).count(),
1)
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', None, 180.9)], schema).dropna(thresh=2, subset=['name', 'age']).count(),
0)
# thresh should take precedence over how
self.assertEqual(self.spark.createDataFrame(
[(u'Alice', 50, None)], schema).dropna(
how='any', thresh=2, subset=['name', 'age']).count(),
1)
def test_fillna(self):
schema = StructType([
StructField("name", StringType(), True),
StructField("age", IntegerType(), True),
StructField("height", DoubleType(), True),
StructField("spy", BooleanType(), True)])
# fillna shouldn't change non-null values
row = self.spark.createDataFrame([(u'Alice', 10, 80.1, True)], schema).fillna(50).first()
self.assertEqual(row.age, 10)
# fillna with int
row = self.spark.createDataFrame([(u'Alice', None, None, None)], schema).fillna(50).first()
self.assertEqual(row.age, 50)
self.assertEqual(row.height, 50.0)
# fillna with double
row = self.spark.createDataFrame(
[(u'Alice', None, None, None)], schema).fillna(50.1).first()
self.assertEqual(row.age, 50)
self.assertEqual(row.height, 50.1)
# fillna with bool
row = self.spark.createDataFrame(
[(u'Alice', None, None, None)], schema).fillna(True).first()
self.assertEqual(row.age, None)
self.assertEqual(row.spy, True)
# fillna with string
row = self.spark.createDataFrame([(None, None, None, None)], schema).fillna("hello").first()
self.assertEqual(row.name, u"hello")
self.assertEqual(row.age, None)
# fillna with subset specified for numeric cols
row = self.spark.createDataFrame(
[(None, None, None, None)], schema).fillna(50, subset=['name', 'age']).first()
self.assertEqual(row.name, None)
self.assertEqual(row.age, 50)
self.assertEqual(row.height, None)
self.assertEqual(row.spy, None)
# fillna with subset specified for string cols
row = self.spark.createDataFrame(
[(None, None, None, None)], schema).fillna("haha", subset=['name', 'age']).first()
self.assertEqual(row.name, "haha")
self.assertEqual(row.age, None)
self.assertEqual(row.height, None)
self.assertEqual(row.spy, None)
# fillna with subset specified for bool cols
row = self.spark.createDataFrame(
[(None, None, None, None)], schema).fillna(True, subset=['name', 'spy']).first()
self.assertEqual(row.name, None)
self.assertEqual(row.age, None)
self.assertEqual(row.height, None)
self.assertEqual(row.spy, True)
# fillna with dictionary for boolean types
row = self.spark.createDataFrame([Row(a=None), Row(a=True)]).fillna({"a": True}).first()
self.assertEqual(row.a, True)
def test_bitwise_operations(self):
from pyspark.sql import functions
row = Row(a=170, b=75)
df = self.spark.createDataFrame([row])
result = df.select(df.a.bitwiseAND(df.b)).collect()[0].asDict()
self.assertEqual(170 & 75, result['(a & b)'])
result = df.select(df.a.bitwiseOR(df.b)).collect()[0].asDict()
self.assertEqual(170 | 75, result['(a | b)'])
result = df.select(df.a.bitwiseXOR(df.b)).collect()[0].asDict()
self.assertEqual(170 ^ 75, result['(a ^ b)'])
result = df.select(functions.bitwiseNOT(df.b)).collect()[0].asDict()
self.assertEqual(~75, result['~b'])
def test_expr(self):
from pyspark.sql import functions
row = Row(a="length string", b=75)
df = self.spark.createDataFrame([row])
result = df.select(functions.expr("length(a)")).collect()[0].asDict()
self.assertEqual(13, result["length(a)"])
def test_replace(self):
schema = StructType([
StructField("name", StringType(), True),
StructField("age", IntegerType(), True),
StructField("height", DoubleType(), True)])
# replace with int
row = self.spark.createDataFrame([(u'Alice', 10, 10.0)], schema).replace(10, 20).first()
self.assertEqual(row.age, 20)
self.assertEqual(row.height, 20.0)
# replace with double
row = self.spark.createDataFrame(
[(u'Alice', 80, 80.0)], schema).replace(80.0, 82.1).first()
self.assertEqual(row.age, 82)
self.assertEqual(row.height, 82.1)
# replace with string
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace(u'Alice', u'Ann').first()
self.assertEqual(row.name, u"Ann")
self.assertEqual(row.age, 10)
# replace with subset specified by a string of a column name w/ actual change
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace(10, 20, subset='age').first()
self.assertEqual(row.age, 20)
# replace with subset specified by a string of a column name w/o actual change
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace(10, 20, subset='height').first()
self.assertEqual(row.age, 10)
# replace with subset specified with one column replaced, another column not in subset
# stays unchanged.
row = self.spark.createDataFrame(
[(u'Alice', 10, 10.0)], schema).replace(10, 20, subset=['name', 'age']).first()
self.assertEqual(row.name, u'Alice')
self.assertEqual(row.age, 20)
self.assertEqual(row.height, 10.0)
# replace with subset specified but no column will be replaced
row = self.spark.createDataFrame(
[(u'Alice', 10, None)], schema).replace(10, 20, subset=['name', 'height']).first()
self.assertEqual(row.name, u'Alice')
self.assertEqual(row.age, 10)
self.assertEqual(row.height, None)
# replace with lists
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace([u'Alice'], [u'Ann']).first()
self.assertTupleEqual(row, (u'Ann', 10, 80.1))
# replace with dict
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace({10: 11}).first()
self.assertTupleEqual(row, (u'Alice', 11, 80.1))
# test backward compatibility with dummy value
dummy_value = 1
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace({'Alice': 'Bob'}, dummy_value).first()
self.assertTupleEqual(row, (u'Bob', 10, 80.1))
# test dict with mixed numerics
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace({10: -10, 80.1: 90.5}).first()
self.assertTupleEqual(row, (u'Alice', -10, 90.5))
# replace with tuples
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace((u'Alice', ), (u'Bob', )).first()
self.assertTupleEqual(row, (u'Bob', 10, 80.1))
# replace multiple columns
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.0)], schema).replace((10, 80.0), (20, 90)).first()
self.assertTupleEqual(row, (u'Alice', 20, 90.0))
# test for mixed numerics
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.0)], schema).replace((10, 80), (20, 90.5)).first()
self.assertTupleEqual(row, (u'Alice', 20, 90.5))
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.0)], schema).replace({10: 20, 80: 90.5}).first()
self.assertTupleEqual(row, (u'Alice', 20, 90.5))
# replace with boolean
row = (self
.spark.createDataFrame([(u'Alice', 10, 80.0)], schema)
.selectExpr("name = 'Bob'", 'age <= 15')
.replace(False, True).first())
self.assertTupleEqual(row, (True, True))
        # replace with a list while value is not given (defaults to None)
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.0)], schema).replace(["Alice", "Bob"]).first()
self.assertTupleEqual(row, (None, 10, 80.0))
# replace string with None and then drop None rows
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.0)], schema).replace(u'Alice', None).dropna()
self.assertEqual(row.count(), 0)
# replace with number and None
row = self.spark.createDataFrame(
[(u'Alice', 10, 80.0)], schema).replace([10, 80], [20, None]).first()
self.assertTupleEqual(row, (u'Alice', 20, None))
# should fail if subset is not list, tuple or None
with self.assertRaises(ValueError):
self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace({10: 11}, subset=1).first()
# should fail if to_replace and value have different length
with self.assertRaises(ValueError):
self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace(["Alice", "Bob"], ["Eve"]).first()
        # should fail when an unexpected type is received
with self.assertRaises(ValueError):
from datetime import datetime
self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace(datetime.now(), datetime.now()).first()
# should fail if provided mixed type replacements
with self.assertRaises(ValueError):
self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace(["Alice", 10], ["Eve", 20]).first()
with self.assertRaises(ValueError):
self.spark.createDataFrame(
[(u'Alice', 10, 80.1)], schema).replace({u"Alice": u"Bob", 10: 20}).first()
def test_capture_analysis_exception(self):
self.assertRaises(AnalysisException, lambda: self.spark.sql("select abc"))
self.assertRaises(AnalysisException, lambda: self.df.selectExpr("a + b"))
def test_capture_parse_exception(self):
self.assertRaises(ParseException, lambda: self.spark.sql("abc"))
def test_capture_illegalargument_exception(self):
self.assertRaisesRegexp(IllegalArgumentException, "Setting negative mapred.reduce.tasks",
lambda: self.spark.sql("SET mapred.reduce.tasks=-1"))
df = self.spark.createDataFrame([(1, 2)], ["a", "b"])
self.assertRaisesRegexp(IllegalArgumentException, "1024 is not in the permitted values",
lambda: df.select(sha2(df.a, 1024)).collect())
try:
df.select(sha2(df.a, 1024)).collect()
except IllegalArgumentException as e:
self.assertRegexpMatches(e.desc, "1024 is not in the permitted values")
self.assertRegexpMatches(e.stackTrace,
"org.apache.spark.sql.functions")
def test_with_column_with_existing_name(self):
keys = self.df.withColumn("key", self.df.key).select("key").collect()
self.assertEqual([r.key for r in keys], list(range(100)))
# regression test for SPARK-10417
def test_column_iterator(self):
def foo():
for x in self.df.key:
break
self.assertRaises(TypeError, foo)
# add test for SPARK-10577 (test broadcast join hint)
def test_functions_broadcast(self):
from pyspark.sql.functions import broadcast
df1 = self.spark.createDataFrame([(1, "1"), (2, "2")], ("key", "value"))
df2 = self.spark.createDataFrame([(1, "1"), (2, "2")], ("key", "value"))
# equijoin - should be converted into broadcast join
plan1 = df1.join(broadcast(df2), "key")._jdf.queryExecution().executedPlan()
self.assertEqual(1, plan1.toString().count("BroadcastHashJoin"))
# no join key -- should not be a broadcast join
plan2 = df1.crossJoin(broadcast(df2))._jdf.queryExecution().executedPlan()
self.assertEqual(0, plan2.toString().count("BroadcastHashJoin"))
# planner should not crash without a join
broadcast(df1)._jdf.queryExecution().executedPlan()
def test_generic_hints(self):
from pyspark.sql import DataFrame
df1 = self.spark.range(10e10).toDF("id")
df2 = self.spark.range(10e10).toDF("id")
self.assertIsInstance(df1.hint("broadcast"), DataFrame)
self.assertIsInstance(df1.hint("broadcast", []), DataFrame)
# Dummy rules
self.assertIsInstance(df1.hint("broadcast", "foo", "bar"), DataFrame)
self.assertIsInstance(df1.hint("broadcast", ["foo", "bar"]), DataFrame)
plan = df1.join(df2.hint("broadcast"), "id")._jdf.queryExecution().executedPlan()
self.assertEqual(1, plan.toString().count("BroadcastHashJoin"))
def test_toDF_with_schema_string(self):
data = [Row(key=i, value=str(i)) for i in range(100)]
rdd = self.sc.parallelize(data, 5)
df = rdd.toDF("key: int, value: string")
self.assertEqual(df.schema.simpleString(), "struct<key:int,value:string>")
self.assertEqual(df.collect(), data)
# different but compatible field types can be used.
df = rdd.toDF("key: string, value: string")
self.assertEqual(df.schema.simpleString(), "struct<key:string,value:string>")
self.assertEqual(df.collect(), [Row(key=str(i), value=str(i)) for i in range(100)])
# field names can differ.
df = rdd.toDF(" a: int, b: string ")
self.assertEqual(df.schema.simpleString(), "struct<a:int,b:string>")
self.assertEqual(df.collect(), data)
# number of fields must match.
self.assertRaisesRegexp(Exception, "Length of object",
lambda: rdd.toDF("key: int").collect())
# field types mismatch will cause exception at runtime.
self.assertRaisesRegexp(Exception, "FloatType can not accept",
lambda: rdd.toDF("key: float, value: string").collect())
# flat schema values will be wrapped into row.
df = rdd.map(lambda row: row.key).toDF("int")
self.assertEqual(df.schema.simpleString(), "struct<value:int>")
self.assertEqual(df.collect(), [Row(key=i) for i in range(100)])
# users can use DataType directly instead of data type string.
df = rdd.map(lambda row: row.key).toDF(IntegerType())
self.assertEqual(df.schema.simpleString(), "struct<value:int>")
self.assertEqual(df.collect(), [Row(key=i) for i in range(100)])
def test_join_without_on(self):
df1 = self.spark.range(1).toDF("a")
df2 = self.spark.range(1).toDF("b")
try:
self.spark.conf.set("spark.sql.crossJoin.enabled", "false")
self.assertRaises(AnalysisException, lambda: df1.join(df2, how="inner").collect())
self.spark.conf.set("spark.sql.crossJoin.enabled", "true")
actual = df1.join(df2, how="inner").collect()
expected = [Row(a=0, b=0)]
self.assertEqual(actual, expected)
finally:
# We should unset this. Otherwise, other tests are affected.
self.spark.conf.unset("spark.sql.crossJoin.enabled")
# Regression test for invalid join methods when on is None, Spark-14761
def test_invalid_join_method(self):
df1 = self.spark.createDataFrame([("Alice", 5), ("Bob", 8)], ["name", "age"])
df2 = self.spark.createDataFrame([("Alice", 80), ("Bob", 90)], ["name", "height"])
self.assertRaises(IllegalArgumentException, lambda: df1.join(df2, how="invalid-join-type"))
# Cartesian products require cross join syntax
def test_require_cross(self):
from pyspark.sql.functions import broadcast
df1 = self.spark.createDataFrame([(1, "1")], ("key", "value"))
df2 = self.spark.createDataFrame([(1, "1")], ("key", "value"))
# joins without conditions require cross join syntax
self.assertRaises(AnalysisException, lambda: df1.join(df2).collect())
# works with crossJoin
self.assertEqual(1, df1.crossJoin(df2).count())
def test_conf(self):
spark = self.spark
spark.conf.set("bogo", "sipeo")
self.assertEqual(spark.conf.get("bogo"), "sipeo")
spark.conf.set("bogo", "ta")
self.assertEqual(spark.conf.get("bogo"), "ta")
self.assertEqual(spark.conf.get("bogo", "not.read"), "ta")
self.assertEqual(spark.conf.get("not.set", "ta"), "ta")
self.assertRaisesRegexp(Exception, "not.set", lambda: spark.conf.get("not.set"))
spark.conf.unset("bogo")
self.assertEqual(spark.conf.get("bogo", "colombia"), "colombia")
def test_current_database(self):
spark = self.spark
spark.catalog._reset()
self.assertEquals(spark.catalog.currentDatabase(), "default")
spark.sql("CREATE DATABASE some_db")
spark.catalog.setCurrentDatabase("some_db")
self.assertEquals(spark.catalog.currentDatabase(), "some_db")
self.assertRaisesRegexp(
AnalysisException,
"does_not_exist",
lambda: spark.catalog.setCurrentDatabase("does_not_exist"))
def test_list_databases(self):
spark = self.spark
spark.catalog._reset()
databases = [db.name for db in spark.catalog.listDatabases()]
self.assertEquals(databases, ["default"])
spark.sql("CREATE DATABASE some_db")
databases = [db.name for db in spark.catalog.listDatabases()]
self.assertEquals(sorted(databases), ["default", "some_db"])
def test_list_tables(self):
from pyspark.sql.catalog import Table
spark = self.spark
spark.catalog._reset()
spark.sql("CREATE DATABASE some_db")
self.assertEquals(spark.catalog.listTables(), [])
self.assertEquals(spark.catalog.listTables("some_db"), [])
spark.createDataFrame([(1, 1)]).createOrReplaceTempView("temp_tab")
spark.sql("CREATE TABLE tab1 (name STRING, age INT) USING parquet")
spark.sql("CREATE TABLE some_db.tab2 (name STRING, age INT) USING parquet")
tables = sorted(spark.catalog.listTables(), key=lambda t: t.name)
tablesDefault = sorted(spark.catalog.listTables("default"), key=lambda t: t.name)
tablesSomeDb = sorted(spark.catalog.listTables("some_db"), key=lambda t: t.name)
self.assertEquals(tables, tablesDefault)
self.assertEquals(len(tables), 2)
self.assertEquals(len(tablesSomeDb), 2)
self.assertEquals(tables[0], Table(
name="tab1",
database="default",
description=None,
tableType="MANAGED",
isTemporary=False))
self.assertEquals(tables[1], Table(
name="temp_tab",
database=None,
description=None,
tableType="TEMPORARY",
isTemporary=True))
self.assertEquals(tablesSomeDb[0], Table(
name="tab2",
database="some_db",
description=None,
tableType="MANAGED",
isTemporary=False))
self.assertEquals(tablesSomeDb[1], Table(
name="temp_tab",
database=None,
description=None,
tableType="TEMPORARY",
isTemporary=True))
self.assertRaisesRegexp(
AnalysisException,
"does_not_exist",
lambda: spark.catalog.listTables("does_not_exist"))
def test_list_functions(self):
from pyspark.sql.catalog import Function
spark = self.spark
spark.catalog._reset()
spark.sql("CREATE DATABASE some_db")
functions = dict((f.name, f) for f in spark.catalog.listFunctions())
functionsDefault = dict((f.name, f) for f in spark.catalog.listFunctions("default"))
self.assertTrue(len(functions) > 200)
self.assertTrue("+" in functions)
self.assertTrue("like" in functions)
self.assertTrue("month" in functions)
self.assertTrue("to_date" in functions)
self.assertTrue("to_timestamp" in functions)
self.assertTrue("to_unix_timestamp" in functions)
self.assertTrue("current_database" in functions)
self.assertEquals(functions["+"], Function(
name="+",
description=None,
className="org.apache.spark.sql.catalyst.expressions.Add",
isTemporary=True))
self.assertEquals(functions, functionsDefault)
spark.catalog.registerFunction("temp_func", lambda x: str(x))
spark.sql("CREATE FUNCTION func1 AS 'org.apache.spark.data.bricks'")
spark.sql("CREATE FUNCTION some_db.func2 AS 'org.apache.spark.data.bricks'")
newFunctions = dict((f.name, f) for f in spark.catalog.listFunctions())
newFunctionsSomeDb = dict((f.name, f) for f in spark.catalog.listFunctions("some_db"))
self.assertTrue(set(functions).issubset(set(newFunctions)))
self.assertTrue(set(functions).issubset(set(newFunctionsSomeDb)))
self.assertTrue("temp_func" in newFunctions)
self.assertTrue("func1" in newFunctions)
self.assertTrue("func2" not in newFunctions)
self.assertTrue("temp_func" in newFunctionsSomeDb)
self.assertTrue("func1" not in newFunctionsSomeDb)
self.assertTrue("func2" in newFunctionsSomeDb)
self.assertRaisesRegexp(
AnalysisException,
"does_not_exist",
lambda: spark.catalog.listFunctions("does_not_exist"))
def test_list_columns(self):
from pyspark.sql.catalog import Column
spark = self.spark
spark.catalog._reset()
spark.sql("CREATE DATABASE some_db")
spark.sql("CREATE TABLE tab1 (name STRING, age INT) USING parquet")
spark.sql("CREATE TABLE some_db.tab2 (nickname STRING, tolerance FLOAT) USING parquet")
columns = sorted(spark.catalog.listColumns("tab1"), key=lambda c: c.name)
columnsDefault = sorted(spark.catalog.listColumns("tab1", "default"), key=lambda c: c.name)
self.assertEquals(columns, columnsDefault)
self.assertEquals(len(columns), 2)
self.assertEquals(columns[0], Column(
name="age",
description=None,
dataType="int",
nullable=True,
isPartition=False,
isBucket=False))
self.assertEquals(columns[1], Column(
name="name",
description=None,
dataType="string",
nullable=True,
isPartition=False,
isBucket=False))
columns2 = sorted(spark.catalog.listColumns("tab2", "some_db"), key=lambda c: c.name)
self.assertEquals(len(columns2), 2)
self.assertEquals(columns2[0], Column(
name="nickname",
description=None,
dataType="string",
nullable=True,
isPartition=False,
isBucket=False))
self.assertEquals(columns2[1], Column(
name="tolerance",
description=None,
dataType="float",
nullable=True,
isPartition=False,
isBucket=False))
self.assertRaisesRegexp(
AnalysisException,
"tab2",
lambda: spark.catalog.listColumns("tab2"))
self.assertRaisesRegexp(
AnalysisException,
"does_not_exist",
lambda: spark.catalog.listColumns("does_not_exist"))
def test_cache(self):
spark = self.spark
spark.createDataFrame([(2, 2), (3, 3)]).createOrReplaceTempView("tab1")
spark.createDataFrame([(2, 2), (3, 3)]).createOrReplaceTempView("tab2")
self.assertFalse(spark.catalog.isCached("tab1"))
self.assertFalse(spark.catalog.isCached("tab2"))
spark.catalog.cacheTable("tab1")
self.assertTrue(spark.catalog.isCached("tab1"))
self.assertFalse(spark.catalog.isCached("tab2"))
spark.catalog.cacheTable("tab2")
spark.catalog.uncacheTable("tab1")
self.assertFalse(spark.catalog.isCached("tab1"))
self.assertTrue(spark.catalog.isCached("tab2"))
spark.catalog.clearCache()
self.assertFalse(spark.catalog.isCached("tab1"))
self.assertFalse(spark.catalog.isCached("tab2"))
self.assertRaisesRegexp(
AnalysisException,
"does_not_exist",
lambda: spark.catalog.isCached("does_not_exist"))
self.assertRaisesRegexp(
AnalysisException,
"does_not_exist",
lambda: spark.catalog.cacheTable("does_not_exist"))
self.assertRaisesRegexp(
AnalysisException,
"does_not_exist",
lambda: spark.catalog.uncacheTable("does_not_exist"))
def test_read_text_file_list(self):
df = self.spark.read.text(['python/test_support/sql/text-test.txt',
'python/test_support/sql/text-test.txt'])
count = df.count()
self.assertEquals(count, 4)
def test_BinaryType_serialization(self):
# Pyrolite version <= 4.9 could not serialize BinaryType with Python3 SPARK-17808
schema = StructType([StructField('mybytes', BinaryType())])
data = [[bytearray(b'here is my data')],
[bytearray(b'and here is some more')]]
df = self.spark.createDataFrame(data, schema=schema)
df.collect()
# test for SPARK-16542
def test_array_types(self):
        # This test needs to make sure that the Scala type selected is at least
# as large as the python's types. This is necessary because python's
# array types depend on C implementation on the machine. Therefore there
# is no machine independent correspondence between python's array types
# and Scala types.
# See: https://docs.python.org/2/library/array.html
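        # For example, array.array('l') items are typically 8 bytes on LP64
        # platforms (Linux/macOS) but only 4 bytes on 64-bit Windows, so the
        # ctypes-based mappings below derive the matching Spark type from the
        # actual element size at runtime instead of hard-coding it.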
def assertCollectSuccess(typecode, value):
row = Row(myarray=array.array(typecode, [value]))
df = self.spark.createDataFrame([row])
self.assertEqual(df.first()["myarray"][0], value)
# supported string types
#
# String types in python's array are "u" for Py_UNICODE and "c" for char.
# "u" will be removed in python 4, and "c" is not supported in python 3.
supported_string_types = []
if sys.version_info[0] < 4:
supported_string_types += ['u']
# test unicode
assertCollectSuccess('u', u'a')
if sys.version_info[0] < 3:
supported_string_types += ['c']
# test string
assertCollectSuccess('c', 'a')
# supported float and double
#
# Test max, min, and precision for float and double, assuming IEEE 754
# floating-point format.
supported_fractional_types = ['f', 'd']
assertCollectSuccess('f', ctypes.c_float(1e+38).value)
assertCollectSuccess('f', ctypes.c_float(1e-38).value)
assertCollectSuccess('f', ctypes.c_float(1.123456).value)
assertCollectSuccess('d', sys.float_info.max)
assertCollectSuccess('d', sys.float_info.min)
assertCollectSuccess('d', sys.float_info.epsilon)
# supported signed int types
#
        # The size of C types changes with implementation, so we need to make sure
# that there is no overflow error on the platform running this test.
supported_signed_int_types = list(
set(_array_signed_int_typecode_ctype_mappings.keys())
.intersection(set(_array_type_mappings.keys())))
for t in supported_signed_int_types:
ctype = _array_signed_int_typecode_ctype_mappings[t]
max_val = 2 ** (ctypes.sizeof(ctype) * 8 - 1)
assertCollectSuccess(t, max_val - 1)
assertCollectSuccess(t, -max_val)
# supported unsigned int types
#
# JVM does not have unsigned types. We need to be very careful to make
# sure that there is no overflow error.
supported_unsigned_int_types = list(
set(_array_unsigned_int_typecode_ctype_mappings.keys())
.intersection(set(_array_type_mappings.keys())))
for t in supported_unsigned_int_types:
ctype = _array_unsigned_int_typecode_ctype_mappings[t]
assertCollectSuccess(t, 2 ** (ctypes.sizeof(ctype) * 8) - 1)
# all supported types
#
# Make sure the types tested above:
# 1. are all supported types
# 2. cover all supported types
supported_types = (supported_string_types +
supported_fractional_types +
supported_signed_int_types +
supported_unsigned_int_types)
self.assertEqual(set(supported_types), set(_array_type_mappings.keys()))
# all unsupported types
#
        # Keys in _array_type_mappings are a complete list of all supported types,
# and types not in _array_type_mappings are considered unsupported.
# `array.typecodes` are not supported in python 2.
if sys.version_info[0] < 3:
all_types = set(['c', 'b', 'B', 'u', 'h', 'H', 'i', 'I', 'l', 'L', 'f', 'd'])
else:
all_types = set(array.typecodes)
unsupported_types = all_types - set(supported_types)
# test unsupported types
for t in unsupported_types:
with self.assertRaises(TypeError):
a = array.array(t)
self.spark.createDataFrame([Row(myarray=a)]).collect()
def test_bucketed_write(self):
data = [
(1, "foo", 3.0), (2, "foo", 5.0),
(3, "bar", -1.0), (4, "bar", 6.0),
]
df = self.spark.createDataFrame(data, ["x", "y", "z"])
def count_bucketed_cols(names, table="pyspark_bucket"):
"""Given a sequence of column names and a table name
query the catalog and return number o columns which are
used for bucketing
"""
cols = self.spark.catalog.listColumns(table)
num = len([c for c in cols if c.name in names and c.isBucket])
return num
# Test write with one bucketing column
df.write.bucketBy(3, "x").mode("overwrite").saveAsTable("pyspark_bucket")
self.assertEqual(count_bucketed_cols(["x"]), 1)
self.assertSetEqual(set(data), set(self.spark.table("pyspark_bucket").collect()))
# Test write two bucketing columns
df.write.bucketBy(3, "x", "y").mode("overwrite").saveAsTable("pyspark_bucket")
self.assertEqual(count_bucketed_cols(["x", "y"]), 2)
self.assertSetEqual(set(data), set(self.spark.table("pyspark_bucket").collect()))
# Test write with bucket and sort
df.write.bucketBy(2, "x").sortBy("z").mode("overwrite").saveAsTable("pyspark_bucket")
self.assertEqual(count_bucketed_cols(["x"]), 1)
self.assertSetEqual(set(data), set(self.spark.table("pyspark_bucket").collect()))
# Test write with a list of columns
df.write.bucketBy(3, ["x", "y"]).mode("overwrite").saveAsTable("pyspark_bucket")
self.assertEqual(count_bucketed_cols(["x", "y"]), 2)
self.assertSetEqual(set(data), set(self.spark.table("pyspark_bucket").collect()))
# Test write with bucket and sort with a list of columns
(df.write.bucketBy(2, "x")
.sortBy(["y", "z"])
.mode("overwrite").saveAsTable("pyspark_bucket"))
self.assertSetEqual(set(data), set(self.spark.table("pyspark_bucket").collect()))
# Test write with bucket and sort with multiple columns
(df.write.bucketBy(2, "x")
.sortBy("y", "z")
.mode("overwrite").saveAsTable("pyspark_bucket"))
self.assertSetEqual(set(data), set(self.spark.table("pyspark_bucket").collect()))
@unittest.skipIf(not _have_pandas, "Pandas not installed")
def test_to_pandas(self):
import numpy as np
schema = StructType().add("a", IntegerType()).add("b", StringType())\
.add("c", BooleanType()).add("d", FloatType())
data = [
(1, "foo", True, 3.0), (2, "foo", True, 5.0),
(3, "bar", False, -1.0), (4, "bar", False, 6.0),
]
df = self.spark.createDataFrame(data, schema)
types = df.toPandas().dtypes
self.assertEquals(types[0], np.int32)
self.assertEquals(types[1], np.object)
self.assertEquals(types[2], np.bool)
self.assertEquals(types[3], np.float32)
def test_create_dataframe_from_array_of_long(self):
import array
data = [Row(longarray=array.array('l', [-9223372036854775808, 0, 9223372036854775807]))]
df = self.spark.createDataFrame(data)
self.assertEqual(df.first(), Row(longarray=[-9223372036854775808, 0, 9223372036854775807]))
class HiveSparkSubmitTests(SparkSubmitTests):
def test_hivecontext(self):
# This test checks that HiveContext is using Hive metastore (SPARK-16224).
# It sets a metastore url and checks if there is a derby dir created by
# Hive metastore. If this derby dir exists, HiveContext is using
# Hive metastore.
metastore_path = os.path.join(tempfile.mkdtemp(), "spark16224_metastore_db")
metastore_URL = "jdbc:derby:;databaseName=" + metastore_path + ";create=true"
hive_site_dir = os.path.join(self.programDir, "conf")
hive_site_file = self.createTempFile("hive-site.xml", ("""
|<configuration>
| <property>
| <name>javax.jdo.option.ConnectionURL</name>
| <value>%s</value>
| </property>
|</configuration>
""" % metastore_URL).lstrip(), "conf")
script = self.createTempFile("test.py", """
|import os
|
|from pyspark.conf import SparkConf
|from pyspark.context import SparkContext
|from pyspark.sql import HiveContext
|
|conf = SparkConf()
|sc = SparkContext(conf=conf)
|hive_context = HiveContext(sc)
|print(hive_context.sql("show databases").collect())
""")
proc = subprocess.Popen(
[self.sparkSubmit, "--master", "local-cluster[1,1,1024]",
"--driver-class-path", hive_site_dir, script],
stdout=subprocess.PIPE)
out, err = proc.communicate()
self.assertEqual(0, proc.returncode)
self.assertIn("default", out.decode('utf-8'))
self.assertTrue(os.path.exists(metastore_path))
class SQLTests2(ReusedPySparkTestCase):
@classmethod
def setUpClass(cls):
ReusedPySparkTestCase.setUpClass()
cls.spark = SparkSession(cls.sc)
@classmethod
def tearDownClass(cls):
ReusedPySparkTestCase.tearDownClass()
cls.spark.stop()
    # We can't include this test in SQLTests because it stops the class's SparkContext and
    # would cause other tests to fail.
def test_sparksession_with_stopped_sparkcontext(self):
self.sc.stop()
sc = SparkContext('local[4]', self.sc.appName)
spark = SparkSession.builder.getOrCreate()
df = spark.createDataFrame([(1, 2)], ["c", "c"])
df.collect()
class UDFInitializationTests(unittest.TestCase):
def tearDown(self):
if SparkSession._instantiatedSession is not None:
SparkSession._instantiatedSession.stop()
if SparkContext._active_spark_context is not None:
            SparkContext._active_spark_context.stop()
    def test_udf_init_shouldnt_initialize_context(self):
from pyspark.sql.functions import UserDefinedFunction
UserDefinedFunction(lambda x: x, StringType())
self.assertIsNone(
SparkContext._active_spark_context,
"SparkContext shouldn't be initialized when UserDefinedFunction is created."
)
self.assertIsNone(
SparkSession._instantiatedSession,
"SparkSession shouldn't be initialized when UserDefinedFunction is created."
)
class HiveContextSQLTests(ReusedPySparkTestCase):
@classmethod
def setUpClass(cls):
ReusedPySparkTestCase.setUpClass()
cls.tempdir = tempfile.NamedTemporaryFile(delete=False)
try:
cls.sc._jvm.org.apache.hadoop.hive.conf.HiveConf()
except py4j.protocol.Py4JError:
cls.tearDownClass()
raise unittest.SkipTest("Hive is not available")
except TypeError:
cls.tearDownClass()
raise unittest.SkipTest("Hive is not available")
os.unlink(cls.tempdir.name)
cls.spark = HiveContext._createForTesting(cls.sc)
cls.testData = [Row(key=i, value=str(i)) for i in range(100)]
cls.df = cls.sc.parallelize(cls.testData).toDF()
@classmethod
def tearDownClass(cls):
ReusedPySparkTestCase.tearDownClass()
shutil.rmtree(cls.tempdir.name, ignore_errors=True)
def test_save_and_load_table(self):
df = self.df
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
df.write.saveAsTable("savedJsonTable", "json", "append", path=tmpPath)
actual = self.spark.createExternalTable("externalJsonTable", tmpPath, "json")
self.assertEqual(sorted(df.collect()),
sorted(self.spark.sql("SELECT * FROM savedJsonTable").collect()))
self.assertEqual(sorted(df.collect()),
sorted(self.spark.sql("SELECT * FROM externalJsonTable").collect()))
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
self.spark.sql("DROP TABLE externalJsonTable")
df.write.saveAsTable("savedJsonTable", "json", "overwrite", path=tmpPath)
schema = StructType([StructField("value", StringType(), True)])
actual = self.spark.createExternalTable("externalJsonTable", source="json",
schema=schema, path=tmpPath,
noUse="this options will not be used")
self.assertEqual(sorted(df.collect()),
sorted(self.spark.sql("SELECT * FROM savedJsonTable").collect()))
self.assertEqual(sorted(df.select("value").collect()),
sorted(self.spark.sql("SELECT * FROM externalJsonTable").collect()))
self.assertEqual(sorted(df.select("value").collect()), sorted(actual.collect()))
self.spark.sql("DROP TABLE savedJsonTable")
self.spark.sql("DROP TABLE externalJsonTable")
defaultDataSourceName = self.spark.getConf("spark.sql.sources.default",
"org.apache.spark.sql.parquet")
self.spark.sql("SET spark.sql.sources.default=org.apache.spark.sql.json")
df.write.saveAsTable("savedJsonTable", path=tmpPath, mode="overwrite")
actual = self.spark.createExternalTable("externalJsonTable", path=tmpPath)
self.assertEqual(sorted(df.collect()),
sorted(self.spark.sql("SELECT * FROM savedJsonTable").collect()))
self.assertEqual(sorted(df.collect()),
sorted(self.spark.sql("SELECT * FROM externalJsonTable").collect()))
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
self.spark.sql("DROP TABLE savedJsonTable")
self.spark.sql("DROP TABLE externalJsonTable")
self.spark.sql("SET spark.sql.sources.default=" + defaultDataSourceName)
shutil.rmtree(tmpPath)
def test_window_functions(self):
df = self.spark.createDataFrame([(1, "1"), (2, "2"), (1, "2"), (1, "2")], ["key", "value"])
w = Window.partitionBy("value").orderBy("key")
from pyspark.sql import functions as F
sel = df.select(df.value, df.key,
F.max("key").over(w.rowsBetween(0, 1)),
F.min("key").over(w.rowsBetween(0, 1)),
F.count("key").over(w.rowsBetween(float('-inf'), float('inf'))),
F.row_number().over(w),
F.rank().over(w),
F.dense_rank().over(w),
F.ntile(2).over(w))
rs = sorted(sel.collect())
expected = [
("1", 1, 1, 1, 1, 1, 1, 1, 1),
("2", 1, 1, 1, 3, 1, 1, 1, 1),
("2", 1, 2, 1, 3, 2, 1, 1, 1),
("2", 2, 2, 2, 3, 3, 3, 2, 2)
]
for r, ex in zip(rs, expected):
self.assertEqual(tuple(r), ex[:len(r)])
def test_window_functions_without_partitionBy(self):
df = self.spark.createDataFrame([(1, "1"), (2, "2"), (1, "2"), (1, "2")], ["key", "value"])
w = Window.orderBy("key", df.value)
from pyspark.sql import functions as F
sel = df.select(df.value, df.key,
F.max("key").over(w.rowsBetween(0, 1)),
F.min("key").over(w.rowsBetween(0, 1)),
F.count("key").over(w.rowsBetween(float('-inf'), float('inf'))),
F.row_number().over(w),
F.rank().over(w),
F.dense_rank().over(w),
F.ntile(2).over(w))
rs = sorted(sel.collect())
expected = [
("1", 1, 1, 1, 4, 1, 1, 1, 1),
("2", 1, 1, 1, 4, 2, 2, 2, 1),
("2", 1, 2, 1, 4, 3, 2, 2, 2),
("2", 2, 2, 2, 4, 4, 4, 3, 2)
]
for r, ex in zip(rs, expected):
self.assertEqual(tuple(r), ex[:len(r)])
def test_window_functions_cumulative_sum(self):
df = self.spark.createDataFrame([("one", 1), ("two", 2)], ["key", "value"])
from pyspark.sql import functions as F
# Test cumulative sum
sel = df.select(
df.key,
F.sum(df.value).over(Window.rowsBetween(Window.unboundedPreceding, 0)))
rs = sorted(sel.collect())
expected = [("one", 1), ("two", 3)]
for r, ex in zip(rs, expected):
self.assertEqual(tuple(r), ex[:len(r)])
# Test boundary values less than JVM's Long.MinValue and make sure we don't overflow
sel = df.select(
df.key,
F.sum(df.value).over(Window.rowsBetween(Window.unboundedPreceding - 1, 0)))
rs = sorted(sel.collect())
expected = [("one", 1), ("two", 3)]
for r, ex in zip(rs, expected):
self.assertEqual(tuple(r), ex[:len(r)])
# Test boundary values greater than JVM's Long.MaxValue and make sure we don't overflow
frame_end = Window.unboundedFollowing + 1
sel = df.select(
df.key,
F.sum(df.value).over(Window.rowsBetween(Window.currentRow, frame_end)))
rs = sorted(sel.collect())
expected = [("one", 3), ("two", 2)]
for r, ex in zip(rs, expected):
self.assertEqual(tuple(r), ex[:len(r)])
def test_collect_functions(self):
df = self.spark.createDataFrame([(1, "1"), (2, "2"), (1, "2"), (1, "2")], ["key", "value"])
from pyspark.sql import functions
self.assertEqual(
sorted(df.select(functions.collect_set(df.key).alias('r')).collect()[0].r),
[1, 2])
self.assertEqual(
sorted(df.select(functions.collect_list(df.key).alias('r')).collect()[0].r),
[1, 1, 1, 2])
self.assertEqual(
sorted(df.select(functions.collect_set(df.value).alias('r')).collect()[0].r),
["1", "2"])
self.assertEqual(
sorted(df.select(functions.collect_list(df.value).alias('r')).collect()[0].r),
["1", "2", "2", "2"])
def test_limit_and_take(self):
df = self.spark.range(1, 1000, numPartitions=10)
def assert_runs_only_one_job_stage_and_task(job_group_name, f):
tracker = self.sc.statusTracker()
self.sc.setJobGroup(job_group_name, description="")
f()
jobs = tracker.getJobIdsForGroup(job_group_name)
self.assertEqual(1, len(jobs))
stages = tracker.getJobInfo(jobs[0]).stageIds
self.assertEqual(1, len(stages))
self.assertEqual(1, tracker.getStageInfo(stages[0]).numTasks)
# Regression test for SPARK-10731: take should delegate to Scala implementation
assert_runs_only_one_job_stage_and_task("take", lambda: df.take(1))
        # Regression test for SPARK-17514: limit(n).collect() should perform the same as take(n)
assert_runs_only_one_job_stage_and_task("collect_limit", lambda: df.limit(1).collect())
def test_datetime_functions(self):
from pyspark.sql import functions
from datetime import date, datetime
df = self.spark.range(1).selectExpr("'2017-01-22' as dateCol")
parse_result = df.select(functions.to_date(functions.col("dateCol"))).first()
self.assertEquals(date(2017, 1, 22), parse_result['to_date(`dateCol`)'])
@unittest.skipIf(sys.version_info < (3, 3), "Unittest < 3.3 doesn't support mocking")
def test_unbounded_frames(self):
from unittest.mock import patch
from pyspark.sql import functions as F
from pyspark.sql import window
import importlib
df = self.spark.range(0, 3)
def rows_frame_match():
return "ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING" in df.select(
F.count("*").over(window.Window.rowsBetween(-sys.maxsize, sys.maxsize))
).columns[0]
def range_frame_match():
return "RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING" in df.select(
F.count("*").over(window.Window.rangeBetween(-sys.maxsize, sys.maxsize))
).columns[0]
with patch("sys.maxsize", 2 ** 31 - 1):
importlib.reload(window)
self.assertTrue(rows_frame_match())
self.assertTrue(range_frame_match())
with patch("sys.maxsize", 2 ** 63 - 1):
importlib.reload(window)
self.assertTrue(rows_frame_match())
self.assertTrue(range_frame_match())
with patch("sys.maxsize", 2 ** 127 - 1):
importlib.reload(window)
self.assertTrue(rows_frame_match())
self.assertTrue(range_frame_match())
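        # Reload once more without the patch so later tests see Window frame
        # bounds computed from the real sys.maxsize.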
importlib.reload(window)
class DataTypeVerificationTests(unittest.TestCase):
def test_verify_type_exception_msg(self):
self.assertRaisesRegexp(
ValueError,
"test_name",
lambda: _make_type_verifier(StringType(), nullable=False, name="test_name")(None))
schema = StructType([StructField('a', StructType([StructField('b', IntegerType())]))])
self.assertRaisesRegexp(
TypeError,
"field b in field a",
lambda: _make_type_verifier(schema)([["data"]]))
def test_verify_type_ok_nullable(self):
obj = None
types = [IntegerType(), FloatType(), StringType(), StructType([])]
for data_type in types:
try:
_make_type_verifier(data_type, nullable=True)(obj)
except Exception:
self.fail("verify_type(%s, %s, nullable=True)" % (obj, data_type))
def test_verify_type_not_nullable(self):
import array
import datetime
import decimal
schema = StructType([
StructField('s', StringType(), nullable=False),
StructField('i', IntegerType(), nullable=True)])
class MyObj:
def __init__(self, **kwargs):
for k, v in kwargs.items():
setattr(self, k, v)
# obj, data_type
success_spec = [
# String
("", StringType()),
(u"", StringType()),
(1, StringType()),
(1.0, StringType()),
([], StringType()),
({}, StringType()),
# UDT
(ExamplePoint(1.0, 2.0), ExamplePointUDT()),
# Boolean
(True, BooleanType()),
# Byte
(-(2**7), ByteType()),
(2**7 - 1, ByteType()),
# Short
(-(2**15), ShortType()),
(2**15 - 1, ShortType()),
# Integer
(-(2**31), IntegerType()),
(2**31 - 1, IntegerType()),
# Long
(2**64, LongType()),
# Float & Double
(1.0, FloatType()),
(1.0, DoubleType()),
# Decimal
(decimal.Decimal("1.0"), DecimalType()),
# Binary
(bytearray([1, 2]), BinaryType()),
# Date/Timestamp
(datetime.date(2000, 1, 2), DateType()),
(datetime.datetime(2000, 1, 2, 3, 4), DateType()),
(datetime.datetime(2000, 1, 2, 3, 4), TimestampType()),
# Array
([], ArrayType(IntegerType())),
(["1", None], ArrayType(StringType(), containsNull=True)),
([1, 2], ArrayType(IntegerType())),
((1, 2), ArrayType(IntegerType())),
(array.array('h', [1, 2]), ArrayType(IntegerType())),
# Map
({}, MapType(StringType(), IntegerType())),
({"a": 1}, MapType(StringType(), IntegerType())),
({"a": None}, MapType(StringType(), IntegerType(), valueContainsNull=True)),
# Struct
({"s": "a", "i": 1}, schema),
({"s": "a", "i": None}, schema),
({"s": "a"}, schema),
({"s": "a", "f": 1.0}, schema),
(Row(s="a", i=1), schema),
(Row(s="a", i=None), schema),
(Row(s="a", i=1, f=1.0), schema),
(["a", 1], schema),
(["a", None], schema),
(("a", 1), schema),
(MyObj(s="a", i=1), schema),
(MyObj(s="a", i=None), schema),
(MyObj(s="a"), schema),
]
# obj, data_type, exception class
failure_spec = [
# String (match anything but None)
(None, StringType(), ValueError),
# UDT
(ExamplePoint(1.0, 2.0), PythonOnlyUDT(), ValueError),
# Boolean
(1, BooleanType(), TypeError),
("True", BooleanType(), TypeError),
([1], BooleanType(), TypeError),
# Byte
(-(2**7) - 1, ByteType(), ValueError),
(2**7, ByteType(), ValueError),
("1", ByteType(), TypeError),
(1.0, ByteType(), TypeError),
# Short
(-(2**15) - 1, ShortType(), ValueError),
(2**15, ShortType(), ValueError),
# Integer
(-(2**31) - 1, IntegerType(), ValueError),
(2**31, IntegerType(), ValueError),
# Float & Double
(1, FloatType(), TypeError),
(1, DoubleType(), TypeError),
# Decimal
(1.0, DecimalType(), TypeError),
(1, DecimalType(), TypeError),
("1.0", DecimalType(), TypeError),
# Binary
(1, BinaryType(), TypeError),
# Date/Timestamp
("2000-01-02", DateType(), TypeError),
(946811040, TimestampType(), TypeError),
# Array
(["1", None], ArrayType(StringType(), containsNull=False), ValueError),
([1, "2"], ArrayType(IntegerType()), TypeError),
# Map
({"a": 1}, MapType(IntegerType(), IntegerType()), TypeError),
({"a": "1"}, MapType(StringType(), IntegerType()), TypeError),
({"a": None}, MapType(StringType(), IntegerType(), valueContainsNull=False),
ValueError),
# Struct
({"s": "a", "i": "1"}, schema, TypeError),
(Row(s="a"), schema, ValueError), # Row can't have missing field
(Row(s="a", i="1"), schema, TypeError),
(["a"], schema, ValueError),
(["a", "1"], schema, TypeError),
(MyObj(s="a", i="1"), schema, TypeError),
(MyObj(s=None, i="1"), schema, ValueError),
]
# Check success cases
for obj, data_type in success_spec:
try:
_make_type_verifier(data_type, nullable=False)(obj)
except Exception:
self.fail("verify_type(%s, %s, nullable=False)" % (obj, data_type))
# Check failure cases
for obj, data_type, exp in failure_spec:
msg = "verify_type(%s, %s, nullable=False) == %s" % (obj, data_type, exp)
with self.assertRaises(exp, msg=msg):
_make_type_verifier(data_type, nullable=False)(obj)
@unittest.skipIf(not _have_arrow, "Arrow not installed")
class ArrowTests(ReusedPySparkTestCase):
@classmethod
def setUpClass(cls):
ReusedPySparkTestCase.setUpClass()
cls.spark = SparkSession(cls.sc)
cls.spark.conf.set("spark.sql.execution.arrow.enable", "true")
cls.schema = StructType([
StructField("1_str_t", StringType(), True),
StructField("2_int_t", IntegerType(), True),
StructField("3_long_t", LongType(), True),
StructField("4_float_t", FloatType(), True),
StructField("5_double_t", DoubleType(), True)])
cls.data = [("a", 1, 10, 0.2, 2.0),
("b", 2, 20, 0.4, 4.0),
("c", 3, 30, 0.8, 6.0)]
def assertFramesEqual(self, df_with_arrow, df_without):
msg = ("DataFrame from Arrow is not equal" +
("\n\nWith Arrow:\n%s\n%s" % (df_with_arrow, df_with_arrow.dtypes)) +
("\n\nWithout:\n%s\n%s" % (df_without, df_without.dtypes)))
self.assertTrue(df_without.equals(df_with_arrow), msg=msg)
def test_unsupported_datatype(self):
schema = StructType([StructField("dt", DateType(), True)])
df = self.spark.createDataFrame([(datetime.date(1970, 1, 1),)], schema=schema)
with QuietTest(self.sc):
self.assertRaises(Exception, lambda: df.toPandas())
def test_null_conversion(self):
df_null = self.spark.createDataFrame([tuple([None for _ in range(len(self.data[0]))])] +
self.data)
pdf = df_null.toPandas()
null_counts = pdf.isnull().sum().tolist()
self.assertTrue(all([c == 1 for c in null_counts]))
def test_toPandas_arrow_toggle(self):
df = self.spark.createDataFrame(self.data, schema=self.schema)
self.spark.conf.set("spark.sql.execution.arrow.enable", "false")
pdf = df.toPandas()
self.spark.conf.set("spark.sql.execution.arrow.enable", "true")
pdf_arrow = df.toPandas()
self.assertFramesEqual(pdf_arrow, pdf)
def test_pandas_round_trip(self):
import pandas as pd
import numpy as np
data_dict = {}
for j, name in enumerate(self.schema.names):
data_dict[name] = [self.data[i][j] for i in range(len(self.data))]
# need to convert these to numpy types first
data_dict["2_int_t"] = np.int32(data_dict["2_int_t"])
data_dict["4_float_t"] = np.float32(data_dict["4_float_t"])
pdf = pd.DataFrame(data=data_dict)
df = self.spark.createDataFrame(self.data, schema=self.schema)
pdf_arrow = df.toPandas()
self.assertFramesEqual(pdf_arrow, pdf)
def test_filtered_frame(self):
df = self.spark.range(3).toDF("i")
pdf = df.filter("i < 0").toPandas()
self.assertEqual(len(pdf.columns), 1)
self.assertEqual(pdf.columns[0], "i")
self.assertTrue(pdf.empty)
if __name__ == "__main__":
from pyspark.sql.tests import *
if xmlrunner:
unittest.main(testRunner=xmlrunner.XMLTestRunner(output='target/test-reports'))
else:
unittest.main()
|
xflin/spark
|
python/pyspark/sql/tests.py
|
Python
|
apache-2.0
| 131,465
|
# Copyright 2018 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import functools
import imp
import mock
import os
from oslotest import base
from testtools.matchers import Mismatch
installs_squash_src = (os.path.dirname(os.path.realpath(__file__)) +
'/../bin/package-installs-squash')
installs_squash = imp.load_source('installs_squash', installs_squash_src)
class IsMatchingInstallList(object):
def __init__(self, expected):
self.expected = expected
def match(self, actual):
for phase, ops in self.expected.items():
if phase not in actual:
# missing the phase
return Mismatch(
"Phase %d does not exist in %s" % (phase, actual))
for op, pkgs in ops.items():
if op not in actual[phase]:
# missing op (install/uninstall)
return Mismatch(
"Operation %s does not exist in %s" % (op, ops))
# on py2 these can be out of order, we just want a match
expected_phase_ops = sorted(self.expected[phase][op])
actual_phase_ops = sorted(actual[phase][op])
if expected_phase_ops != actual_phase_ops:
return Mismatch(
"Operation list %s does not match expected %s" %
(actual[phase][op], self.expected[phase][op]))
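# Shape of the structures this matcher compares, shown for illustration only
# (the phase, operation, package and element names below are hypothetical,
# not values used by the tests):
#
#     expected = {
#         'install.d': {
#             'install': [('some_package', 'some_element')],
#             'uninstall': [('other_package', 'some_element')],
#         },
#     }
#     self.assertThat(result, IsMatchingInstallList(expected))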
class TestPackageInstall(base.BaseTestCase):
def setUp(self):
super(TestPackageInstall, self).setUp()
self.final_dict = collections.defaultdict(
functools.partial(collections.defaultdict, list))
def test_simple(self):
'''Test a basic package install'''
objs = {
'test_package': ''
}
result = installs_squash.collect_data(
self.final_dict, objs, 'test_element')
expected = {
'install.d': {
'install': [('test_package', 'test_element')]
}
}
self.assertThat(result, IsMatchingInstallList(expected))
@mock.patch.object(os, 'environ', dict(ARCH='arm64'))
def test_arch(self):
'''Exercise the arch and not-arch flags'''
objs = {
'test_package': '',
'test_arm64_package': {
'arch': 'arm64'
},
'do_not_install': {
'not-arch': 'arm64'
}
}
result = installs_squash.collect_data(
self.final_dict, objs, 'test_element')
expected = {
'install.d': {
'install': [('test_package', 'test_element'),
('test_arm64_package', 'test_element')]
}
}
self.assertThat(result, IsMatchingInstallList(expected))
kernel_objs = {
'linux-image-generic': [
{
'not-arch': 'arm64',
'when': 'DIB_UBUNTU_KERNEL = linux-image-generic',
},
{
'arch': 'arm64',
'when': (
'DIB_RELEASE != xenial',
'DIB_UBUNTU_KERNEL = linux-image-generic',
)
},
],
'linux-generic-hwe-16.04': {
'arch': 'arm64',
'when': (
'DIB_RELEASE = xenial',
'DIB_UBUNTU_KERNEL = linux-image-generic',
)
},
}
def _test_kernel_objs_match(self, arch, release, expected):
with mock.patch.object(os, 'environ',
dict(ARCH=arch,
DIB_UBUNTU_KERNEL='linux-image-generic',
DIB_RELEASE=release)):
result = installs_squash.collect_data(
self.final_dict, self.kernel_objs, 'test_element')
expected = {
'install.d': {
'install': [(expected, 'test_element')]
}
}
self.assertThat(result, IsMatchingInstallList(expected))
def test_param_list_x86(self):
self._test_kernel_objs_match('x86_64', 'focal', 'linux-image-generic')
def test_param_list_arm64_xenial(self):
self._test_kernel_objs_match('arm64', 'xenial',
'linux-generic-hwe-16.04')
def test_param_list_arm64_focal(self):
self._test_kernel_objs_match('arm64', 'focal', 'linux-image-generic')
@mock.patch.object(os, 'environ', dict(DIB_FEATURE='1', **os.environ))
def test_skip_when(self):
'''Exercise the when flag'''
objs = {
'skipped_package': {
'when': 'DIB_FEATURE=0'
},
'not_skipped_package': {
'when': 'DIB_FEATURE=1'
},
'not_equal_package': {
'when': 'DIB_FEATURE!=0'
},
'not_equal_skipped_package': {
'when': 'DIB_FEATURE!=1'
},
}
result = installs_squash.collect_data(
self.final_dict, objs, 'test_element')
expected = {
'install.d': {
'install': [('not_skipped_package', 'test_element'),
('not_equal_package', 'test_element')]
}
}
self.assertThat(result, IsMatchingInstallList(expected))
def test_skip_no_var(self):
'''Exercise the skip_when missing variable failure case'''
objs = {
'package': {
'when': 'MISSING_VAR=1'
},
}
self.assertRaises(RuntimeError, installs_squash.collect_data,
self.final_dict, objs, 'test_element')
@mock.patch.object(os, 'environ',
dict(
DIB_A_FEATURE='1',
DIB_B_FEATURE='1',
DIB_C_FEATURE='1'))
def test_skip_when_list(self):
'''Exercise the when flag with lists'''
objs = {
'not_skipped_package': {
'when': [
'DIB_A_FEATURE=1',
'DIB_B_FEATURE=1',
'DIB_C_FEATURE=1'
]
},
'skipped_package': {
'when': [
'DIB_A_FEATURE=1',
'DIB_B_FEATURE=0',
'DIB_C_FEATURE=1',
]
},
}
result = installs_squash.collect_data(
self.final_dict, objs, 'test_element')
expected = {
'install.d': {
'install': [('not_skipped_package', 'test_element')]
}
}
self.assertThat(result, IsMatchingInstallList(expected))
|
openstack/diskimage-builder
|
diskimage_builder/elements/package-installs/tests/test_package_squash.py
|
Python
|
apache-2.0
| 7,323
|
__author__ = 'alexs'
import theano.tensor as T
import theano
import numpy as np
import cPickle
import pandas as pd
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import StandardScaler
import numpy as np
import random
import json
def getReferenceLabels():
referenceLabels = dict()
for i in range(0, 9):
reference_out = [0.0 for x in range(0, 9)]
reference_out[i] = 0.99
referenceLabels[i] = reference_out
return referenceLabels
def compare(result_label, given_label, reference_labels):
givenKey = 0
resultedKey = 0
refGivenScore = 1000
refResultedScore = 1000
for key in reference_labels.keys():
score1 = np.sum(np.abs(np.array(given_label) - np.array(reference_labels[key])))
score2 = np.sum(np.abs(result_label - np.array(reference_labels[key])))
if score1 < refGivenScore:
refGivenScore = score1
givenKey = key
if score2 < refResultedScore:
refResultedScore = score2
resultedKey = key
if resultedKey == givenKey:
return True
return False
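# compare() above classifies both vectors by the nearest reference label
# (smallest L1 distance to one of the nine reference vectors) and reports
# whether the predicted and given classes agree.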
def makeW(rows, columns, start=-2, end=2):
w = np.random.uniform(start, end, (rows, columns))
return w
def updates_weights_function(weights, memories, cost_function, learning_rate=0.01, momentum_learning_rate=0.005):
gradients = T.grad(cost_function, weights) # keep in mind len(gradients) == len(weights)
update_lists = []
for i in range(0, len(weights)):
weight = weights[i]
gradient = gradients[i]
memory = memories[i]
change = learning_rate * gradient + momentum_learning_rate * memory
new_val = weight - change
update_lists.append((weight, new_val))
update_lists.append((memory, change))
return update_lists
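# The update rule above is plain gradient descent with a momentum term:
# new_weight = weight - (lr * gradient + momentum_lr * previous_change).
# A minimal sketch of the same arithmetic on scalars, added for illustration
# only (the default values are arbitrary and the helper is never called):
def _momentum_update_sketch(weight=1.0, gradient=0.5, memory=0.2,
                            learning_rate=0.01, momentum_learning_rate=0.005):
    change = learning_rate * gradient + momentum_learning_rate * memory
    new_weight = weight - change
    # mirrors the (weight, new_val) and (memory, change) update pairs above
    return new_weight, change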
class NN():
def __init__(self):
self.layers = []
self.weights = []
self.weights_memory = []
self.cost = None
self.train = None
self.updates = None
self.activate = None
self.activatwe = None
self.output = None
def build(self, givenWeights=None):
# first: init or build the in-between weight matrixes
for i in range(0, len(self.layers) - 1):
n = self.layers[i].size
m = self.layers[i + 1].size
if givenWeights:
w_values = givenWeights[i]
else:
w_values = makeW(n, m)
w_memory_values = np.zeros((n, m))
w = theano.shared(value=w_values, name="w_" + str(i) + "_" + str(i + 1))
w_memory = theano.shared(value=w_memory_values, name="w_memory_" + str(i) + "_" + str(i + 1))
self.weights.append(w)
self.weights_memory.append(w_memory)
# now build the model
inputVector = T.matrix("inputVector")
labels = T.matrix("labels")
out = None
net = None
workingV = inputVector
l2 = 0.0
l1 = 0.0
for i in range(0, len(self.weights)):
w = self.weights[i]
l2 += T.sum(w * w)
l1 += T.sum(T.abs_(w))
out = T.dot(workingV, w)
net = T.maximum(0, out)
workingV = net
self.cost = T.mean(T.pow(labels - out, 2)) + 0.005 * l2 + 0.005 * l1
self.output = net
self.updates = updates_weights_function(self.weights, self.weights_memory, self.cost)
self.train = theano.function([inputVector, labels], outputs=self.cost, updates=self.updates)
self.activate = theano.function([inputVector, labels], outputs=self.cost)
self.activatwe = theano.function([inputVector], outputs=self.output)
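        # Note: despite the Sigmoid* layer class names, every layer above uses
        # a ReLU activation (T.maximum(0, out)), and the cost is mean squared
        # error plus small L1/L2 weight penalties. `train` updates the weights,
        # `activate` only evaluates the cost, and `activatwe` is the forward
        # pass used for prediction.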
def addLayer(self, layer):
self.layers.append(layer)
def snapshotWeigths(self, experimentId):
with open(str(experimentId) + ".dat", "w") as f:
for w in self.weights:
numeric_value = w.get_value().tolist()
f.write(json.dumps(numeric_value) + "\n")
def resume(self, experimentId="default"):
ww = []
with open(str(experimentId) + ".dat", "r") as f:
for line in f.readlines():
w = np.array(json.loads(line))
ww.append(w)
self.build(ww)
def trainData(self, train_set_input, train_set_labels,
valid_set_input, valid_set_labels,
test_set_input, test_set_labels,
nrOfEpochs=10000, batch_size=1000, experimentId="default"):
reference_labels = getReferenceLabels()
for ep in range(0, nrOfEpochs):
# random.shuffle(train_data)
overallError = 0.0
for j in range(0, len(train_set_input), batch_size):
endInterval = j + batch_size
if j + batch_size > len(train_set_input):
endInterval = len(train_set_input) - 1
i = train_set_input[j:endInterval]
r = train_set_labels[j:endInterval]
self.train(i, r)
for j in range(0, len(train_set_input), batch_size):
endInterval = j + batch_size
if j + batch_size > len(train_set_input):
endInterval = len(train_set_input) - 1
i = train_set_input[j:endInterval]
r = train_set_labels[j:endInterval]
overallError += self.activate(i, r)
posItems = 0.0
failedItems = 0.0
for valid_in, given_label in zip(valid_set_input, valid_set_labels):
result_label = self.activatwe([valid_in])
ok = compare(result_label, given_label, reference_labels)
if ok:
posItems += 1.0
else:
failedItems += 1.0
precision = posItems / (posItems + failedItems)
print(
"[{epoch}] error: {error} precision: {precision}".format(epoch=ep, error=overallError,
precision=precision))
# running tests
self.snapshotWeigths(experimentId)
if test_set_input and test_set_labels:
print("=================== TESTS ==================")
posItems = 0.0
failedItems = 0.0
for valid_in, given_label in zip(test_set_input, test_set_labels):
result_label = self.activatwe([valid_in])
ok = compare(result_label, given_label, reference_labels)
if ok:
posItems += 1.0
else:
failedItems += 1.0
precision = posItems / (posItems + failedItems)
print("Accuracy on {nrOfTests} tests is {precision}".format(nrOfTests=str(len(test_set_input)),
precision=str(precision)))
print("============================================")
class Layer():
def __init__(self, size):
self.size = size
class SigmoidLayer(Layer):
def __init__(self, size):
self.size = size
class StandardOutputWithSigmoid(Layer):
def __init__(self, size):
self.size = size
class InverseOutputLayerWithSigmoid(Layer):
def __init__(self, size):
self.size = size
def transformInput(inputList):
res = []
for input in inputList:
res.append(np.array(input, dtype="float32"))
return res
def transformOutput(outputList, size):
res = []
for out in outputList:
reference_out = [0.1 for x in range(0, size)]
reference_out[out] = 0.88
res.append(np.array(reference_out, dtype="float32"))
return res
def retrieve_training_set():
all_collections = []
df = pd.read_csv("/Users/alexs/work_phd/otto_group_challenge/train.csv")
X = df.values.copy()
np.random.shuffle(X)
X, labels = X[:, 1:-1].astype(np.float32), X[:, -1]
    print(labels)
encoder = LabelEncoder()
encoded_labels = encoder.fit_transform(labels).astype(np.int32)
all_labels = []
scaler = StandardScaler()
Z = scaler.fit_transform(X)
for encoded_label in encoded_labels:
l = [0.0 for x in range(0, 9)]
l[encoded_label] = 0.99
all_labels.append(l)
return [Z, all_labels]
def retrieveTrainValidationSet(train_set, percentage=20.0):
train = train_set[0]
label = train_set[1]
all = []
for i in range(0, len(train)):
all.append((train[i], label[i]))
random.shuffle(all)
offset = int(len(train) * (percentage / 100.0))
validation_final = []
validation_input = []
validation_label = []
for i in range(0, offset):
(vi, vl) = all.pop(0)
validation_input.append(vi)
validation_label.append(vl)
validation_final.append(validation_input)
validation_final.append(validation_label)
training_final = []
training_in = []
training_label = []
for (ti, tl) in all:
training_in.append(ti)
training_label.append(tl)
training_final.append(training_in)
training_final.append(training_label)
return training_final, validation_final
def retrieve_test_set():
df = pd.read_csv("/Users/alexs/work_phd/otto_group_challenge/test.csv")
X = df.values.copy()
np.random.shuffle(X)
X = X[:, 1:].astype(np.float32)
scaler = StandardScaler()
Z = scaler.fit_transform(X)
return Z
def getClosest(out, reference):
refGivenScore = 1000
givenKey = 0
for key in reference.keys():
score1 = np.sum(np.abs(np.array(out) - np.array(reference[key])))
if score1 < refGivenScore:
refGivenScore = score1
givenKey = key
# p = [0 for i in range(0,9)]
# p[key]=1
cleaned_p = []
for p in reference[givenKey]:
if p < 0.4:
cleaned_p.append(0)
elif p > 0.8:
cleaned_p.append(0.95)
else:
cleaned_p.append(p)
return [str(p) for p in cleaned_p]
def main():
nn = NN()
nn.addLayer(SigmoidLayer(93))
nn.addLayer(SigmoidLayer(30))
nn.addLayer(InverseOutputLayerWithSigmoid(9))
nn.build()
# nn.resume()
original_data = retrieve_training_set()
test_data = retrieve_test_set()
batch_size = 2000
for i in range(0, 100):
print("BIG ITERATION: " + str(i))
training_set, validation_set = retrieveTrainValidationSet(original_data, percentage=30)
train_set_input, train_set_labels = training_set[0], training_set[1]
valid_set_input, valid_set_labels = validation_set[0], validation_set[1]
nn.trainData(train_set_input, train_set_labels,
valid_set_input, valid_set_labels,
None, None,
nrOfEpochs=10, batch_size=batch_size)
batch_size = batch_size - 50
if (batch_size < 100):
batch_size = 100
print("RUNNING THE TESTS")
referenceLabels = getReferenceLabels()
with open("submission.dat", "w") as w:
w.write("id,Class_1,Class_2,Class_3,Class_4,Class_5,Class_6,Class_7,Class_8,Class_9\n")
counter = 1
for test in test_data:
resultedLabel = nn.activatwe([test])
out = getClosest(resultedLabel, referenceLabels)
#w.write(str(counter) + "," + ",".join(out) + "\n")
a = [str(p) for p in resultedLabel[0]]
w.write(str(counter) + "," + ",".join(a) + "\n")
counter += 1
if __name__ == '__main__':
main()
|
big-data-research/neuralnetworks_workshop_bucharest_2015
|
nn_demo/01nn_otto.py
|
Python
|
apache-2.0
| 11,593
|
#!/usr/bin/env python
"""
Copyright 2012 GroupDocs.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
class ChangePasswordResult:
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually."""
def __init__(self):
self.swaggerTypes = {
'user_guid': 'str'
}
self.user_guid = None # str
|
liosha2007/temporary-groupdocs-python3-sdk
|
groupdocs/models/ChangePasswordResult.py
|
Python
|
apache-2.0
| 920
|
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import re
from paddle.trainer_config_helpers.default_decorators import wrap_name_default
import paddle.trainer_config_helpers as conf_helps
class LayerType(type):
def __new__(cls, name, bases, attrs):
method_name = attrs.get('METHOD_NAME', None)
if method_name is not None:
method = getattr(conf_helps, method_name)
if method.__doc__ is not None:
mapper = attrs.get("__map_docstr__", None)
if mapper is not None:
attrs['__doc__'] = LayerType.__map_docstr__(
mapper(method.__doc__),
method_name=method_name,
name=name)
else:
attrs['__doc__'] = LayerType.__map_docstr__(
method.__doc__, method_name=method_name, name=name)
return super(LayerType, cls).__new__(cls, name, bases, attrs)
@staticmethod
def __map_docstr__(doc, name, method_name):
assert isinstance(doc, basestring)
        # replace LayerOutput with paddle.v2.config_base.Layer
doc = doc.replace("LayerOutput", "paddle.v2.config_base.Layer")
doc = doc.replace('ParameterAttribute',
'paddle.v2.attr.ParameterAttribute')
doc = re.sub(r'ExtraLayerAttribute[^\s]?',
'paddle.v2.attr.ExtraAttribute', doc)
# xxx_layer to xxx
doc = re.sub(r"(?P<name>[a-z]+)_layer", r"\g<name>", doc)
# XxxxActivation to paddle.v2.Activation.Xxxx
doc = re.sub(r"(?P<name>[A-Z][a-zA-Z]+)Activation",
r"paddle.v2.Activation.\g<name>", doc)
# TODO(yuyang18): Add more rules if needed.
return doc
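# A commented, illustrative-only sketch (not part of the original module) of the
# docstring rewriting performed above: a v1 fragment such as
#   "fc_layer returns a LayerOutput with SigmoidActivation"
# would come out of __map_docstr__ roughly as
#   "fc returns a paddle.v2.config_base.Layer with paddle.v2.Activation.Sigmoid"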
class Layer(object):
__metaclass__ = LayerType
def __init__(self, name=None, parent_layers=None):
assert isinstance(parent_layers, dict)
self.name = name
self.__contex__ = {}
self.__parent_layers__ = parent_layers
def to_proto(self, context):
"""
function to set proto attribute
"""
kwargs = dict()
for layer_name in self.__parent_layers__:
if not isinstance(self.__parent_layers__[layer_name],
collections.Sequence):
v1_layer = self.__parent_layers__[layer_name].to_proto(
context=context)
else:
v1_layer = map(lambda x: x.to_proto(context=context),
self.__parent_layers__[layer_name])
kwargs[layer_name] = v1_layer
if self.context_name() is None:
return self.to_proto_impl(**kwargs)
elif self.context_name() not in context:
context[self.context_name()] = self.to_proto_impl(**kwargs)
self.__contex__ = context
if self.use_context_name():
return context[self.context_name()]
else:
return context[self.name]
def to_proto_impl(self, **kwargs):
raise NotImplementedError()
def context_name(self):
"""
        Context name means the context which stores the `to_proto_impl` result.
        If multiple layers share the same context_name, their `to_proto_impl`
        will be invoked only once (see the commented sketch after this class).
"""
return self.name
def use_context_name(self):
return False
def calculate_size(self):
"""
        Lazily calculate the size of the layer; should only be called after
        to_proto_impl of this layer has been called.
:return:
"""
return self.__contex__[self.context_name()].size
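# A minimal commented sketch (not part of the original module) of the
# context_name de-duplication described above, assuming a hypothetical
# subclass that shares one context entry between two instances:
#
#   class SharedParam(Layer):
#       def context_name(self):
#           return 'shared_param'       # both instances report this key
#       def use_context_name(self):
#           return True                 # return the shared entry, not self.name
#       def to_proto_impl(self, **kwargs):
#           return 'proto'              # built only once per context dict
#
#   ctx = {}
#   SharedParam(name='a', parent_layers={}).to_proto(ctx)
#   SharedParam(name='b', parent_layers={}).to_proto(ctx)   # reuses ctx['shared_param']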
def __convert_to_v2__(method_name, parent_names, is_default_name=True):
if is_default_name:
wrapper = wrap_name_default(name_prefix=method_name)
else:
wrapper = None
class V2LayerImpl(Layer):
METHOD_NAME = method_name
def __init__(self, **kwargs):
parent_layers = dict()
other_kwargs = dict()
for pname in parent_names:
if kwargs.has_key(pname):
parent_layers[pname] = kwargs[pname]
for key in kwargs.keys():
if key not in parent_names:
other_kwargs[key] = kwargs[key]
name = kwargs.get('name', None)
super(V2LayerImpl, self).__init__(name, parent_layers)
self.__other_kwargs__ = other_kwargs
if wrapper is not None:
__init__ = wrapper(__init__)
def to_proto_impl(self, **kwargs):
args = dict()
for each in kwargs:
args[each] = kwargs[each]
for each in self.__other_kwargs__:
args[each] = self.__other_kwargs__[each]
return getattr(conf_helps, method_name)(**args)
return V2LayerImpl
|
gangliao/Paddle
|
python/paddle/v2/config_base.py
|
Python
|
apache-2.0
| 5,435
|
# Copyright (C) 2018 Red Hat, Inc
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import io
import fixtures
import mock
from nova import conf
from nova.tests import fixtures as nova_fixtures
from nova.tests.fixtures import libvirt as fakelibvirt
from nova.tests.functional import integrated_helpers
CONF = conf.CONF
class ServersTestBase(integrated_helpers._IntegratedTestBase):
"""A libvirt-specific variant of the integrated test base."""
ADDITIONAL_FILTERS = []
def setUp(self):
self.flags(instances_path=self.useFixture(fixtures.TempDir()).path)
self.computes = {}
self.compute_rp_uuids = {}
super(ServersTestBase, self).setUp()
self.useFixture(nova_fixtures.LibvirtImageBackendFixture())
self.useFixture(nova_fixtures.LibvirtFixture())
self.useFixture(nova_fixtures.OSBrickFixture())
self.useFixture(fixtures.MockPatch(
'nova.virt.libvirt.LibvirtDriver._create_image',
return_value=(False, False)))
self.useFixture(fixtures.MockPatch(
'nova.virt.libvirt.LibvirtDriver._get_local_gb_info',
return_value={'total': 128, 'used': 44, 'free': 84}))
self.useFixture(fixtures.MockPatch(
'nova.virt.libvirt.driver.libvirt_utils.is_valid_hostname',
return_value=True))
self.useFixture(fixtures.MockPatch(
'nova.virt.libvirt.driver.libvirt_utils.file_open',
side_effect=lambda *a, **k: io.BytesIO(b'')))
self.useFixture(fixtures.MockPatch(
'nova.privsep.utils.supports_direct_io',
return_value=True))
self.useFixture(fixtures.MockPatch(
'nova.virt.libvirt.host.Host.get_online_cpus',
return_value=set(range(16))))
# Mock the 'get_connection' function, as we're going to need to provide
# custom capabilities for each test
_p = mock.patch('nova.virt.libvirt.host.Host.get_connection')
self.mock_conn = _p.start()
self.addCleanup(_p.stop)
def _setup_compute_service(self):
# NOTE(stephenfin): We don't start the compute service here as we wish
# to configure the host capabilities first. We instead start the
# service in the test
self.flags(compute_driver='libvirt.LibvirtDriver')
def _setup_scheduler_service(self):
enabled_filters = CONF.filter_scheduler.enabled_filters
enabled_filters += self.ADDITIONAL_FILTERS
self.flags(enabled_filters=enabled_filters, group='filter_scheduler')
return self.start_service('scheduler')
def _get_connection(
self, host_info=None, pci_info=None, mdev_info=None, vdpa_info=None,
libvirt_version=None, qemu_version=None, hostname=None,
):
if not host_info:
host_info = fakelibvirt.HostInfo(
cpu_nodes=2, cpu_sockets=1, cpu_cores=2, cpu_threads=2)
# sanity check
self.assertGreater(16, host_info.cpus,
"Host.get_online_cpus is only accounting for 16 CPUs but you're "
"requesting %d; change the mock or your test" % host_info.cpus)
libvirt_version = libvirt_version or fakelibvirt.FAKE_LIBVIRT_VERSION
qemu_version = qemu_version or fakelibvirt.FAKE_QEMU_VERSION
fake_connection = fakelibvirt.Connection(
'qemu:///system',
version=libvirt_version,
hv_version=qemu_version,
host_info=host_info,
pci_info=pci_info,
mdev_info=mdev_info,
vdpa_info=vdpa_info,
hostname=hostname)
return fake_connection
def start_compute(
self, hostname='compute1', host_info=None, pci_info=None,
mdev_info=None, vdpa_info=None, libvirt_version=None,
qemu_version=None,
):
"""Start a compute service.
The started service will be saved in self.computes, keyed by hostname.
:param hostname: A hostname.
:param host_info: A fakelibvirt.HostInfo object for the host. Defaults
to a HostInfo with 2 NUMA nodes, 2 cores per node, 2 threads per
core, and 16GB of RAM.
        :returns: The hostname of the created service, which can be used to
            look up the created service and UUID of the associated resource
            provider.
"""
def _start_compute(hostname, host_info):
fake_connection = self._get_connection(
host_info, pci_info, mdev_info, vdpa_info, libvirt_version,
qemu_version, hostname,
)
# This is fun. Firstly we need to do a global'ish mock so we can
# actually start the service.
with mock.patch('nova.virt.libvirt.host.Host.get_connection',
return_value=fake_connection):
compute = self.start_service('compute', host=hostname)
# Once that's done, we need to tweak the compute "service" to
# make sure it returns unique objects. We do this inside the
# mock context to avoid a small window between the end of the
# context and the tweaking where get_connection would revert to
# being an autospec mock.
compute.driver._host.get_connection = lambda: fake_connection
return compute
# ensure we haven't already registered services with these hostnames
self.assertNotIn(hostname, self.computes)
self.assertNotIn(hostname, self.compute_rp_uuids)
self.computes[hostname] = _start_compute(hostname, host_info)
self.compute_rp_uuids[hostname] = self.placement.get(
'/resource_providers?name=%s' % hostname).body[
'resource_providers'][0]['uuid']
return hostname
class LibvirtMigrationMixin(object):
"""A simple mixin to facilliate successful libvirt live migrations
Requires that the test class set self.server for the specific test instnace
and self.{src,dest} to indicate the direction of the migration. For any
scenarios more complex than this they should override _migrate_stub with
their own implementation.
"""
def setUp(self):
super().setUp()
self.useFixture(fixtures.MonkeyPatch(
'nova.tests.fixtures.libvirt.Domain.migrateToURI3',
self._migrate_stub))
self.migrate_stub_ran = False
def _migrate_stub(self, domain, destination, params, flags):
self.dest.driver._host.get_connection().createXML(
params['destination_xml'],
'fake-createXML-doesnt-care-about-flags')
conn = self.src.driver._host.get_connection()
dom = conn.lookupByUUIDString(self.server['id'])
dom.complete_job()
self.migrate_stub_ran = True
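# A hypothetical usage sketch for LibvirtMigrationMixin (not part of nova's
# test suite); the boot and migration steps are elided because they depend on
# helpers defined elsewhere:
#
#   class LiveMigrationTest(LibvirtMigrationMixin, ServersTestBase):
#       def test_live_migrate(self):
#           self.start_compute(hostname='src')
#           self.start_compute(hostname='dest')
#           self.src = self.computes['src']
#           self.dest = self.computes['dest']
#           # ... boot a server on 'src' and keep its API representation ...
#           self.server = server  # the API representation from the boot above
#           # ... trigger a live migration to 'dest' via the API; the
#           # _migrate_stub above then completes the fake libvirt job ...
#           self.assertTrue(self.migrate_stub_ran)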
class LibvirtNeutronFixture(nova_fixtures.NeutronFixture):
"""A custom variant of the stock neutron fixture with more networks.
    There are four networks available: three l2 networks (one flat and two
    VLAN) and one l3 network (VXLAN).
"""
network_1 = {
'id': '3cb9bc59-5699-4588-a4b1-b87f96708bc6',
'status': 'ACTIVE',
'subnets': [],
'name': 'physical-network-foo',
'admin_state_up': True,
'tenant_id': nova_fixtures.NeutronFixture.tenant_id,
'provider:physical_network': 'foo',
'provider:network_type': 'flat',
'provider:segmentation_id': None,
}
network_2 = network_1.copy()
network_2.update({
'id': 'a252b8cd-2d99-4e82-9a97-ec1217c496f5',
'name': 'physical-network-bar',
'provider:physical_network': 'bar',
'provider:network_type': 'vlan',
'provider:segmentation_id': 123,
})
network_3 = network_1.copy()
network_3.update({
'id': '877a79cc-295b-4b80-9606-092bf132931e',
'name': 'tunneled-network',
'provider:physical_network': None,
'provider:network_type': 'vxlan',
'provider:segmentation_id': 69,
})
network_4 = network_1.copy()
network_4.update({
'id': '1b70879f-fd00-411e-8ea9-143e7820e61d',
'name': 'private-network',
'shared': False,
'provider:physical_network': 'physnet4',
"provider:network_type": "vlan",
'provider:segmentation_id': 42,
})
subnet_1 = nova_fixtures.NeutronFixture.subnet_1.copy()
subnet_1.update({
'name': 'physical-subnet-foo',
})
subnet_2 = nova_fixtures.NeutronFixture.subnet_1.copy()
subnet_2.update({
'id': 'b4c13749-c002-47ed-bf42-8b1d44fa9ff2',
'name': 'physical-subnet-bar',
'network_id': network_2['id'],
})
subnet_3 = nova_fixtures.NeutronFixture.subnet_1.copy()
subnet_3.update({
'id': '4dacb20b-917f-4275-aa75-825894553442',
'name': 'tunneled-subnet',
'network_id': network_3['id'],
})
subnet_4 = nova_fixtures.NeutronFixture.subnet_1.copy()
subnet_4.update({
'id': '7cb343ec-6637-494c-89a1-8890eab7788e',
'name': 'physical-subnet-bar',
'network_id': network_4['id'],
})
network_1['subnets'] = [subnet_1]
network_2['subnets'] = [subnet_2]
network_3['subnets'] = [subnet_3]
network_4['subnets'] = [subnet_4]
network_1_port_2 = {
'id': 'f32582b5-8694-4be8-9a52-c5732f601c9d',
'network_id': network_1['id'],
'status': 'ACTIVE',
'mac_address': '71:ce:c7:8b:cd:dc',
'fixed_ips': [
{
'ip_address': '192.168.1.10',
'subnet_id': subnet_1['id']
}
],
'binding:vif_details': {},
'binding:vif_type': 'ovs',
'binding:vnic_type': 'normal',
}
network_1_port_3 = {
'id': '9c7580a0-8b01-41f3-ba07-a114709a4b74',
'network_id': network_1['id'],
'status': 'ACTIVE',
'mac_address': '71:ce:c7:2b:cd:dc',
'fixed_ips': [
{
'ip_address': '192.168.1.11',
'subnet_id': subnet_1['id']
}
],
'binding:vif_details': {},
'binding:vif_type': 'ovs',
'binding:vnic_type': 'normal',
}
network_2_port_1 = {
'id': '67d36444-6353-40f5-9e92-59346cf0dfda',
'network_id': network_2['id'],
'status': 'ACTIVE',
'mac_address': 'd2:0b:fd:d7:89:9b',
'fixed_ips': [
{
'ip_address': '192.168.1.6',
'subnet_id': subnet_2['id']
}
],
'binding:vif_details': {},
'binding:vif_type': 'ovs',
'binding:vnic_type': 'normal',
}
network_3_port_1 = {
'id': '4bfa1dc4-4354-4840-b0b4-f06196fa1344',
'network_id': network_3['id'],
'status': 'ACTIVE',
'mac_address': 'd2:0b:fd:99:89:9b',
'fixed_ips': [
{
'ip_address': '192.168.2.6',
'subnet_id': subnet_3['id']
}
],
'binding:vif_details': {},
'binding:vif_type': 'ovs',
'binding:vnic_type': 'normal',
}
network_4_port_1 = {
'id': 'b4cd0b93-2ac8-40a7-9fa4-2cd680ccdf3e',
'network_id': network_4['id'],
'status': 'ACTIVE',
'mac_address': 'b5:bc:2e:e7:51:ee',
'fixed_ips': [
{
'ip_address': '192.168.4.6',
'subnet_id': subnet_4['id']
}
],
'binding:vif_details': {'vlan': 42},
'binding:vif_type': 'hw_veb',
'binding:vnic_type': 'direct',
}
network_4_port_2 = {
'id': '4a0e3b05-4704-4adb-bfb1-f31f0e4d1bdc',
'network_id': network_4['id'],
'status': 'ACTIVE',
'mac_address': 'b5:bc:2e:e7:51:ef',
'fixed_ips': [
{
'ip_address': '192.168.4.7',
'subnet_id': subnet_4['id']
}
],
'binding:vif_details': {'vlan': 42},
'binding:vif_type': 'hw_veb',
'binding:vnic_type': 'direct',
}
network_4_port_3 = {
'id': 'fb2de1a1-d096-41be-9dbe-43066da64804',
'network_id': network_4['id'],
'status': 'ACTIVE',
'mac_address': 'b5:bc:2e:e7:51:ff',
'fixed_ips': [
{
'ip_address': '192.168.4.8',
'subnet_id': subnet_4['id']
}
],
'binding:vif_details': {'vlan': 42},
'binding:vif_type': 'hw_veb',
'binding:vnic_type': 'direct',
}
def __init__(self, test):
super(LibvirtNeutronFixture, self).__init__(test)
self._networks = {
self.network_1['id']: self.network_1,
self.network_2['id']: self.network_2,
self.network_3['id']: self.network_3,
self.network_4['id']: self.network_4,
}
self._net1_ports = [self.network_1_port_2, self.network_1_port_3]
def create_port(self, body=None):
network_id = body['port']['network_id']
assert network_id in self._networks, ('Network %s not in fixture' %
network_id)
if network_id == self.network_1['id']:
port = self._net1_ports.pop(0)
elif network_id == self.network_2['id']:
port = self.network_2_port_1
elif network_id == self.network_3['id']:
port = self.network_3_port_1
elif network_id == self.network_4['id']:
port = self.network_4_port_1
# this copy is here to avoid modifying class variables like
# network_2_port_1 below at the update call
port = copy.deepcopy(port)
port.update(body['port'])
# the tenant ID is normally extracted from credentials in the request
# and is not present in the body
if 'tenant_id' not in port:
port['tenant_id'] = nova_fixtures.NeutronFixture.tenant_id
# similarly, these attributes are set by neutron itself
port['admin_state_up'] = True
self._ports[port['id']] = port
        # this copy is here as nova sometimes modifies the returned port
        # locally and we want to avoid nova modifying the fixture internals
return {'port': copy.deepcopy(port)}
|
mahak/nova
|
nova/tests/functional/libvirt/base.py
|
Python
|
apache-2.0
| 14,789
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import tables
from openstack_dashboard import api
LOG = logging.getLogger(__name__)
class DeleteSubnet(tables.DeleteAction):
data_type_singular = _("Subnet")
data_type_plural = _("Subnets")
def delete(self, request, obj_id):
try:
api.neutron.subnet_delete(request, obj_id)
except:
msg = _('Failed to delete subnet %s') % obj_id
LOG.info(msg)
network_id = self.table.kwargs['network_id']
redirect = reverse('horizon:admin:networks:detail',
args=[network_id])
exceptions.handle(request, msg, redirect=redirect)
class CreateSubnet(tables.LinkAction):
name = "create"
verbose_name = _("Create Subnet")
url = "horizon:admin:networks:addsubnet"
classes = ("ajax-modal", "btn-create")
def get_link_url(self, datum=None):
network_id = self.table.kwargs['network_id']
return reverse(self.url, args=(network_id,))
class UpdateSubnet(tables.LinkAction):
name = "update"
verbose_name = _("Edit Subnet")
url = "horizon:admin:networks:editsubnet"
classes = ("ajax-modal", "btn-edit")
def get_link_url(self, subnet):
network_id = self.table.kwargs['network_id']
return reverse(self.url, args=(network_id, subnet.id))
class SubnetsTable(tables.DataTable):
name = tables.Column("name", verbose_name=_("Name"),
link='horizon:admin:networks:subnets:detail')
cidr = tables.Column("cidr", verbose_name=_("CIDR"))
ip_version = tables.Column("ipver_str", verbose_name=_("IP Version"))
gateway_ip = tables.Column("gateway_ip", verbose_name=_("Gateway IP"))
def get_object_display(self, subnet):
return subnet.id
class Meta:
name = "subnets"
verbose_name = _("Subnets")
table_actions = (CreateSubnet, DeleteSubnet)
row_actions = (UpdateSubnet, DeleteSubnet,)
|
tuskar/tuskar-ui
|
openstack_dashboard/dashboards/admin/networks/subnets/tables.py
|
Python
|
apache-2.0
| 2,757
|
import json
import logging
from django.core.management.base import BaseCommand
from django.db import transaction
from osf.models import AbstractProvider, PreprintProvider, PreprintService, Subject
from osf.models.provider import rules_to_subjects
from scripts import utils as script_utils
from osf.models.validators import validate_subject_hierarchy
from website.preprints.tasks import on_preprint_updated
logger = logging.getLogger(__name__)
BEPRESS_PROVIDER = None
def validate_input(custom_provider, data, provider_type='osf.preprintprovider', copy=False, add_missing=False):
# This function may be run outside of this command (e.g. in the admin app) so we
# need to make sure that BEPRESS_PROVIDER is set
global BEPRESS_PROVIDER
BEPRESS_PROVIDER = AbstractProvider.objects.filter(_id='osf', type='osf.preprintprovider').first()
logger.info('Validating data')
includes = data.get('include', [])
excludes = data.get('exclude', [])
customs = data.get('custom', {})
merges = data.get('merge', {})
if copy:
included_subjects = rules_to_subjects(custom_provider.subjects_acceptable)
else:
assert not set(includes) & set(excludes), 'There must be no overlap between includes and excludes'
for text in includes:
assert Subject.objects.filter(provider=BEPRESS_PROVIDER, text=text).exists(), 'Unable to find included subject with text {}'.format(text)
included_subjects = Subject.objects.filter(provider=BEPRESS_PROVIDER, text__in=includes).include_children()
logger.info('Successfully validated `include`')
for text in excludes:
try:
Subject.objects.get(provider=BEPRESS_PROVIDER, text=text)
except Subject.DoesNotExist:
raise RuntimeError('Unable to find excluded subject with text {}'.format(text))
assert included_subjects.filter(text=text).exists(), 'Excluded subject with text {} was not included'.format(text)
included_subjects = included_subjects.exclude(text__in=excludes)
logger.info('Successfully validated `exclude`')
for cust_name, map_dict in customs.items():
assert not included_subjects.filter(text=cust_name).exists(), 'Custom text {} already exists in mapped set'.format(cust_name)
assert Subject.objects.filter(provider=BEPRESS_PROVIDER, text=map_dict.get('bepress')).exists(), 'Unable to find specified BePress subject with text {}'.format(map_dict.get('bepress'))
if map_dict.get('parent'): # Null parent possible
assert map_dict['parent'] in set(customs.keys()) | set(included_subjects.values_list('text', flat=True)), 'Unable to find specified parent with text {} in mapped set'.format(map_dict['parent'])
# TODO: hierarchy length validation? Probably more trouble than worth here, done on .save
logger.info('Successfully validated `custom`')
included_subjects = included_subjects | Subject.objects.filter(text__in=[map_dict['bepress'] for map_dict in customs.values()])
for merged_from, merged_into in merges.items():
assert not included_subjects.filter(text=merged_from).exists(), 'Cannot merge subject "{}" that will be included'.format(merged_from)
assert merged_into in set(included_subjects.values_list('text', flat=True)) | set(customs.keys()), 'Unable to determine merge target for "{}"'.format(merged_into)
included_subjects = included_subjects | Subject.objects.filter(text__in=merges.keys())
missing_subjects = Subject.objects.filter(id__in=set([hier[-1].id for ps in PreprintService.objects.filter(provider=custom_provider) for hier in ps.subject_hierarchy])).exclude(id__in=included_subjects.values_list('id', flat=True))
if not add_missing:
assert not missing_subjects.exists(), 'Incomplete mapping -- following subjects in use but not included:\n{}'.format(list(missing_subjects.values_list('text', flat=True)))
if isinstance(custom_provider, PreprintProvider):
assert custom_provider.share_title not in [None, '', 'bepress'], 'share title not set; please set the share title on this provider before creating a custom taxonomy.'
logger.info('Successfully validated mapping completeness')
return list(missing_subjects) if add_missing else None
def create_subjects_recursive(custom_provider, root_text, exclude_texts, parent=None):
logger.info('Duplicating BePress subject {} on {}'.format(root_text, custom_provider._id))
bepress_subj = Subject.objects.get(provider=BEPRESS_PROVIDER, text=root_text)
custom_subj = Subject(text=root_text, parent=parent, bepress_subject=bepress_subj, provider=custom_provider)
custom_subj.save()
    # This is not a problem now, as all excluded subjects are leaves, but it could be problematic if non-leaf subjects had their children excluded.
# It could also be problematic if they didn't, if any of those children are used by existing preprints.
# TODO: Determine correct resolution
for child_text in bepress_subj.children.exclude(text__in=exclude_texts).values_list('text', flat=True):
create_subjects_recursive(custom_provider, child_text, exclude_texts, parent=custom_subj)
def create_from_subjects_acceptable(custom_provider, add_missing=False, missing=None):
tries = 0
subjects_to_copy = list(rules_to_subjects(custom_provider.subjects_acceptable))
if missing and add_missing:
subjects_to_copy = subjects_to_copy + missing
while len(subjects_to_copy):
previous_len = len(subjects_to_copy)
tries += 1
if tries == 10:
raise RuntimeError('Unable to map subjects acceptable with 10 iterations -- subjects remaining: {}'.format(subjects_to_copy))
for subj in list(subjects_to_copy):
if map_custom_subject(custom_provider, subj.text, subj.parent.text if subj.parent else None, subj.text):
subjects_to_copy.remove(subj)
elif add_missing and subj.parent and subj.parent not in subjects_to_copy:
# Dirty
subjects_to_copy.append(subj.parent)
previous_len += 1
else:
logger.warn('Failed. Retrying next iteration')
new_len = len(subjects_to_copy)
if new_len == previous_len:
raise RuntimeError('Unable to map any custom subjects on iteration -- subjects remaining: {}'.format(subjects_to_copy))
def do_create_subjects(custom_provider, includes, excludes, copy=False, add_missing=False, missing=None):
if copy:
create_from_subjects_acceptable(custom_provider, add_missing=add_missing, missing=missing)
else:
for root_text in includes:
create_subjects_recursive(custom_provider, root_text, excludes)
def map_custom_subject(custom_provider, name, parent, mapping):
logger.info('Attempting to create subject {} on {} from {} with {}'.format(name, custom_provider._id, mapping, 'parent {}'.format(parent) if parent else 'no parent'))
if parent:
parent_subject = Subject.objects.filter(provider=custom_provider, text=parent).first()
else:
parent_subject = None
bepress_subject = Subject.objects.get(provider=BEPRESS_PROVIDER, text=mapping)
if parent and not parent_subject:
return False
custom_subject = Subject(provider=custom_provider, text=name, parent=parent_subject, bepress_subject=bepress_subject)
custom_subject.save()
return True
def do_custom_mapping(custom_provider, customs):
tries = 0
unmapped_customs = customs
while len(unmapped_customs):
previous_len = len(unmapped_customs)
tries += 1
if tries == 10:
raise RuntimeError('Unable to map custom subjects with 10 iterations -- invalid input')
successes = []
for cust_name, map_dict in unmapped_customs.items():
if map_custom_subject(custom_provider, cust_name, map_dict.get('parent'), map_dict.get('bepress')):
successes.append(cust_name)
else:
logger.warn('Failed. Retrying next iteration')
[unmapped_customs.pop(key) for key in successes]
new_len = len(unmapped_customs)
if new_len == previous_len:
raise RuntimeError('Unable to map any custom subjects on iteration -- invalid input')
def map_preprints_to_custom_subjects(custom_provider, merge_dict, dry_run=False):
for preprint in PreprintService.objects.filter(provider=custom_provider):
logger.info('Preparing to migrate preprint {}'.format(preprint.id))
old_hier = preprint.subject_hierarchy
subjects_to_map = [hier[-1] for hier in old_hier]
merged_subject_ids = set(Subject.objects.filter(provider=custom_provider, text__in=[merge_dict[k] for k in set(merge_dict.keys()) & set([s.text for s in subjects_to_map])]).values_list('id', flat=True))
subject_ids_to_map = set(s.id for s in subjects_to_map if s.text not in merge_dict.keys())
aliased_subject_ids = set(Subject.objects.filter(bepress_subject__id__in=subject_ids_to_map, provider=custom_provider).values_list('id', flat=True)) | merged_subject_ids
aliased_hiers = [s.object_hierarchy for s in Subject.objects.filter(id__in=aliased_subject_ids)]
old_subjects = list(preprint.subjects.values_list('id', flat=True))
preprint.subjects.clear()
for hier in aliased_hiers:
validate_subject_hierarchy([s._id for s in hier])
for s in hier:
preprint.subjects.add(s)
# Update preprint in SHARE
if not dry_run:
on_preprint_updated(preprint._id, old_subjects=old_subjects, update_share=True)
preprint.reload()
new_hier = [s.object_hierarchy for s in preprint.subjects.exclude(children__in=preprint.subjects.all())]
logger.info('Successfully migrated preprint {}.\n\tOld hierarchy:{}\n\tNew hierarchy:{}'.format(preprint.id, old_hier, new_hier))
def migrate(provider=None, provider_type='osf.preprintprovider', share_title=None, data=None, dry_run=False, copy=False, add_missing=False):
# This function may be run outside of this command (e.g. in the admin app) so we
# need to make sure that BEPRESS_PROVIDER is set
global BEPRESS_PROVIDER
if not BEPRESS_PROVIDER:
BEPRESS_PROVIDER = AbstractProvider.objects.filter(_id='osf', type='osf.preprintprovider').first()
custom_provider = AbstractProvider.objects.filter(_id=provider, type=provider_type).first()
assert custom_provider, 'Unable to find specified provider: {}'.format(provider)
assert custom_provider.id != BEPRESS_PROVIDER.id, 'Cannot add custom mapping to BePress provider'
    assert not custom_provider.subjects.exists(), 'Provider already has a custom taxonomy'
if isinstance(custom_provider, PreprintProvider) and custom_provider.share_title in [None, '', 'bepress']:
if not share_title:
raise RuntimeError('`--share-title` is required if not already set on the provider')
custom_provider.share_title = share_title
custom_provider.save()
missing = validate_input(custom_provider, data, provider_type=provider_type, copy=copy, add_missing=add_missing)
do_create_subjects(custom_provider, data['include'], data.get('exclude', []), copy=copy, add_missing=add_missing, missing=missing)
do_custom_mapping(custom_provider, data.get('custom', {}))
map_preprints_to_custom_subjects(custom_provider, data.get('merge', {}), dry_run=dry_run)
class Command(BaseCommand):
def add_arguments(self, parser):
super(Command, self).add_arguments(parser)
parser.add_argument(
'--dry',
action='store_true',
dest='dry_run',
help='Run migration and roll back changes to db',
)
parser.add_argument(
'--data',
action='store',
dest='data',
            help='JSON description of the taxonomy, of form {\n"include": [<list of subject texts to include at top level, children implicit>],'
                 '\n"exclude": [<list of children to exclude from included trees>],'
                 '\n"custom": {"<Custom Name>": {"parent": "<Parent text>", "bepress": "<Bepress Name>"}, ...},'
                 '\n"merge": {"<Merged from (bepress)>": "<Merged into (custom)>", ...}}',
)
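        # A hypothetical example of the JSON expected by --data (all subject
        # names below are illustrative only):
        #
        #   {
        #       "include": ["Engineering"],
        #       "exclude": ["Nuclear Engineering"],
        #       "custom": {
        #           "Robots": {"parent": "Engineering", "bepress": "Robotics"}
        #       },
        #       "merge": {"Automotive Engineering": "Engineering"}
        #   }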
parser.add_argument(
'--provider',
action='store',
dest='provider',
required=True,
help='_id of the <provider> object, e.g. "osf". <provider> is expected to not already have a custom taxonomy.'
)
parser.add_argument(
'--from-subjects-acceptable',
action='store_true',
dest='from_subjects_acceptable',
help='Specifies that the provider\'s `subjects_acceptable` be copied. `data.include` and `exclude` are ignored, the other keys may still be used'
)
parser.add_argument(
'--add-missing',
action='store_true',
dest='add_missing',
help='Adds "used-but-not-included" subjects.'
)
parser.add_argument(
'--share-title',
action='store',
type=str,
dest='share_title',
help='Sets <provider>.share_title. Ignored if already set on provider, required if not.'
)
parser.add_argument(
'--type',
action='store',
type=str,
dest='provider_type',
help='Specifies provider type [`osf.preprintprovider`, `osf.registrationprovider`, `osf.collectionprovider`]'
)
def handle(self, *args, **options):
global BEPRESS_PROVIDER
provider_type = options.get('provider_type') or 'osf.preprintprovider'
BEPRESS_PROVIDER = AbstractProvider.objects.filter(_id='osf', type='osf.preprintprovider').first()
dry_run = options.get('dry_run')
provider = options['provider']
data = json.loads(options['data'] or '{}')
share_title = options.get('share_title')
copy = options.get('from_subjects_acceptable')
add_missing = options.get('add_missing')
if copy:
data['include'] = list(Subject.objects.filter(provider=BEPRESS_PROVIDER, parent__isnull=True).values_list('text', flat=True))
if not dry_run:
script_utils.add_file_logger(logger, __file__)
with transaction.atomic():
migrate(provider=provider, share_title=share_title, provider_type=provider_type, data=data, dry_run=dry_run, copy=copy, add_missing=add_missing)
if dry_run:
raise RuntimeError('Dry run, transaction rolled back.')
|
HalcyonChimera/osf.io
|
osf/management/commands/populate_custom_taxonomies.py
|
Python
|
apache-2.0
| 14,704
|
import os
from geotrek.flatpages.models import FlatPage
from geotrek.flatpages.views import FlatPageViewSet, FlatPageMeta
from django.db.models import Q
class SyncRando:
def __init__(self, sync):
self.global_sync = sync
def sync(self, lang):
self.global_sync.sync_geojson(lang, FlatPageViewSet, 'flatpages.geojson', zipfile=self.global_sync.zipfile)
flatpages = FlatPage.objects.filter(published=True)
if self.global_sync.source:
flatpages = flatpages.filter(source__name__in=self.global_sync.source)
if self.global_sync.portal:
flatpages = flatpages.filter(Q(portal__name=self.global_sync.portal) | Q(portal=None))
for flatpage in flatpages:
name = os.path.join('meta', lang, flatpage.rando_url, 'index.html')
self.global_sync.sync_view(lang, FlatPageMeta.as_view(), name, pk=flatpage.pk,
params={'rando_url': self.global_sync.rando_url})
|
makinacorpus/Geotrek
|
geotrek/flatpages/helpers_sync.py
|
Python
|
bsd-2-clause
| 988
|
# -*- coding: utf-8 -*-
from .dev import * # noqa
INSTALLED_APPS += (
'django_extensions',
)
DATABASES = {
'default': {
'ENGINE': 'django.contrib.gis.db.backends.postgis',
'NAME': 'healthsites_dev',
'USER': '',
'PASSWORD': '',
'HOST': 'localhost',
# Set to empty string for default.
'PORT': '',
}
}
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
# define output formats
'verbose': {
'format': (
'%(levelname)s %(name)s %(asctime)s %(module)s %(process)d '
'%(thread)d %(message)s')
},
'simple': {
'format': (
'%(name)s %(levelname)s %(filename)s L%(lineno)s: '
'%(message)s')
},
},
'handlers': {
# console output
'console': {
'class': 'logging.StreamHandler',
'formatter': 'simple',
'level': 'DEBUG',
},
# 'logfile': {
# 'class': 'logging.FileHandler',
# 'filename': '/tmp/app-dev.log',
# 'formatter': 'simple',
# 'level': 'DEBUG',
# }
},
'loggers': {
'django.db.backends': {
'handlers': ['console'],
'level': 'INFO', # switch to DEBUG to show actual SQL
},
# example app logger
'localities': {
'level': 'DEBUG',
'handlers': ['console'],
            # propagate is True by default, which propagates logs upstream
'propagate': False
}
},
# root logger
    # unhandled logs will propagate to the root logger
'root': {
'handlers': ['console'],
'level': 'WARNING'
}
}
|
ismailsunni/healthsites
|
django_project/core/settings/dev_dodobas.py
|
Python
|
bsd-2-clause
| 1,782
|
"""This file contains information on how to translate different ufuncs
into numba. It is a database of different ufuncs and how each of its
loops maps to a function that implements the inner kernel of that ufunc
(the inner kernel being the per-element function).
Use the function get_ufunc_info to get the information related to the
ufunc
"""
from __future__ import print_function, division, absolute_import
import numpy as np
# this is lazily initialized to avoid circular imports
_ufunc_db = None
def _lazy_init_db():
global _ufunc_db
if _ufunc_db is None:
_ufunc_db = {}
_fill_ufunc_db(_ufunc_db)
def get_ufuncs():
"""obtain a list of supported ufuncs in the db"""
_lazy_init_db()
return _ufunc_db.keys()
def get_ufunc_info(ufunc_key):
"""get the lowering information for the ufunc with key ufunc_key.
The lowering information is a dictionary that maps from a numpy
loop string (as given by the ufunc types attribute) to a function
that handles code generation for a scalar version of the ufunc
    (that is, generates the "per element" operation).
raises a KeyError if the ufunc is not in the ufunc_db
"""
_lazy_init_db()
return _ufunc_db[ufunc_key]
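# A minimal commented sketch (not part of the original module) of how the db
# is meant to be consumed; the loop-string keys ('f->f', 'dd->d', ...) follow
# the ufunc's `types` attribute:
#
#   lowering = get_ufunc_info(np.add)         # raises KeyError if unsupported
#   scalar_add_double = lowering['dd->d']     # per-element code generator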
def _fill_ufunc_db(ufunc_db):
# some of these imports would cause a problem of circular
# imports if done at global scope when importing the numba
# module.
from . import builtins, npyfuncs, cmathimpl
ufunc_db[np.negative] = {
'?->?': builtins.bool_invert_impl,
'b->b': builtins.int_negate_impl,
'B->B': builtins.int_negate_impl,
'h->h': builtins.int_negate_impl,
'H->H': builtins.int_negate_impl,
'i->i': builtins.int_negate_impl,
'I->I': builtins.int_negate_impl,
'l->l': builtins.int_negate_impl,
'L->L': builtins.int_negate_impl,
'q->q': builtins.int_negate_impl,
'Q->Q': builtins.int_negate_impl,
'f->f': builtins.real_negate_impl,
'd->d': builtins.real_negate_impl,
'F->F': builtins.complex_negate_impl,
'D->D': builtins.complex_negate_impl,
}
ufunc_db[np.absolute] = {
'?->?': builtins.int_abs_impl,
'b->b': builtins.int_abs_impl,
'B->B': builtins.uint_abs_impl,
'h->h': builtins.int_abs_impl,
'H->H': builtins.uint_abs_impl,
'i->i': builtins.int_abs_impl,
'I->I': builtins.uint_abs_impl,
'l->l': builtins.int_abs_impl,
'L->L': builtins.uint_abs_impl,
'q->q': builtins.int_abs_impl,
'Q->Q': builtins.uint_abs_impl,
'f->f': builtins.real_abs_impl,
'd->d': builtins.real_abs_impl,
'F->f': builtins.complex_abs_impl,
'D->d': builtins.complex_abs_impl,
}
ufunc_db[np.sign] = {
'b->b': builtins.int_sign_impl,
'B->B': builtins.int_sign_impl,
'h->h': builtins.int_sign_impl,
'H->H': builtins.int_sign_impl,
'i->i': builtins.int_sign_impl,
'I->I': builtins.int_sign_impl,
'l->l': builtins.int_sign_impl,
'L->L': builtins.int_sign_impl,
'q->q': builtins.int_sign_impl,
'Q->Q': builtins.int_sign_impl,
'f->f': builtins.real_sign_impl,
'd->d': builtins.real_sign_impl,
'F->F': npyfuncs.np_complex_sign_impl,
'D->D': npyfuncs.np_complex_sign_impl,
}
ufunc_db[np.add] = {
'??->?': builtins.int_or_impl,
'bb->b': builtins.int_add_impl,
'BB->B': builtins.int_add_impl,
'hh->h': builtins.int_add_impl,
'HH->H': builtins.int_add_impl,
'ii->i': builtins.int_add_impl,
'II->I': builtins.int_add_impl,
'll->l': builtins.int_add_impl,
'LL->L': builtins.int_add_impl,
'qq->q': builtins.int_add_impl,
'QQ->Q': builtins.int_add_impl,
'ff->f': builtins.real_add_impl,
'dd->d': builtins.real_add_impl,
'FF->F': builtins.complex_add_impl,
'DD->D': builtins.complex_add_impl,
}
ufunc_db[np.subtract] = {
'??->?': builtins.int_xor_impl,
'bb->b': builtins.int_sub_impl,
'BB->B': builtins.int_sub_impl,
'hh->h': builtins.int_sub_impl,
'HH->H': builtins.int_sub_impl,
'ii->i': builtins.int_sub_impl,
'II->I': builtins.int_sub_impl,
'll->l': builtins.int_sub_impl,
'LL->L': builtins.int_sub_impl,
'qq->q': builtins.int_sub_impl,
'QQ->Q': builtins.int_sub_impl,
'ff->f': builtins.real_sub_impl,
'dd->d': builtins.real_sub_impl,
'FF->F': builtins.complex_sub_impl,
'DD->D': builtins.complex_sub_impl,
}
ufunc_db[np.multiply] = {
'??->?': builtins.int_and_impl,
'bb->b': builtins.int_mul_impl,
'BB->B': builtins.int_mul_impl,
'hh->h': builtins.int_mul_impl,
'HH->H': builtins.int_mul_impl,
'ii->i': builtins.int_mul_impl,
'II->I': builtins.int_mul_impl,
'll->l': builtins.int_mul_impl,
'LL->L': builtins.int_mul_impl,
'qq->q': builtins.int_mul_impl,
'QQ->Q': builtins.int_mul_impl,
'ff->f': builtins.real_mul_impl,
'dd->d': builtins.real_mul_impl,
'FF->F': builtins.complex_mul_impl,
'DD->D': builtins.complex_mul_impl,
}
if np.divide != np.true_divide:
ufunc_db[np.divide] = {
'bb->b': npyfuncs.np_int_sdiv_impl,
'BB->B': npyfuncs.np_int_udiv_impl,
'hh->h': npyfuncs.np_int_sdiv_impl,
'HH->H': npyfuncs.np_int_udiv_impl,
'ii->i': npyfuncs.np_int_sdiv_impl,
'II->I': npyfuncs.np_int_udiv_impl,
'll->l': npyfuncs.np_int_sdiv_impl,
'LL->L': npyfuncs.np_int_udiv_impl,
'qq->q': npyfuncs.np_int_sdiv_impl,
'QQ->Q': npyfuncs.np_int_udiv_impl,
'ff->f': npyfuncs.np_real_div_impl,
'dd->d': npyfuncs.np_real_div_impl,
'FF->F': npyfuncs.np_complex_div_impl,
'DD->D': npyfuncs.np_complex_div_impl,
}
ufunc_db[np.true_divide] = {
'bb->d': npyfuncs.np_int_truediv_impl,
'BB->d': npyfuncs.np_int_truediv_impl,
'hh->d': npyfuncs.np_int_truediv_impl,
'HH->d': npyfuncs.np_int_truediv_impl,
'ii->d': npyfuncs.np_int_truediv_impl,
'II->d': npyfuncs.np_int_truediv_impl,
'll->d': npyfuncs.np_int_truediv_impl,
'LL->d': npyfuncs.np_int_truediv_impl,
'qq->d': npyfuncs.np_int_truediv_impl,
'QQ->d': npyfuncs.np_int_truediv_impl,
'ff->f': npyfuncs.np_real_div_impl,
'dd->d': npyfuncs.np_real_div_impl,
'FF->F': npyfuncs.np_complex_div_impl,
'DD->D': npyfuncs.np_complex_div_impl,
}
ufunc_db[np.floor_divide] = {
'bb->b': npyfuncs.np_int_sdiv_impl,
'BB->B': npyfuncs.np_int_udiv_impl,
'hh->h': npyfuncs.np_int_sdiv_impl,
'HH->H': npyfuncs.np_int_udiv_impl,
'ii->i': npyfuncs.np_int_sdiv_impl,
'II->I': npyfuncs.np_int_udiv_impl,
'll->l': npyfuncs.np_int_sdiv_impl,
'LL->L': npyfuncs.np_int_udiv_impl,
'qq->q': npyfuncs.np_int_sdiv_impl,
'QQ->Q': npyfuncs.np_int_udiv_impl,
'ff->f': npyfuncs.np_real_floor_div_impl,
'dd->d': npyfuncs.np_real_floor_div_impl,
'FF->F': npyfuncs.np_complex_floor_div_impl,
'DD->D': npyfuncs.np_complex_floor_div_impl,
}
ufunc_db[np.remainder] = {
'bb->b': npyfuncs.np_int_srem_impl,
'BB->B': npyfuncs.np_int_urem_impl,
'hh->h': npyfuncs.np_int_srem_impl,
'HH->H': npyfuncs.np_int_urem_impl,
'ii->i': npyfuncs.np_int_srem_impl,
'II->I': npyfuncs.np_int_urem_impl,
'll->l': npyfuncs.np_int_srem_impl,
'LL->L': npyfuncs.np_int_urem_impl,
'qq->q': npyfuncs.np_int_srem_impl,
'QQ->Q': npyfuncs.np_int_urem_impl,
'ff->f': npyfuncs.np_real_mod_impl,
'dd->d': npyfuncs.np_real_mod_impl,
}
ufunc_db[np.fmod] = {
'bb->b': npyfuncs.np_int_fmod_impl,
'BB->B': npyfuncs.np_int_fmod_impl,
'hh->h': npyfuncs.np_int_fmod_impl,
'HH->H': npyfuncs.np_int_fmod_impl,
'ii->i': npyfuncs.np_int_fmod_impl,
'II->I': npyfuncs.np_int_fmod_impl,
'll->l': npyfuncs.np_int_fmod_impl,
'LL->L': npyfuncs.np_int_fmod_impl,
'qq->q': npyfuncs.np_int_fmod_impl,
'QQ->Q': npyfuncs.np_int_fmod_impl,
'ff->f': npyfuncs.np_real_fmod_impl,
'dd->d': npyfuncs.np_real_fmod_impl,
}
ufunc_db[np.logaddexp] = {
'ff->f': npyfuncs.np_real_logaddexp_impl,
'dd->d': npyfuncs.np_real_logaddexp_impl,
}
ufunc_db[np.logaddexp2] = {
'ff->f': npyfuncs.np_real_logaddexp2_impl,
'dd->d': npyfuncs.np_real_logaddexp2_impl,
}
ufunc_db[np.power] = {
'bb->b': npyfuncs.np_int_power_impl,
'BB->B': npyfuncs.np_int_power_impl,
'hh->h': npyfuncs.np_int_power_impl,
'HH->H': npyfuncs.np_int_power_impl,
'ii->i': npyfuncs.np_int_power_impl,
'II->I': npyfuncs.np_int_power_impl,
'll->l': npyfuncs.np_int_power_impl,
'LL->L': npyfuncs.np_int_power_impl,
'qq->q': npyfuncs.np_int_power_impl,
'QQ->Q': npyfuncs.np_int_power_impl,
'ff->f': npyfuncs.np_real_power_impl,
'dd->d': npyfuncs.np_real_power_impl,
'FF->F': npyfuncs.np_complex_power_impl,
'DD->D': npyfuncs.np_complex_power_impl,
}
ufunc_db[np.rint] = {
'f->f': npyfuncs.np_real_rint_impl,
'd->d': npyfuncs.np_real_rint_impl,
'F->F': npyfuncs.np_complex_rint_impl,
'D->D': npyfuncs.np_complex_rint_impl,
}
ufunc_db[np.conjugate] = {
'b->b': builtins.real_conjugate_impl,
'B->B': builtins.real_conjugate_impl,
'h->h': builtins.real_conjugate_impl,
'H->H': builtins.real_conjugate_impl,
'i->i': builtins.real_conjugate_impl,
'I->I': builtins.real_conjugate_impl,
'l->l': builtins.real_conjugate_impl,
'L->L': builtins.real_conjugate_impl,
'q->q': builtins.real_conjugate_impl,
'Q->Q': builtins.real_conjugate_impl,
'f->f': builtins.real_conjugate_impl,
'd->d': builtins.real_conjugate_impl,
'F->F': builtins.complex_conjugate_impl,
'D->D': builtins.complex_conjugate_impl,
}
ufunc_db[np.exp] = {
'f->f': npyfuncs.np_real_exp_impl,
'd->d': npyfuncs.np_real_exp_impl,
'F->F': npyfuncs.np_complex_exp_impl,
'D->D': npyfuncs.np_complex_exp_impl,
}
ufunc_db[np.exp2] = {
'f->f': npyfuncs.np_real_exp2_impl,
'd->d': npyfuncs.np_real_exp2_impl,
'F->F': npyfuncs.np_complex_exp2_impl,
'D->D': npyfuncs.np_complex_exp2_impl,
}
ufunc_db[np.log] = {
'f->f': npyfuncs.np_real_log_impl,
'd->d': npyfuncs.np_real_log_impl,
'F->F': npyfuncs.np_complex_log_impl,
'D->D': npyfuncs.np_complex_log_impl,
}
ufunc_db[np.log2] = {
'f->f': npyfuncs.np_real_log2_impl,
'd->d': npyfuncs.np_real_log2_impl,
'F->F': npyfuncs.np_complex_log2_impl,
'D->D': npyfuncs.np_complex_log2_impl,
}
ufunc_db[np.log10] = {
'f->f': npyfuncs.np_real_log10_impl,
'd->d': npyfuncs.np_real_log10_impl,
'F->F': npyfuncs.np_complex_log10_impl,
'D->D': npyfuncs.np_complex_log10_impl,
}
ufunc_db[np.expm1] = {
'f->f': npyfuncs.np_real_expm1_impl,
'd->d': npyfuncs.np_real_expm1_impl,
'F->F': npyfuncs.np_complex_expm1_impl,
'D->D': npyfuncs.np_complex_expm1_impl,
}
ufunc_db[np.log1p] = {
'f->f': npyfuncs.np_real_log1p_impl,
'd->d': npyfuncs.np_real_log1p_impl,
'F->F': npyfuncs.np_complex_log1p_impl,
'D->D': npyfuncs.np_complex_log1p_impl,
}
ufunc_db[np.sqrt] = {
'f->f': npyfuncs.np_real_sqrt_impl,
'd->d': npyfuncs.np_real_sqrt_impl,
'F->F': npyfuncs.np_complex_sqrt_impl,
'D->D': npyfuncs.np_complex_sqrt_impl,
}
ufunc_db[np.square] = {
'b->b': npyfuncs.np_int_square_impl,
'B->B': npyfuncs.np_int_square_impl,
'h->h': npyfuncs.np_int_square_impl,
'H->H': npyfuncs.np_int_square_impl,
'i->i': npyfuncs.np_int_square_impl,
'I->I': npyfuncs.np_int_square_impl,
'l->l': npyfuncs.np_int_square_impl,
'L->L': npyfuncs.np_int_square_impl,
'q->q': npyfuncs.np_int_square_impl,
'Q->Q': npyfuncs.np_int_square_impl,
'f->f': npyfuncs.np_real_square_impl,
'd->d': npyfuncs.np_real_square_impl,
'F->F': npyfuncs.np_complex_square_impl,
'D->D': npyfuncs.np_complex_square_impl,
}
ufunc_db[np.reciprocal] = {
'b->b': npyfuncs.np_int_reciprocal_impl,
'B->B': npyfuncs.np_int_reciprocal_impl,
'h->h': npyfuncs.np_int_reciprocal_impl,
'H->H': npyfuncs.np_int_reciprocal_impl,
'i->i': npyfuncs.np_int_reciprocal_impl,
'I->I': npyfuncs.np_int_reciprocal_impl,
'l->l': npyfuncs.np_int_reciprocal_impl,
'L->L': npyfuncs.np_int_reciprocal_impl,
'q->q': npyfuncs.np_int_reciprocal_impl,
'Q->Q': npyfuncs.np_int_reciprocal_impl,
'f->f': npyfuncs.np_real_reciprocal_impl,
'd->d': npyfuncs.np_real_reciprocal_impl,
'F->F': npyfuncs.np_complex_reciprocal_impl,
'D->D': npyfuncs.np_complex_reciprocal_impl,
}
ufunc_db[np.sin] = {
'f->f': npyfuncs.np_real_sin_impl,
'd->d': npyfuncs.np_real_sin_impl,
'F->F': npyfuncs.np_complex_sin_impl,
'D->D': npyfuncs.np_complex_sin_impl,
}
ufunc_db[np.cos] = {
'f->f': npyfuncs.np_real_cos_impl,
'd->d': npyfuncs.np_real_cos_impl,
'F->F': npyfuncs.np_complex_cos_impl,
'D->D': npyfuncs.np_complex_cos_impl,
}
ufunc_db[np.tan] = {
'f->f': npyfuncs.np_real_tan_impl,
'd->d': npyfuncs.np_real_tan_impl,
'F->F': npyfuncs.np_complex_tan_impl,
'D->D': npyfuncs.np_complex_tan_impl,
}
ufunc_db[np.arcsin] = {
'f->f': npyfuncs.np_real_asin_impl,
'd->d': npyfuncs.np_real_asin_impl,
'F->F': npyfuncs.np_complex_asin_impl,
'D->D': npyfuncs.np_complex_asin_impl,
}
ufunc_db[np.arccos] = {
'f->f': npyfuncs.np_real_acos_impl,
'd->d': npyfuncs.np_real_acos_impl,
'F->F': cmathimpl.acos_impl,
'D->D': cmathimpl.acos_impl,
}
ufunc_db[np.arctan] = {
'f->f': npyfuncs.np_real_atan_impl,
'd->d': npyfuncs.np_real_atan_impl,
'F->F': npyfuncs.np_complex_atan_impl,
'D->D': npyfuncs.np_complex_atan_impl,
}
ufunc_db[np.arctan2] = {
'ff->f': npyfuncs.np_real_atan2_impl,
'dd->d': npyfuncs.np_real_atan2_impl,
}
ufunc_db[np.hypot] = {
'ff->f': npyfuncs.np_real_hypot_impl,
'dd->d': npyfuncs.np_real_hypot_impl,
}
ufunc_db[np.sinh] = {
'f->f': npyfuncs.np_real_sinh_impl,
'd->d': npyfuncs.np_real_sinh_impl,
'F->F': npyfuncs.np_complex_sinh_impl,
'D->D': npyfuncs.np_complex_sinh_impl,
}
ufunc_db[np.cosh] = {
'f->f': npyfuncs.np_real_cosh_impl,
'd->d': npyfuncs.np_real_cosh_impl,
'F->F': npyfuncs.np_complex_cosh_impl,
'D->D': npyfuncs.np_complex_cosh_impl,
}
ufunc_db[np.tanh] = {
'f->f': npyfuncs.np_real_tanh_impl,
'd->d': npyfuncs.np_real_tanh_impl,
'F->F': npyfuncs.np_complex_tanh_impl,
'D->D': npyfuncs.np_complex_tanh_impl,
}
ufunc_db[np.arcsinh] = {
'f->f': npyfuncs.np_real_asinh_impl,
'd->d': npyfuncs.np_real_asinh_impl,
'F->F': npyfuncs.np_complex_asinh_impl,
'D->D': npyfuncs.np_complex_asinh_impl,
}
ufunc_db[np.arccosh] = {
'f->f': npyfuncs.np_real_acosh_impl,
'd->d': npyfuncs.np_real_acosh_impl,
'F->F': npyfuncs.np_complex_acosh_impl,
'D->D': npyfuncs.np_complex_acosh_impl,
}
ufunc_db[np.arctanh] = {
'f->f': npyfuncs.np_real_atanh_impl,
'd->d': npyfuncs.np_real_atanh_impl,
'F->F': npyfuncs.np_complex_atanh_impl,
'D->D': npyfuncs.np_complex_atanh_impl,
}
ufunc_db[np.deg2rad] = {
'f->f': npyfuncs.np_real_deg2rad_impl,
'd->d': npyfuncs.np_real_deg2rad_impl,
}
ufunc_db[np.radians] = ufunc_db[np.deg2rad]
ufunc_db[np.rad2deg] = {
'f->f': npyfuncs.np_real_rad2deg_impl,
'd->d': npyfuncs.np_real_rad2deg_impl,
}
ufunc_db[np.degrees] = ufunc_db[np.rad2deg]
ufunc_db[np.floor] = {
'f->f': npyfuncs.np_real_floor_impl,
'd->d': npyfuncs.np_real_floor_impl,
}
ufunc_db[np.ceil] = {
'f->f': npyfuncs.np_real_ceil_impl,
'd->d': npyfuncs.np_real_ceil_impl,
}
ufunc_db[np.trunc] = {
'f->f': npyfuncs.np_real_trunc_impl,
'd->d': npyfuncs.np_real_trunc_impl,
}
ufunc_db[np.fabs] = {
'f->f': npyfuncs.np_real_fabs_impl,
'd->d': npyfuncs.np_real_fabs_impl,
}
# logical ufuncs
ufunc_db[np.greater] = {
'??->?': builtins.int_ugt_impl,
'bb->?': builtins.int_sgt_impl,
'BB->?': builtins.int_ugt_impl,
'hh->?': builtins.int_sgt_impl,
'HH->?': builtins.int_ugt_impl,
'ii->?': builtins.int_sgt_impl,
'II->?': builtins.int_ugt_impl,
'll->?': builtins.int_sgt_impl,
'LL->?': builtins.int_ugt_impl,
'qq->?': builtins.int_sgt_impl,
'QQ->?': builtins.int_ugt_impl,
'ff->?': builtins.real_gt_impl,
'dd->?': builtins.real_gt_impl,
'FF->?': npyfuncs.np_complex_gt_impl,
'DD->?': npyfuncs.np_complex_gt_impl,
}
ufunc_db[np.greater_equal] = {
'??->?': builtins.int_uge_impl,
'bb->?': builtins.int_sge_impl,
'BB->?': builtins.int_uge_impl,
'hh->?': builtins.int_sge_impl,
'HH->?': builtins.int_uge_impl,
'ii->?': builtins.int_sge_impl,
'II->?': builtins.int_uge_impl,
'll->?': builtins.int_sge_impl,
'LL->?': builtins.int_uge_impl,
'qq->?': builtins.int_sge_impl,
'QQ->?': builtins.int_uge_impl,
'ff->?': builtins.real_ge_impl,
'dd->?': builtins.real_ge_impl,
'FF->?': npyfuncs.np_complex_ge_impl,
'DD->?': npyfuncs.np_complex_ge_impl,
}
ufunc_db[np.less] = {
'??->?': builtins.int_ult_impl,
'bb->?': builtins.int_slt_impl,
'BB->?': builtins.int_ult_impl,
'hh->?': builtins.int_slt_impl,
'HH->?': builtins.int_ult_impl,
'ii->?': builtins.int_slt_impl,
'II->?': builtins.int_ult_impl,
'll->?': builtins.int_slt_impl,
'LL->?': builtins.int_ult_impl,
'qq->?': builtins.int_slt_impl,
'QQ->?': builtins.int_ult_impl,
'ff->?': builtins.real_lt_impl,
'dd->?': builtins.real_lt_impl,
'FF->?': npyfuncs.np_complex_lt_impl,
'DD->?': npyfuncs.np_complex_lt_impl,
}
ufunc_db[np.less_equal] = {
'??->?': builtins.int_ule_impl,
'bb->?': builtins.int_sle_impl,
'BB->?': builtins.int_ule_impl,
'hh->?': builtins.int_sle_impl,
'HH->?': builtins.int_ule_impl,
'ii->?': builtins.int_sle_impl,
'II->?': builtins.int_ule_impl,
'll->?': builtins.int_sle_impl,
'LL->?': builtins.int_ule_impl,
'qq->?': builtins.int_sle_impl,
'QQ->?': builtins.int_ule_impl,
'ff->?': builtins.real_le_impl,
'dd->?': builtins.real_le_impl,
'FF->?': npyfuncs.np_complex_le_impl,
'DD->?': npyfuncs.np_complex_le_impl,
}
ufunc_db[np.not_equal] = {
'??->?': builtins.int_ne_impl,
'bb->?': builtins.int_ne_impl,
'BB->?': builtins.int_ne_impl,
'hh->?': builtins.int_ne_impl,
'HH->?': builtins.int_ne_impl,
'ii->?': builtins.int_ne_impl,
'II->?': builtins.int_ne_impl,
'll->?': builtins.int_ne_impl,
'LL->?': builtins.int_ne_impl,
'qq->?': builtins.int_ne_impl,
'QQ->?': builtins.int_ne_impl,
'ff->?': builtins.real_ne_impl,
'dd->?': builtins.real_ne_impl,
'FF->?': npyfuncs.np_complex_ne_impl,
'DD->?': npyfuncs.np_complex_ne_impl,
}
ufunc_db[np.equal] = {
'??->?': builtins.int_eq_impl,
'bb->?': builtins.int_eq_impl,
'BB->?': builtins.int_eq_impl,
'hh->?': builtins.int_eq_impl,
'HH->?': builtins.int_eq_impl,
'ii->?': builtins.int_eq_impl,
'II->?': builtins.int_eq_impl,
'll->?': builtins.int_eq_impl,
'LL->?': builtins.int_eq_impl,
'qq->?': builtins.int_eq_impl,
'QQ->?': builtins.int_eq_impl,
'ff->?': builtins.real_eq_impl,
'dd->?': builtins.real_eq_impl,
'FF->?': npyfuncs.np_complex_eq_impl,
'DD->?': npyfuncs.np_complex_eq_impl,
}
ufunc_db[np.logical_and] = {
'??->?': npyfuncs.np_logical_and_impl,
'bb->?': npyfuncs.np_logical_and_impl,
'BB->?': npyfuncs.np_logical_and_impl,
'hh->?': npyfuncs.np_logical_and_impl,
'HH->?': npyfuncs.np_logical_and_impl,
'ii->?': npyfuncs.np_logical_and_impl,
'II->?': npyfuncs.np_logical_and_impl,
'll->?': npyfuncs.np_logical_and_impl,
'LL->?': npyfuncs.np_logical_and_impl,
'qq->?': npyfuncs.np_logical_and_impl,
'QQ->?': npyfuncs.np_logical_and_impl,
'ff->?': npyfuncs.np_logical_and_impl,
'dd->?': npyfuncs.np_logical_and_impl,
'FF->?': npyfuncs.np_complex_logical_and_impl,
'DD->?': npyfuncs.np_complex_logical_and_impl,
}
ufunc_db[np.logical_or] = {
'??->?': npyfuncs.np_logical_or_impl,
'bb->?': npyfuncs.np_logical_or_impl,
'BB->?': npyfuncs.np_logical_or_impl,
'hh->?': npyfuncs.np_logical_or_impl,
'HH->?': npyfuncs.np_logical_or_impl,
'ii->?': npyfuncs.np_logical_or_impl,
'II->?': npyfuncs.np_logical_or_impl,
'll->?': npyfuncs.np_logical_or_impl,
'LL->?': npyfuncs.np_logical_or_impl,
'qq->?': npyfuncs.np_logical_or_impl,
'QQ->?': npyfuncs.np_logical_or_impl,
'ff->?': npyfuncs.np_logical_or_impl,
'dd->?': npyfuncs.np_logical_or_impl,
'FF->?': npyfuncs.np_complex_logical_or_impl,
'DD->?': npyfuncs.np_complex_logical_or_impl,
}
ufunc_db[np.logical_xor] = {
'??->?': npyfuncs.np_logical_xor_impl,
'bb->?': npyfuncs.np_logical_xor_impl,
'BB->?': npyfuncs.np_logical_xor_impl,
'hh->?': npyfuncs.np_logical_xor_impl,
'HH->?': npyfuncs.np_logical_xor_impl,
'ii->?': npyfuncs.np_logical_xor_impl,
'II->?': npyfuncs.np_logical_xor_impl,
'll->?': npyfuncs.np_logical_xor_impl,
'LL->?': npyfuncs.np_logical_xor_impl,
'qq->?': npyfuncs.np_logical_xor_impl,
'QQ->?': npyfuncs.np_logical_xor_impl,
'ff->?': npyfuncs.np_logical_xor_impl,
'dd->?': npyfuncs.np_logical_xor_impl,
'FF->?': npyfuncs.np_complex_logical_xor_impl,
'DD->?': npyfuncs.np_complex_logical_xor_impl,
}
ufunc_db[np.logical_not] = {
'?->?': npyfuncs.np_logical_not_impl,
'b->?': npyfuncs.np_logical_not_impl,
'B->?': npyfuncs.np_logical_not_impl,
'h->?': npyfuncs.np_logical_not_impl,
'H->?': npyfuncs.np_logical_not_impl,
'i->?': npyfuncs.np_logical_not_impl,
'I->?': npyfuncs.np_logical_not_impl,
'l->?': npyfuncs.np_logical_not_impl,
'L->?': npyfuncs.np_logical_not_impl,
'q->?': npyfuncs.np_logical_not_impl,
'Q->?': npyfuncs.np_logical_not_impl,
'f->?': npyfuncs.np_logical_not_impl,
'd->?': npyfuncs.np_logical_not_impl,
'F->?': npyfuncs.np_complex_logical_not_impl,
'D->?': npyfuncs.np_complex_logical_not_impl,
}
ufunc_db[np.maximum] = {
'??->?': npyfuncs.np_logical_or_impl,
'bb->b': npyfuncs.np_int_smax_impl,
'BB->B': npyfuncs.np_int_umax_impl,
'hh->h': npyfuncs.np_int_smax_impl,
'HH->H': npyfuncs.np_int_umax_impl,
'ii->i': npyfuncs.np_int_smax_impl,
'II->I': npyfuncs.np_int_umax_impl,
'll->l': npyfuncs.np_int_smax_impl,
'LL->L': npyfuncs.np_int_umax_impl,
'qq->q': npyfuncs.np_int_smax_impl,
'QQ->Q': npyfuncs.np_int_umax_impl,
'ff->f': npyfuncs.np_real_maximum_impl,
'dd->d': npyfuncs.np_real_maximum_impl,
'FF->F': npyfuncs.np_complex_maximum_impl,
'DD->D': npyfuncs.np_complex_maximum_impl,
}
ufunc_db[np.minimum] = {
'??->?': npyfuncs.np_logical_and_impl,
'bb->b': npyfuncs.np_int_smin_impl,
'BB->B': npyfuncs.np_int_umin_impl,
'hh->h': npyfuncs.np_int_smin_impl,
'HH->H': npyfuncs.np_int_umin_impl,
'ii->i': npyfuncs.np_int_smin_impl,
'II->I': npyfuncs.np_int_umin_impl,
'll->l': npyfuncs.np_int_smin_impl,
'LL->L': npyfuncs.np_int_umin_impl,
'qq->q': npyfuncs.np_int_smin_impl,
'QQ->Q': npyfuncs.np_int_umin_impl,
'ff->f': npyfuncs.np_real_minimum_impl,
'dd->d': npyfuncs.np_real_minimum_impl,
'FF->F': npyfuncs.np_complex_minimum_impl,
'DD->D': npyfuncs.np_complex_minimum_impl,
}
ufunc_db[np.fmax] = {
'??->?': npyfuncs.np_logical_or_impl,
'bb->b': npyfuncs.np_int_smax_impl,
'BB->B': npyfuncs.np_int_umax_impl,
'hh->h': npyfuncs.np_int_smax_impl,
'HH->H': npyfuncs.np_int_umax_impl,
'ii->i': npyfuncs.np_int_smax_impl,
'II->I': npyfuncs.np_int_umax_impl,
'll->l': npyfuncs.np_int_smax_impl,
'LL->L': npyfuncs.np_int_umax_impl,
'qq->q': npyfuncs.np_int_smax_impl,
'QQ->Q': npyfuncs.np_int_umax_impl,
'ff->f': npyfuncs.np_real_fmax_impl,
'dd->d': npyfuncs.np_real_fmax_impl,
'FF->F': npyfuncs.np_complex_fmax_impl,
'DD->D': npyfuncs.np_complex_fmax_impl,
}
ufunc_db[np.fmin] = {
'??->?': npyfuncs.np_logical_and_impl,
'bb->b': npyfuncs.np_int_smin_impl,
'BB->B': npyfuncs.np_int_umin_impl,
'hh->h': npyfuncs.np_int_smin_impl,
'HH->H': npyfuncs.np_int_umin_impl,
'ii->i': npyfuncs.np_int_smin_impl,
'II->I': npyfuncs.np_int_umin_impl,
'll->l': npyfuncs.np_int_smin_impl,
'LL->L': npyfuncs.np_int_umin_impl,
'qq->q': npyfuncs.np_int_smin_impl,
'QQ->Q': npyfuncs.np_int_umin_impl,
'ff->f': npyfuncs.np_real_fmin_impl,
'dd->d': npyfuncs.np_real_fmin_impl,
'FF->F': npyfuncs.np_complex_fmin_impl,
'DD->D': npyfuncs.np_complex_fmin_impl,
}
# misc floating functions
ufunc_db[np.isnan] = {
'f->?': npyfuncs.np_real_isnan_impl,
'd->?': npyfuncs.np_real_isnan_impl,
'F->?': npyfuncs.np_complex_isnan_impl,
'D->?': npyfuncs.np_complex_isnan_impl,
}
ufunc_db[np.isinf] = {
'f->?': npyfuncs.np_real_isinf_impl,
'd->?': npyfuncs.np_real_isinf_impl,
'F->?': npyfuncs.np_complex_isinf_impl,
'D->?': npyfuncs.np_complex_isinf_impl,
}
ufunc_db[np.isfinite] = {
'f->?': npyfuncs.np_real_isfinite_impl,
'd->?': npyfuncs.np_real_isfinite_impl,
'F->?': npyfuncs.np_complex_isfinite_impl,
'D->?': npyfuncs.np_complex_isfinite_impl,
}
ufunc_db[np.signbit] = {
'f->?': npyfuncs.np_real_signbit_impl,
'd->?': npyfuncs.np_real_signbit_impl,
}
ufunc_db[np.copysign] = {
'ff->f': npyfuncs.np_real_copysign_impl,
'dd->d': npyfuncs.np_real_copysign_impl,
}
ufunc_db[np.nextafter] = {
'ff->f': npyfuncs.np_real_nextafter_impl,
'dd->d': npyfuncs.np_real_nextafter_impl,
}
ufunc_db[np.spacing] = {
'f->f': npyfuncs.np_real_spacing_impl,
'd->d': npyfuncs.np_real_spacing_impl,
}
ufunc_db[np.ldexp] = {
'fi->f': npyfuncs.np_real_ldexp_impl,
'fl->f': npyfuncs.np_real_ldexp_impl,
'di->d': npyfuncs.np_real_ldexp_impl,
'dl->d': npyfuncs.np_real_ldexp_impl,
}
# bit twiddling functions
ufunc_db[np.bitwise_and] = {
'??->?': builtins.int_and_impl,
'bb->b': builtins.int_and_impl,
'BB->B': builtins.int_and_impl,
'hh->h': builtins.int_and_impl,
'HH->H': builtins.int_and_impl,
'ii->i': builtins.int_and_impl,
'II->I': builtins.int_and_impl,
'll->l': builtins.int_and_impl,
'LL->L': builtins.int_and_impl,
'qq->q': builtins.int_and_impl,
'QQ->Q': builtins.int_and_impl,
}
ufunc_db[np.bitwise_or] = {
'??->?': builtins.int_or_impl,
'bb->b': builtins.int_or_impl,
'BB->B': builtins.int_or_impl,
'hh->h': builtins.int_or_impl,
'HH->H': builtins.int_or_impl,
'ii->i': builtins.int_or_impl,
'II->I': builtins.int_or_impl,
'll->l': builtins.int_or_impl,
'LL->L': builtins.int_or_impl,
'qq->q': builtins.int_or_impl,
'QQ->Q': builtins.int_or_impl,
}
ufunc_db[np.bitwise_xor] = {
'??->?': builtins.int_xor_impl,
'bb->b': builtins.int_xor_impl,
'BB->B': builtins.int_xor_impl,
'hh->h': builtins.int_xor_impl,
'HH->H': builtins.int_xor_impl,
'ii->i': builtins.int_xor_impl,
'II->I': builtins.int_xor_impl,
'll->l': builtins.int_xor_impl,
'LL->L': builtins.int_xor_impl,
'qq->q': builtins.int_xor_impl,
'QQ->Q': builtins.int_xor_impl,
}
ufunc_db[np.invert] = { # aka np.bitwise_not
'?->?': builtins.bool_invert_impl,
'b->b': builtins.int_invert_impl,
'B->B': builtins.int_invert_impl,
'h->h': builtins.int_invert_impl,
'H->H': builtins.int_invert_impl,
'i->i': builtins.int_invert_impl,
'I->I': builtins.int_invert_impl,
'l->l': builtins.int_invert_impl,
'L->L': builtins.int_invert_impl,
'q->q': builtins.int_invert_impl,
'Q->Q': builtins.int_invert_impl,
}
ufunc_db[np.left_shift] = {
'bb->b': builtins.int_shl_impl,
'BB->B': builtins.int_shl_impl,
'hh->h': builtins.int_shl_impl,
'HH->H': builtins.int_shl_impl,
'ii->i': builtins.int_shl_impl,
'II->I': builtins.int_shl_impl,
'll->l': builtins.int_shl_impl,
'LL->L': builtins.int_shl_impl,
'qq->q': builtins.int_shl_impl,
'QQ->Q': builtins.int_shl_impl,
}
ufunc_db[np.right_shift] = {
'bb->b': builtins.int_shr_impl,
'BB->B': builtins.int_shr_impl,
'hh->h': builtins.int_shr_impl,
'HH->H': builtins.int_shr_impl,
'ii->i': builtins.int_shr_impl,
'II->I': builtins.int_shr_impl,
'll->l': builtins.int_shr_impl,
'LL->L': builtins.int_shr_impl,
'qq->q': builtins.int_shr_impl,
'QQ->Q': builtins.int_shr_impl,
}
# Inject datetime64 support
try:
from . import npdatetime
except NotImplementedError:
# Numpy 1.6
pass
else:
ufunc_db[np.negative].update({
'm->m': npdatetime.timedelta_neg_impl,
})
ufunc_db[np.absolute].update({
'm->m': npdatetime.timedelta_abs_impl,
})
ufunc_db[np.sign].update({
'm->m': npdatetime.timedelta_sign_impl,
})
ufunc_db[np.add].update({
'mm->m': npdatetime.timedelta_add_impl,
'Mm->M': npdatetime.datetime_plus_timedelta,
'mM->M': npdatetime.timedelta_plus_datetime,
})
ufunc_db[np.subtract].update({
'mm->m': npdatetime.timedelta_sub_impl,
'Mm->M': npdatetime.datetime_minus_timedelta,
'MM->m': npdatetime.datetime_minus_datetime,
})
ufunc_db[np.multiply].update({
'mq->m': npdatetime.timedelta_times_number,
'md->m': npdatetime.timedelta_times_number,
'qm->m': npdatetime.number_times_timedelta,
'dm->m': npdatetime.number_times_timedelta,
})
if np.divide != np.true_divide:
ufunc_db[np.divide].update({
'mq->m': npdatetime.timedelta_over_number,
'md->m': npdatetime.timedelta_over_number,
'mm->d': npdatetime.timedelta_over_timedelta,
})
ufunc_db[np.true_divide].update({
'mq->m': npdatetime.timedelta_over_number,
'md->m': npdatetime.timedelta_over_number,
'mm->d': npdatetime.timedelta_over_timedelta,
})
ufunc_db[np.floor_divide].update({
'mq->m': npdatetime.timedelta_over_number,
'md->m': npdatetime.timedelta_over_number,
})
ufunc_db[np.equal].update({
'MM->?': npdatetime.datetime_eq_datetime_impl,
'mm->?': npdatetime.timedelta_eq_timedelta_impl,
})
ufunc_db[np.not_equal].update({
'MM->?': npdatetime.datetime_ne_datetime_impl,
'mm->?': npdatetime.timedelta_ne_timedelta_impl,
})
ufunc_db[np.less].update({
'MM->?': npdatetime.datetime_lt_datetime_impl,
'mm->?': npdatetime.timedelta_lt_timedelta_impl,
})
ufunc_db[np.less_equal].update({
'MM->?': npdatetime.datetime_le_datetime_impl,
'mm->?': npdatetime.timedelta_le_timedelta_impl,
})
ufunc_db[np.greater].update({
'MM->?': npdatetime.datetime_gt_datetime_impl,
'mm->?': npdatetime.timedelta_gt_timedelta_impl,
})
ufunc_db[np.greater_equal].update({
'MM->?': npdatetime.datetime_ge_datetime_impl,
'mm->?': npdatetime.timedelta_ge_timedelta_impl,
})
ufunc_db[np.maximum].update({
'MM->M': npdatetime.datetime_max_impl,
'mm->m': npdatetime.timedelta_max_impl,
})
ufunc_db[np.minimum].update({
'MM->M': npdatetime.datetime_min_impl,
'mm->m': npdatetime.timedelta_min_impl,
})
# there is no difference for datetime/timedelta in maximum/fmax
# and minimum/fmin
ufunc_db[np.fmax].update({
'MM->M': npdatetime.datetime_max_impl,
'mm->m': npdatetime.timedelta_max_impl,
})
ufunc_db[np.fmin].update({
'MM->M': npdatetime.datetime_min_impl,
'mm->m': npdatetime.timedelta_min_impl,
})
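# A minimal sketch of how the table built above is meant to be consumed: each
# ufunc maps to a dict keyed by its NumPy loop signature (e.g. 'dd->d' for a
# double-precision binary loop), and the value is the lowering implementation.
# The helper below is purely illustrative; its name and placement here are
# assumptions, not part of numba's actual API.
def _example_lookup_loop(db, ufunc, signature):
    """Return the implementation registered for (ufunc, signature),
    or None when that loop is not present in the database."""
    return db.get(ufunc, {}).get(signature)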
|
GaZ3ll3/numba
|
numba/targets/ufunc_db.py
|
Python
|
bsd-2-clause
| 34,872
|
# requests-oauth 0.4.0
# Hacked to support RSA-SHA1 signing for Atlassian OAuth.
# Original author: Miguel Araujo
# Forked from https://github.com/maraujop/requests_oauth
# Original license: 3-clause BSD
from hook import OAuthHook
|
blackspiraldev/jira-python
|
jira/packages/requests_oauth/__init__.py
|
Python
|
bsd-2-clause
| 235
|
# -*- coding: UTF-8 -*-
from lino.projects.std.settings import *
import logging
logging.getLogger('weasyprint').setLevel("ERROR") # see #1462
class Site(Site):
title = "Lino@prj1"
# server_url = "https://prj1.mydomain.com"
SITE = Site(globals())
# locally override attributes of individual plugins
# SITE.plugins.finan.suggest_future_vouchers = True
# MySQL
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'mysite', #database name
'USER': 'django',
'PASSWORD': 'my cool password',
'HOST': 'localhost',
'PORT': 3306,
'OPTIONS': {
"init_command": "SET storage_engine=MyISAM",
}
}
}
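# A hedged note on the OPTIONS block above: recent MySQL servers (5.7+) have
# dropped the 'storage_engine' session variable in favour of
# 'default_storage_engine', so on such a server the init command would need
# to read, for example:
#
#     'OPTIONS': {
#         "init_command": "SET default_storage_engine=MyISAM",
#     }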
|
lino-framework/book
|
docs/admin/mypy/prj1/settings.py
|
Python
|
bsd-2-clause
| 724
|
#!/usr/bin/python3
#
# Copyright (C) 2012 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Script for testing ganeti.rapi.testutils"""
import unittest
from ganeti import compat
from ganeti import constants
from ganeti import errors
from ganeti import opcodes
from ganeti import luxi
from ganeti import rapi
from ganeti import utils
import ganeti.rapi.testutils
import ganeti.rapi.client
import testutils
KNOWN_UNUSED_LUXI = compat.UniqueFrozenset([
luxi.REQ_SUBMIT_MANY_JOBS,
luxi.REQ_SUBMIT_JOB_TO_DRAINED_QUEUE,
luxi.REQ_ARCHIVE_JOB,
luxi.REQ_AUTO_ARCHIVE_JOBS,
luxi.REQ_CHANGE_JOB_PRIORITY,
luxi.REQ_PICKUP_JOB,
luxi.REQ_QUERY_EXPORTS,
luxi.REQ_QUERY_CONFIG_VALUES,
luxi.REQ_QUERY_NETWORKS,
luxi.REQ_QUERY_TAGS,
luxi.REQ_SET_DRAIN_FLAG,
luxi.REQ_SET_WATCHER_PAUSE,
])
# Global variable for storing used LUXI calls
_used_luxi_calls = None
class TestHideInternalErrors(unittest.TestCase):
def test(self):
def inner():
raise errors.GenericError("error")
fn = rapi.testutils._HideInternalErrors(inner)
self.assertRaises(rapi.testutils.VerificationError, fn)
class TestVerifyOpInput(unittest.TestCase):
def testUnknownOpId(self):
voi = rapi.testutils.VerifyOpInput
self.assertRaises(rapi.testutils.VerificationError, voi, "UNK_OP_ID", None)
def testUnknownParameter(self):
voi = rapi.testutils.VerifyOpInput
self.assertRaises(rapi.testutils.VerificationError, voi,
opcodes.OpClusterRename.OP_ID, {
"unk": "unk",
})
def testWrongParameterValue(self):
voi = rapi.testutils.VerifyOpInput
self.assertRaises(rapi.testutils.VerificationError, voi,
opcodes.OpClusterRename.OP_ID, {
"name": object(),
})
def testSuccess(self):
voi = rapi.testutils.VerifyOpInput
voi(opcodes.OpClusterRename.OP_ID, {
"name": "new-name.example.com",
})
class TestVerifyOpResult(unittest.TestCase):
def testSuccess(self):
vor = rapi.testutils.VerifyOpResult
vor(opcodes.OpClusterVerify.OP_ID, {
constants.JOB_IDS_KEY: [
(False, "error message"),
],
})
def testWrongResult(self):
vor = rapi.testutils.VerifyOpResult
self.assertRaises(rapi.testutils.VerificationError, vor,
opcodes.OpClusterVerify.OP_ID, [])
def testNoResultCheck(self):
vor = rapi.testutils.VerifyOpResult
vor(opcodes.OpTestDummy.OP_ID, None)
class TestInputTestClient(unittest.TestCase):
def setUp(self):
self.cl = rapi.testutils.InputTestClient()
def tearDown(self):
_used_luxi_calls.update(self.cl._GetLuxiCalls())
def testGetInfo(self):
self.assertTrue(self.cl.GetInfo() is NotImplemented)
def testPrepareExport(self):
result = self.cl.PrepareExport("inst1.example.com",
constants.EXPORT_MODE_LOCAL)
self.assertTrue(result is NotImplemented)
self.assertRaises(rapi.testutils.VerificationError, self.cl.PrepareExport,
"inst1.example.com", "###invalid###")
def testGetJobs(self):
self.assertTrue(self.cl.GetJobs() is NotImplemented)
def testQuery(self):
result = self.cl.Query(constants.QR_NODE, ["name"])
self.assertTrue(result is NotImplemented)
def testQueryFields(self):
result = self.cl.QueryFields(constants.QR_INSTANCE)
self.assertTrue(result is NotImplemented)
def testCancelJob(self):
self.assertTrue(self.cl.CancelJob("1") is NotImplemented)
def testGetNodes(self):
self.assertTrue(self.cl.GetNodes() is NotImplemented)
def testGetInstances(self):
self.assertTrue(self.cl.GetInstances() is NotImplemented)
def testGetGroups(self):
self.assertTrue(self.cl.GetGroups() is NotImplemented)
def testWaitForJobChange(self):
result = self.cl.WaitForJobChange("1", ["id"], None, None)
self.assertTrue(result is NotImplemented)
def testGetFilters(self):
self.assertTrue(self.cl.GetFilters() is NotImplemented)
def testGetFilter(self):
result = self.cl.GetFilter("4364c043-f232-41e3-837f-f1ce846f21d2")
self.assertTrue(result is NotImplemented)
def testReplaceFilter(self):
self.assertTrue(self.cl.ReplaceFilter(
uuid="c6a70f02-facb-4e37-b344-54f146dd0396",
priority=1,
predicates=[["jobid", [">", "id", "watermark"]]],
action="CONTINUE",
reason_trail=["testReplaceFilter", "myreason", utils.EpochNano()],
) is NotImplemented)
def testAddFilter(self):
self.assertTrue(self.cl.AddFilter(
priority=1,
predicates=[["jobid", [">", "id", "watermark"]]],
action="CONTINUE",
reason_trail=["testAddFilter", "myreason", utils.EpochNano()],
) is NotImplemented)
def testDeleteFilter(self):
self.assertTrue(self.cl.DeleteFilter(
uuid="c6a70f02-facb-4e37-b344-54f146dd0396",
) is NotImplemented)
class CustomTestRunner(unittest.TextTestRunner):
def run(self, *args):
global _used_luxi_calls
assert _used_luxi_calls is None
diff = (KNOWN_UNUSED_LUXI - luxi.REQ_ALL)
assert not diff, "Non-existing LUXI calls listed as unused: %s" % diff
_used_luxi_calls = set()
try:
# Run actual tests
result = unittest.TextTestRunner.run(self, *args)
diff = _used_luxi_calls & KNOWN_UNUSED_LUXI
if diff:
raise AssertionError("LUXI methods marked as unused were called: %s" %
utils.CommaJoin(diff))
diff = (luxi.REQ_ALL - KNOWN_UNUSED_LUXI - _used_luxi_calls)
if diff:
raise AssertionError("The following LUXI methods were not used: %s" %
utils.CommaJoin(diff))
finally:
# Reset global variable
_used_luxi_calls = None
return result
if __name__ == "__main__":
testutils.GanetiTestProgram(testRunner=CustomTestRunner)
|
ganeti/ganeti
|
test/py/ganeti.rapi.testutils_unittest.py
|
Python
|
bsd-2-clause
| 7,060
|
from tests import BaseTestCase
import mock
import time
from redash.models import User
from redash.authentication.account import invite_token
from tests.handlers import get_request, post_request
class TestInvite(BaseTestCase):
def test_expired_invite_token(self):
with mock.patch('time.time') as patched_time:
patched_time.return_value = time.time() - (7 * 24 * 3600) - 10
token = invite_token(self.factory.user)
response = get_request('/invite/{}'.format(token), org=self.factory.org)
self.assertEqual(response.status_code, 400)
def test_invalid_invite_token(self):
response = get_request('/invite/badtoken', org=self.factory.org)
self.assertEqual(response.status_code, 400)
def test_valid_token(self):
token = invite_token(self.factory.user)
response = get_request('/invite/{}'.format(token), org=self.factory.org)
self.assertEqual(response.status_code, 200)
def test_already_active_user(self):
pass
class TestInvitePost(BaseTestCase):
def test_empty_password(self):
token = invite_token(self.factory.user)
response = post_request('/invite/{}'.format(token), data={'password': ''}, org=self.factory.org)
self.assertEqual(response.status_code, 400)
def test_invalid_password(self):
token = invite_token(self.factory.user)
response = post_request('/invite/{}'.format(token), data={'password': '1234'}, org=self.factory.org)
self.assertEqual(response.status_code, 400)
def test_bad_token(self):
response = post_request('/invite/{}'.format('jdsnfkjdsnfkj'), data={'password': '1234'}, org=self.factory.org)
self.assertEqual(response.status_code, 400)
def test_already_active_user(self):
pass
def test_valid_password(self):
token = invite_token(self.factory.user)
password = 'test1234'
response = post_request('/invite/{}'.format(token), data={'password': password}, org=self.factory.org)
self.assertEqual(response.status_code, 302)
user = User.get_by_id(self.factory.user.id)
self.assertTrue(user.verify_password(password))
|
easytaxibr/redash
|
tests/handlers/test_authentication.py
|
Python
|
bsd-2-clause
| 2,187
|
#
# This file is part of pyasn1-modules software.
#
# Created by Russ Housley
# Copyright (c) 2019, Vigil Security, LLC
# License: http://snmplabs.com/pyasn1/license.html
#
import sys
import unittest
from pyasn1.codec.der.decoder import decode as der_decoder
from pyasn1.codec.der.encoder import encode as der_encoder
from pyasn1.type import univ
from pyasn1_modules import pem
from pyasn1_modules import rfc5280
from pyasn1_modules import rfc4043
class PermIdCertTestCase(unittest.TestCase):
cert_pem_text = """\
MIIDDTCCApOgAwIBAgIJAKWzVCgbsG5HMAoGCCqGSM49BAMDMD8xCzAJBgNVBAYT
AlVTMQswCQYDVQQIDAJWQTEQMA4GA1UEBwwHSGVybmRvbjERMA8GA1UECgwIQm9n
dXMgQ0EwHhcNMTkxMTEwMDA0MDIyWhcNMjAxMTA5MDA0MDIyWjBNMQswCQYDVQQG
EwJVUzELMAkGA1UECBMCVkExEDAOBgNVBAcTB0hlcm5kb24xEDAOBgNVBAoTB0V4
YW1wbGUxDTALBgNVBAMTBEdhaWwwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAQBoktg
/68xL+uEQaWBoHyOjw8EMLeMEng3R2H7yiEzTGoaMJgPOKvSfzB2P0paHYPL+B5y
Gc0CK5EHRujMl9ljH+Wydpk57rKBLo1ZzpWUS6anLGIkWs1sOakcgGGr7hGjggFL
MIIBRzAdBgNVHQ4EFgQU1pCNZuMzfEaJ9GGhH7RKy6Mvz+cwbwYDVR0jBGgwZoAU
8jXbNATapVXyvWkDmbBi7OIVCMGhQ6RBMD8xCzAJBgNVBAYTAlVTMQswCQYDVQQI
DAJWQTEQMA4GA1UEBwwHSGVybmRvbjERMA8GA1UECgwIQm9ndXMgQ0GCCQDokdYG
kU/O8jAPBgNVHRMBAf8EBTADAQH/MAsGA1UdDwQEAwIBhjBCBglghkgBhvhCAQ0E
NRYzVGhpcyBjZXJ0aWZpY2F0ZSBjYW5ub3QgYmUgdHJ1c3RlZCBmb3IgYW55IHB1
cnBvc2UuMFMGA1UdEQRMMEqgNgYIKwYBBQUHCAOgKjAoDBs4MjYyMDgtNDE3MDI4
LTU0ODE5NS0yMTUyMzMGCSsGAQQBgaxgMIEQZ2FpbEBleGFtcGxlLmNvbTAKBggq
hkjOPQQDAwNoADBlAjBT+36Y/LPaGSu+61P7kR97M8jAjtH5DtUwrWR02ChshvYJ
x0bpZq3PJaO0WlBgFicCMQCf+67wSvjxxtjI/OAg4t8NQIJW1LcehSXizlPDc772
/FC5OiUAxO+iFaSVMeDFsCo=
"""
def setUp(self):
self.asn1Spec = rfc5280.Certificate()
def testDerCodec(self):
substrate = pem.readBase64fromText(self.cert_pem_text)
asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
self.assertFalse(rest)
self.assertTrue(asn1Object.prettyPrint())
self.assertEqual(substrate, der_encoder(asn1Object))
perm_id_oid = rfc4043.id_on_permanentIdentifier
assigner_oid = univ.ObjectIdentifier('1.3.6.1.4.1.22112.48')
permanent_identifier_found = False
for extn in asn1Object['tbsCertificate']['extensions']:
if extn['extnID'] == rfc5280.id_ce_subjectAltName:
extnValue, rest = der_decoder(
extn['extnValue'], asn1Spec=rfc5280.SubjectAltName())
self.assertFalse(rest)
self.assertTrue(extnValue.prettyPrint())
self.assertEqual(extn['extnValue'], der_encoder(extnValue))
for gn in extnValue:
if gn['otherName'].hasValue():
self.assertEqual(perm_id_oid, gn['otherName']['type-id'])
onValue, rest = der_decoder(
gn['otherName']['value'],
asn1Spec=rfc4043.PermanentIdentifier())
self.assertFalse(rest)
self.assertTrue(onValue.prettyPrint())
self.assertEqual(gn['otherName']['value'], der_encoder(onValue))
self.assertEqual(assigner_oid, onValue['assigner'])
permanent_identifier_found = True
self.assertTrue(permanent_identifier_found)
def testOpenTypes(self):
substrate = pem.readBase64fromText(self.cert_pem_text)
asn1Object, rest = der_decoder(
substrate, asn1Spec=self.asn1Spec, decodeOpenTypes=True)
self.assertFalse(rest)
self.assertTrue(asn1Object.prettyPrint())
self.assertEqual(substrate, der_encoder(asn1Object))
perm_id_oid = rfc4043.id_on_permanentIdentifier
assigner_oid = univ.ObjectIdentifier('1.3.6.1.4.1.22112.48')
permanent_identifier_found = False
for extn in asn1Object['tbsCertificate']['extensions']:
if extn['extnID'] == rfc5280.id_ce_subjectAltName:
extnValue, rest = der_decoder(
extn['extnValue'], asn1Spec=rfc5280.SubjectAltName(),
decodeOpenTypes=True)
self.assertFalse(rest)
self.assertTrue(extnValue.prettyPrint())
self.assertEqual(extn['extnValue'], der_encoder(extnValue))
for gn in extnValue:
if gn['otherName'].hasValue():
on = gn['otherName']
self.assertEqual(perm_id_oid, on['type-id'])
self.assertEqual(assigner_oid, on['value']['assigner'])
permanent_identifier_found = True
self.assertTrue(permanent_identifier_found)
suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
if __name__ == '__main__':
result = unittest.TextTestRunner(verbosity=2).run(suite)
sys.exit(not result.wasSuccessful())
|
etingof/pyasn1-modules
|
tests/test_rfc4043.py
|
Python
|
bsd-2-clause
| 4,870
|
{% extends 'setup.py.jj2' %}
{%block platform_block%}
{%endblock%}
{%block additional_keywords%}
"plain",
"simple",
"grid",
"pipe",
"orgtbl",
"rst",
"mediawiki",
"latex",
"latex_booktabs",
"html",
"json"
{%endblock%}
|
pyexcel/pyexcel-text
|
.moban.d/setup.py
|
Python
|
bsd-3-clause
| 264
|
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import io
import os
import sys
import subprocess
import pytest
from ...tests.helper import catch_warnings
from ...extern import six
from ...utils.data import get_pkg_data_filename
from .. import configuration
from .. import paths
from ...utils.exceptions import AstropyDeprecationWarning
def test_paths():
assert 'astropy' in paths.get_config_dir()
assert 'astropy' in paths.get_cache_dir()
def test_set_temp_config(tmpdir, monkeypatch):
monkeypatch.setattr(paths.set_temp_config, '_temp_path', None)
orig_config_dir = paths.get_config_dir()
temp_config_dir = str(tmpdir.mkdir('config'))
temp_astropy_config = os.path.join(temp_config_dir, 'astropy')
# Test decorator mode
@paths.set_temp_config(temp_config_dir)
def test_func():
assert paths.get_config_dir() == temp_astropy_config
# Test temporary restoration of original default
with paths.set_temp_config() as d:
assert d == orig_config_dir == paths.get_config_dir()
test_func()
# Test context manager mode (with cleanup)
with paths.set_temp_config(temp_config_dir, delete=True):
assert paths.get_config_dir() == temp_astropy_config
assert not os.path.exists(temp_config_dir)
def test_set_temp_cache(tmpdir, monkeypatch):
monkeypatch.setattr(paths.set_temp_cache, '_temp_path', None)
orig_cache_dir = paths.get_cache_dir()
temp_cache_dir = str(tmpdir.mkdir('cache'))
temp_astropy_cache = os.path.join(temp_cache_dir, 'astropy')
# Test decorator mode
@paths.set_temp_cache(temp_cache_dir)
def test_func():
assert paths.get_cache_dir() == temp_astropy_cache
# Test temporary restoration of original default
with paths.set_temp_cache() as d:
assert d == orig_cache_dir == paths.get_cache_dir()
test_func()
# Test context manager mode (with cleanup)
with paths.set_temp_cache(temp_cache_dir, delete=True):
assert paths.get_cache_dir() == temp_astropy_cache
assert not os.path.exists(temp_cache_dir)
def test_config_file():
from ..configuration import get_config, reload_config
apycfg = get_config('astropy')
assert apycfg.filename.endswith('astropy.cfg')
cfgsec = get_config('astropy.config')
assert cfgsec.depth == 1
assert cfgsec.name == 'config'
assert cfgsec.parent.filename.endswith('astropy.cfg')
reload_config('astropy')
def test_configitem():
from ..configuration import ConfigNamespace, ConfigItem, get_config
ci = ConfigItem(34, 'this is a Description')
class Conf(ConfigNamespace):
tstnm = ci
conf = Conf()
assert ci.module == 'astropy.config.tests.test_configs'
assert ci() == 34
assert ci.description == 'this is a Description'
assert conf.tstnm == 34
sec = get_config(ci.module)
assert sec['tstnm'] == 34
ci.description = 'updated Descr'
ci.set(32)
assert ci() == 32
# It's useful to go back to the default to allow other test functions to
# call this one and still be in the default configuration.
ci.description = 'this is a Description'
ci.set(34)
assert ci() == 34
def test_configitem_types():
from ..configuration import ConfigNamespace, ConfigItem
cio = ConfigItem(['op1', 'op2', 'op3'])
class Conf(ConfigNamespace):
tstnm1 = ConfigItem(34)
tstnm2 = ConfigItem(34.3)
tstnm3 = ConfigItem(True)
tstnm4 = ConfigItem('astring')
conf = Conf()
assert isinstance(conf.tstnm1, int)
assert isinstance(conf.tstnm2, float)
assert isinstance(conf.tstnm3, bool)
assert isinstance(conf.tstnm4, six.text_type)
with pytest.raises(TypeError):
conf.tstnm1 = 34.3
    conf.tstnm2 = 12  # this should succeed as up-casting
with pytest.raises(TypeError):
conf.tstnm3 = 'fasd'
with pytest.raises(TypeError):
conf.tstnm4 = 546.245
def test_configitem_options(tmpdir):
from ..configuration import ConfigNamespace, ConfigItem, get_config
cio = ConfigItem(['op1', 'op2', 'op3'])
class Conf(ConfigNamespace):
tstnmo = cio
conf = Conf()
sec = get_config(cio.module)
assert isinstance(cio(), six.text_type)
assert cio() == 'op1'
assert sec['tstnmo'] == 'op1'
cio.set('op2')
with pytest.raises(TypeError):
cio.set('op5')
assert sec['tstnmo'] == 'op2'
# now try saving
apycfg = sec
while apycfg.parent is not apycfg:
apycfg = apycfg.parent
f = tmpdir.join('astropy.cfg')
with io.open(f.strpath, 'wb') as fd:
apycfg.write(fd)
with io.open(f.strpath, 'rU', encoding='utf-8') as fd:
        lns = [x.strip() for x in fd.readlines()]
assert 'tstnmo = op2' in lns
def test_config_noastropy_fallback(monkeypatch):
"""
Tests to make sure configuration items fall back to their defaults when
there's a problem accessing the astropy directory
"""
# make sure the config directory is not searched
monkeypatch.setenv(str('XDG_CONFIG_HOME'), 'foo')
monkeypatch.delenv(str('XDG_CONFIG_HOME'))
monkeypatch.setattr(paths.set_temp_config, '_temp_path', None)
# make sure the _find_or_create_astropy_dir function fails as though the
# astropy dir could not be accessed
def osraiser(dirnm, linkto):
raise OSError
monkeypatch.setattr(paths, '_find_or_create_astropy_dir', osraiser)
# also have to make sure the stored configuration objects are cleared
monkeypatch.setattr(configuration, '_cfgobjs', {})
with pytest.raises(OSError):
# make sure the config dir search fails
paths.get_config_dir()
# now run the basic tests, and make sure the warning about no astropy
# is present
with catch_warnings(configuration.ConfigurationMissingWarning) as w:
test_configitem()
assert len(w) == 1
w = w[0]
assert 'Configuration defaults will be used' in str(w.message)
def test_configitem_setters():
from ..configuration import ConfigNamespace, ConfigItem
class Conf(ConfigNamespace):
tstnm12 = ConfigItem(42, 'this is another Description')
conf = Conf()
assert conf.tstnm12 == 42
with conf.set_temp('tstnm12', 45):
assert conf.tstnm12 == 45
assert conf.tstnm12 == 42
conf.tstnm12 = 43
assert conf.tstnm12 == 43
with conf.set_temp('tstnm12', 46):
assert conf.tstnm12 == 46
# Make sure it is reset even with Exception
try:
with conf.set_temp('tstnm12', 47):
raise Exception
except Exception:
pass
assert conf.tstnm12 == 43
def test_empty_config_file():
from ..configuration import is_unedited_config_file
def get_content(fn):
with io.open(get_pkg_data_filename(fn), 'rt', encoding='latin-1') as fd:
return fd.read()
content = get_content('data/empty.cfg')
assert is_unedited_config_file(content)
content = get_content('data/not_empty.cfg')
assert not is_unedited_config_file(content)
content = get_content('data/astropy.0.3.cfg')
assert is_unedited_config_file(content)
content = get_content('data/astropy.0.3.windows.cfg')
assert is_unedited_config_file(content)
class TestAliasRead(object):
def setup_class(self):
configuration._override_config_file = get_pkg_data_filename('data/alias.cfg')
def test_alias_read(self):
from astropy.utils.data import conf
with catch_warnings() as w:
conf.reload()
assert conf.remote_timeout == 42
assert len(w) == 1
assert str(w[0].message).startswith(
"Config parameter 'name_resolve_timeout' in section "
"[coordinates.name_resolve]")
def teardown_class(self):
from astropy.utils.data import conf
configuration._override_config_file = None
conf.reload()
def test_configitem_unicode(tmpdir):
from ..configuration import ConfigNamespace, ConfigItem, get_config
cio = ConfigItem('ასტრონომიის')
class Conf(ConfigNamespace):
tstunicode = cio
conf = Conf()
sec = get_config(cio.module)
assert isinstance(cio(), six.text_type)
assert cio() == 'ასტრონომიის'
assert sec['tstunicode'] == 'ასტრონომიის'
def test_warning_move_to_top_level():
    # Check that the warning about deprecated config items in the
    # file works. See #2514
from ... import conf
configuration._override_config_file = get_pkg_data_filename('data/deprecated.cfg')
try:
with catch_warnings(AstropyDeprecationWarning) as w:
conf.reload()
conf.max_lines
assert len(w) == 1
finally:
configuration._override_config_file = None
conf.reload()
def test_no_home():
# "import astropy" fails when neither $HOME or $XDG_CONFIG_HOME
# are set. To test, we unset those environment variables for a
# subprocess and try to import astropy.
test_path = os.path.dirname(__file__)
astropy_path = os.path.abspath(
os.path.join(test_path, '..', '..', '..'))
env = os.environ.copy()
paths = [astropy_path]
if env.get('PYTHONPATH'):
paths.append(env.get('PYTHONPATH'))
env[str('PYTHONPATH')] = str(os.pathsep.join(paths))
for val in ['HOME', 'XDG_CONFIG_HOME']:
if val in env:
del env[val]
retcode = subprocess.check_call(
[sys.executable, '-c', 'import astropy'],
env=env)
assert retcode == 0
def test_unedited_template():
# Test that the config file is written at most once
config_dir = os.path.join(os.path.dirname(__file__), '..', '..')
configuration.update_default_config('astropy', config_dir)
assert configuration.update_default_config('astropy', config_dir) is False
|
kelle/astropy
|
astropy/config/tests/test_configs.py
|
Python
|
bsd-3-clause
| 10,072
|
# coding: utf-8
""" Test cases for .hist method """
import pytest
from pandas import Series, DataFrame
import pandas.util.testing as tm
import pandas.util._test_decorators as td
import numpy as np
from numpy.random import randn
from pandas.plotting._core import grouped_hist
from pandas.plotting._compat import _mpl_ge_2_2_0
from pandas.tests.plotting.common import (TestPlotBase, _check_plot_works)
@td.skip_if_no_mpl
class TestSeriesPlots(TestPlotBase):
def setup_method(self, method):
TestPlotBase.setup_method(self, method)
import matplotlib as mpl
mpl.rcdefaults()
self.ts = tm.makeTimeSeries()
self.ts.name = 'ts'
@pytest.mark.slow
def test_hist_legacy(self):
_check_plot_works(self.ts.hist)
_check_plot_works(self.ts.hist, grid=False)
_check_plot_works(self.ts.hist, figsize=(8, 10))
        # _check_plot_works adds an ax so catch the warning. See GH #13188
with tm.assert_produces_warning(UserWarning):
_check_plot_works(self.ts.hist, by=self.ts.index.month)
with tm.assert_produces_warning(UserWarning):
_check_plot_works(self.ts.hist, by=self.ts.index.month, bins=5)
fig, ax = self.plt.subplots(1, 1)
_check_plot_works(self.ts.hist, ax=ax)
_check_plot_works(self.ts.hist, ax=ax, figure=fig)
_check_plot_works(self.ts.hist, figure=fig)
tm.close()
fig, (ax1, ax2) = self.plt.subplots(1, 2)
_check_plot_works(self.ts.hist, figure=fig, ax=ax1)
_check_plot_works(self.ts.hist, figure=fig, ax=ax2)
with pytest.raises(ValueError):
self.ts.hist(by=self.ts.index, figure=fig)
@pytest.mark.slow
def test_hist_bins_legacy(self):
df = DataFrame(np.random.randn(10, 2))
ax = df.hist(bins=2)[0][0]
assert len(ax.patches) == 2
@pytest.mark.slow
def test_hist_layout(self):
df = self.hist_df
with pytest.raises(ValueError):
df.height.hist(layout=(1, 1))
with pytest.raises(ValueError):
df.height.hist(layout=[1, 1])
@pytest.mark.slow
def test_hist_layout_with_by(self):
df = self.hist_df
        # _check_plot_works adds an `ax` kwarg to the method call
        # so we get a warning about an axis being cleared, even
        # though we don't explicitly pass one, see GH #13188
with tm.assert_produces_warning(UserWarning):
axes = _check_plot_works(df.height.hist, by=df.gender,
layout=(2, 1))
self._check_axes_shape(axes, axes_num=2, layout=(2, 1))
with tm.assert_produces_warning(UserWarning):
axes = _check_plot_works(df.height.hist, by=df.gender,
layout=(3, -1))
self._check_axes_shape(axes, axes_num=2, layout=(3, 1))
with tm.assert_produces_warning(UserWarning):
axes = _check_plot_works(df.height.hist, by=df.category,
layout=(4, 1))
self._check_axes_shape(axes, axes_num=4, layout=(4, 1))
with tm.assert_produces_warning(UserWarning):
axes = _check_plot_works(
df.height.hist, by=df.category, layout=(2, -1))
self._check_axes_shape(axes, axes_num=4, layout=(2, 2))
with tm.assert_produces_warning(UserWarning):
axes = _check_plot_works(
df.height.hist, by=df.category, layout=(3, -1))
self._check_axes_shape(axes, axes_num=4, layout=(3, 2))
with tm.assert_produces_warning(UserWarning):
axes = _check_plot_works(
df.height.hist, by=df.category, layout=(-1, 4))
self._check_axes_shape(axes, axes_num=4, layout=(1, 4))
with tm.assert_produces_warning(UserWarning):
axes = _check_plot_works(
df.height.hist, by=df.classroom, layout=(2, 2))
self._check_axes_shape(axes, axes_num=3, layout=(2, 2))
axes = df.height.hist(by=df.category, layout=(4, 2), figsize=(12, 7))
self._check_axes_shape(
axes, axes_num=4, layout=(4, 2), figsize=(12, 7))
@pytest.mark.slow
def test_hist_no_overlap(self):
from matplotlib.pyplot import subplot, gcf
x = Series(randn(2))
y = Series(randn(2))
subplot(121)
x.hist()
subplot(122)
y.hist()
fig = gcf()
axes = fig.axes
assert len(axes) == 2
@pytest.mark.slow
def test_hist_by_no_extra_plots(self):
df = self.hist_df
axes = df.height.hist(by=df.gender) # noqa
assert len(self.plt.get_fignums()) == 1
@pytest.mark.slow
def test_plot_fails_when_ax_differs_from_figure(self):
from pylab import figure
fig1 = figure()
fig2 = figure()
ax1 = fig1.add_subplot(111)
with pytest.raises(AssertionError):
self.ts.hist(ax=ax1, figure=fig2)
@td.skip_if_no_mpl
class TestDataFramePlots(TestPlotBase):
@pytest.mark.slow
def test_hist_df_legacy(self):
from matplotlib.patches import Rectangle
with tm.assert_produces_warning(UserWarning):
_check_plot_works(self.hist_df.hist)
# make sure layout is handled
df = DataFrame(randn(100, 3))
with tm.assert_produces_warning(UserWarning):
axes = _check_plot_works(df.hist, grid=False)
self._check_axes_shape(axes, axes_num=3, layout=(2, 2))
assert not axes[1, 1].get_visible()
df = DataFrame(randn(100, 1))
_check_plot_works(df.hist)
# make sure layout is handled
df = DataFrame(randn(100, 6))
with tm.assert_produces_warning(UserWarning):
axes = _check_plot_works(df.hist, layout=(4, 2))
self._check_axes_shape(axes, axes_num=6, layout=(4, 2))
# make sure sharex, sharey is handled
with tm.assert_produces_warning(UserWarning):
_check_plot_works(df.hist, sharex=True, sharey=True)
# handle figsize arg
with tm.assert_produces_warning(UserWarning):
_check_plot_works(df.hist, figsize=(8, 10))
# check bins argument
with tm.assert_produces_warning(UserWarning):
_check_plot_works(df.hist, bins=5)
# make sure xlabelsize and xrot are handled
ser = df[0]
xf, yf = 20, 18
xrot, yrot = 30, 40
axes = ser.hist(xlabelsize=xf, xrot=xrot, ylabelsize=yf, yrot=yrot)
self._check_ticks_props(axes, xlabelsize=xf, xrot=xrot,
ylabelsize=yf, yrot=yrot)
xf, yf = 20, 18
xrot, yrot = 30, 40
axes = df.hist(xlabelsize=xf, xrot=xrot, ylabelsize=yf, yrot=yrot)
self._check_ticks_props(axes, xlabelsize=xf, xrot=xrot,
ylabelsize=yf, yrot=yrot)
tm.close()
# make sure kwargs to hist are handled
if _mpl_ge_2_2_0():
kwargs = {"density": True}
else:
kwargs = {"normed": True}
ax = ser.hist(cumulative=True, bins=4, **kwargs)
        # height of the last bin must be 1.0
rects = [x for x in ax.get_children() if isinstance(x, Rectangle)]
tm.assert_almost_equal(rects[-1].get_height(), 1.0)
tm.close()
ax = ser.hist(log=True)
# scale of y must be 'log'
self._check_ax_scales(ax, yaxis='log')
tm.close()
# propagate attr exception from matplotlib.Axes.hist
with pytest.raises(AttributeError):
ser.hist(foo='bar')
@pytest.mark.slow
def test_hist_layout(self):
df = DataFrame(randn(100, 3))
layout_to_expected_size = (
{'layout': None, 'expected_size': (2, 2)}, # default is 2x2
{'layout': (2, 2), 'expected_size': (2, 2)},
{'layout': (4, 1), 'expected_size': (4, 1)},
{'layout': (1, 4), 'expected_size': (1, 4)},
{'layout': (3, 3), 'expected_size': (3, 3)},
{'layout': (-1, 4), 'expected_size': (1, 4)},
{'layout': (4, -1), 'expected_size': (4, 1)},
{'layout': (-1, 2), 'expected_size': (2, 2)},
{'layout': (2, -1), 'expected_size': (2, 2)}
)
for layout_test in layout_to_expected_size:
axes = df.hist(layout=layout_test['layout'])
expected = layout_test['expected_size']
self._check_axes_shape(axes, axes_num=3, layout=expected)
# layout too small for all 4 plots
with pytest.raises(ValueError):
df.hist(layout=(1, 1))
# invalid format for layout
with pytest.raises(ValueError):
df.hist(layout=(1,))
with pytest.raises(ValueError):
df.hist(layout=(-1, -1))
    # GH 9351
    @pytest.mark.slow
def test_tight_layout(self):
if self.mpl_ge_2_0_1:
df = DataFrame(randn(100, 3))
_check_plot_works(df.hist)
self.plt.tight_layout()
tm.close()
@td.skip_if_no_mpl
class TestDataFrameGroupByPlots(TestPlotBase):
@pytest.mark.slow
def test_grouped_hist_legacy(self):
from matplotlib.patches import Rectangle
df = DataFrame(randn(500, 2), columns=['A', 'B'])
df['C'] = np.random.randint(0, 4, 500)
df['D'] = ['X'] * 500
axes = grouped_hist(df.A, by=df.C)
self._check_axes_shape(axes, axes_num=4, layout=(2, 2))
tm.close()
axes = df.hist(by=df.C)
self._check_axes_shape(axes, axes_num=4, layout=(2, 2))
tm.close()
# group by a key with single value
axes = df.hist(by='D', rot=30)
self._check_axes_shape(axes, axes_num=1, layout=(1, 1))
self._check_ticks_props(axes, xrot=30)
tm.close()
# make sure kwargs to hist are handled
xf, yf = 20, 18
xrot, yrot = 30, 40
if _mpl_ge_2_2_0():
kwargs = {"density": True}
else:
kwargs = {"normed": True}
axes = grouped_hist(df.A, by=df.C, cumulative=True,
bins=4, xlabelsize=xf, xrot=xrot,
ylabelsize=yf, yrot=yrot, **kwargs)
        # height of the last bin must be 1.0
for ax in axes.ravel():
rects = [x for x in ax.get_children() if isinstance(x, Rectangle)]
height = rects[-1].get_height()
tm.assert_almost_equal(height, 1.0)
self._check_ticks_props(axes, xlabelsize=xf, xrot=xrot,
ylabelsize=yf, yrot=yrot)
tm.close()
axes = grouped_hist(df.A, by=df.C, log=True)
# scale of y must be 'log'
self._check_ax_scales(axes, yaxis='log')
tm.close()
# propagate attr exception from matplotlib.Axes.hist
with pytest.raises(AttributeError):
grouped_hist(df.A, by=df.C, foo='bar')
with tm.assert_produces_warning(FutureWarning):
df.hist(by='C', figsize='default')
@pytest.mark.slow
def test_grouped_hist_legacy2(self):
n = 10
weight = Series(np.random.normal(166, 20, size=n))
height = Series(np.random.normal(60, 10, size=n))
with tm.RNGContext(42):
gender_int = np.random.choice([0, 1], size=n)
df_int = DataFrame({'height': height, 'weight': weight,
'gender': gender_int})
gb = df_int.groupby('gender')
axes = gb.hist()
assert len(axes) == 2
assert len(self.plt.get_fignums()) == 2
tm.close()
@pytest.mark.slow
def test_grouped_hist_layout(self):
df = self.hist_df
pytest.raises(ValueError, df.hist, column='weight', by=df.gender,
layout=(1, 1))
pytest.raises(ValueError, df.hist, column='height', by=df.category,
layout=(1, 3))
pytest.raises(ValueError, df.hist, column='height', by=df.category,
layout=(-1, -1))
with tm.assert_produces_warning(UserWarning):
axes = _check_plot_works(df.hist, column='height', by=df.gender,
layout=(2, 1))
self._check_axes_shape(axes, axes_num=2, layout=(2, 1))
with tm.assert_produces_warning(UserWarning):
axes = _check_plot_works(df.hist, column='height', by=df.gender,
layout=(2, -1))
self._check_axes_shape(axes, axes_num=2, layout=(2, 1))
axes = df.hist(column='height', by=df.category, layout=(4, 1))
self._check_axes_shape(axes, axes_num=4, layout=(4, 1))
axes = df.hist(column='height', by=df.category, layout=(-1, 1))
self._check_axes_shape(axes, axes_num=4, layout=(4, 1))
axes = df.hist(column='height', by=df.category,
layout=(4, 2), figsize=(12, 8))
self._check_axes_shape(
axes, axes_num=4, layout=(4, 2), figsize=(12, 8))
tm.close()
# GH 6769
with tm.assert_produces_warning(UserWarning):
axes = _check_plot_works(
df.hist, column='height', by='classroom', layout=(2, 2))
self._check_axes_shape(axes, axes_num=3, layout=(2, 2))
# without column
with tm.assert_produces_warning(UserWarning):
axes = _check_plot_works(df.hist, by='classroom')
self._check_axes_shape(axes, axes_num=3, layout=(2, 2))
axes = df.hist(by='gender', layout=(3, 5))
self._check_axes_shape(axes, axes_num=2, layout=(3, 5))
axes = df.hist(column=['height', 'weight', 'category'])
self._check_axes_shape(axes, axes_num=3, layout=(2, 2))
@pytest.mark.slow
def test_grouped_hist_multiple_axes(self):
# GH 6970, GH 7069
df = self.hist_df
fig, axes = self.plt.subplots(2, 3)
returned = df.hist(column=['height', 'weight', 'category'], ax=axes[0])
self._check_axes_shape(returned, axes_num=3, layout=(1, 3))
tm.assert_numpy_array_equal(returned, axes[0])
assert returned[0].figure is fig
returned = df.hist(by='classroom', ax=axes[1])
self._check_axes_shape(returned, axes_num=3, layout=(1, 3))
tm.assert_numpy_array_equal(returned, axes[1])
assert returned[0].figure is fig
with pytest.raises(ValueError):
fig, axes = self.plt.subplots(2, 3)
# pass different number of axes from required
axes = df.hist(column='height', ax=axes)
@pytest.mark.slow
def test_axis_share_x(self):
df = self.hist_df
# GH4089
ax1, ax2 = df.hist(column='height', by=df.gender, sharex=True)
# share x
assert ax1._shared_x_axes.joined(ax1, ax2)
assert ax2._shared_x_axes.joined(ax1, ax2)
# don't share y
assert not ax1._shared_y_axes.joined(ax1, ax2)
assert not ax2._shared_y_axes.joined(ax1, ax2)
@pytest.mark.slow
def test_axis_share_y(self):
df = self.hist_df
ax1, ax2 = df.hist(column='height', by=df.gender, sharey=True)
# share y
assert ax1._shared_y_axes.joined(ax1, ax2)
assert ax2._shared_y_axes.joined(ax1, ax2)
# don't share x
assert not ax1._shared_x_axes.joined(ax1, ax2)
assert not ax2._shared_x_axes.joined(ax1, ax2)
@pytest.mark.slow
def test_axis_share_xy(self):
df = self.hist_df
ax1, ax2 = df.hist(column='height', by=df.gender, sharex=True,
sharey=True)
# share both x and y
assert ax1._shared_x_axes.joined(ax1, ax2)
assert ax2._shared_x_axes.joined(ax1, ax2)
assert ax1._shared_y_axes.joined(ax1, ax2)
assert ax2._shared_y_axes.joined(ax1, ax2)
|
harisbal/pandas
|
pandas/tests/plotting/test_hist_method.py
|
Python
|
bsd-3-clause
| 15,814
|
# -*- coding: utf-8 -*-
"""
Unit tests for reverse URL lookups.
"""
from __future__ import unicode_literals
import sys
import threading
import unittest
from admin_scripts.tests import AdminScriptTestCase
from django.conf import settings
from django.conf.urls import include, url
from django.contrib.auth.models import User
from django.core.exceptions import ImproperlyConfigured, ViewDoesNotExist
from django.http import (
HttpRequest, HttpResponsePermanentRedirect, HttpResponseRedirect,
)
from django.shortcuts import redirect
from django.test import (
SimpleTestCase, TestCase, ignore_warnings, override_settings,
)
from django.test.utils import override_script_prefix
from django.urls import (
NoReverseMatch, RegexURLPattern, RegexURLResolver, Resolver404,
ResolverMatch, get_callable, get_resolver, resolve, reverse, reverse_lazy,
)
from django.utils import six
from django.utils.deprecation import RemovedInDjango20Warning
from . import middleware, urlconf_outer, views
from .utils import URLObject
from .views import empty_view
resolve_test_data = (
# These entries are in the format: (path, url_name, app_name, namespace, view_name, func, args, kwargs)
# Simple case
('/normal/42/37/', 'normal-view', '', '', 'normal-view', views.empty_view, tuple(), {'arg1': '42', 'arg2': '37'}),
(
'/view_class/42/37/', 'view-class', '', '', 'view-class', views.view_class_instance, tuple(),
{'arg1': '42', 'arg2': '37'}
),
(
'/included/normal/42/37/', 'inc-normal-view', '', '', 'inc-normal-view', views.empty_view, tuple(),
{'arg1': '42', 'arg2': '37'}
),
(
'/included/view_class/42/37/', 'inc-view-class', '', '', 'inc-view-class', views.view_class_instance, tuple(),
{'arg1': '42', 'arg2': '37'}
),
# Unnamed args are dropped if you have *any* kwargs in a pattern
('/mixed_args/42/37/', 'mixed-args', '', '', 'mixed-args', views.empty_view, tuple(), {'arg2': '37'}),
(
'/included/mixed_args/42/37/', 'inc-mixed-args', '', '', 'inc-mixed-args', views.empty_view, tuple(),
{'arg2': '37'}
),
(
'/included/12/mixed_args/42/37/', 'inc-mixed-args', '', '', 'inc-mixed-args', views.empty_view, tuple(),
{'arg2': '37'}
),
# Unnamed views should have None as the url_name. Regression data for #21157.
(
'/unnamed/normal/42/37/', None, '', '', 'urlpatterns_reverse.views.empty_view', views.empty_view, tuple(),
{'arg1': '42', 'arg2': '37'}
),
(
'/unnamed/view_class/42/37/', None, '', '', 'urlpatterns_reverse.views.ViewClass', views.view_class_instance,
tuple(), {'arg1': '42', 'arg2': '37'}
),
# If you have no kwargs, you get an args list.
('/no_kwargs/42/37/', 'no-kwargs', '', '', 'no-kwargs', views.empty_view, ('42', '37'), {}),
('/included/no_kwargs/42/37/', 'inc-no-kwargs', '', '', 'inc-no-kwargs', views.empty_view, ('42', '37'), {}),
(
'/included/12/no_kwargs/42/37/', 'inc-no-kwargs', '', '', 'inc-no-kwargs', views.empty_view,
('12', '42', '37'), {}
),
# Namespaces
(
'/test1/inner/42/37/', 'urlobject-view', 'testapp', 'test-ns1', 'test-ns1:urlobject-view',
views.empty_view, tuple(), {'arg1': '42', 'arg2': '37'}
),
(
'/included/test3/inner/42/37/', 'urlobject-view', 'testapp', 'test-ns3', 'test-ns3:urlobject-view',
views.empty_view, tuple(), {'arg1': '42', 'arg2': '37'}
),
(
'/ns-included1/normal/42/37/', 'inc-normal-view', '', 'inc-ns1', 'inc-ns1:inc-normal-view', views.empty_view,
tuple(), {'arg1': '42', 'arg2': '37'}
),
(
'/included/test3/inner/42/37/', 'urlobject-view', 'testapp', 'test-ns3', 'test-ns3:urlobject-view',
views.empty_view, tuple(), {'arg1': '42', 'arg2': '37'}
),
(
'/default/inner/42/37/', 'urlobject-view', 'testapp', 'testapp', 'testapp:urlobject-view', views.empty_view,
tuple(), {'arg1': '42', 'arg2': '37'}
),
(
'/other2/inner/42/37/', 'urlobject-view', 'nodefault', 'other-ns2', 'other-ns2:urlobject-view',
views.empty_view, tuple(), {'arg1': '42', 'arg2': '37'}
),
(
'/other1/inner/42/37/', 'urlobject-view', 'nodefault', 'other-ns1', 'other-ns1:urlobject-view',
views.empty_view, tuple(), {'arg1': '42', 'arg2': '37'}
),
# Nested namespaces
(
'/ns-included1/test3/inner/42/37/', 'urlobject-view', 'testapp', 'inc-ns1:test-ns3',
'inc-ns1:test-ns3:urlobject-view', views.empty_view, tuple(), {'arg1': '42', 'arg2': '37'}
),
(
'/ns-included1/ns-included4/ns-included2/test3/inner/42/37/', 'urlobject-view', 'testapp',
'inc-ns1:inc-ns4:inc-ns2:test-ns3', 'inc-ns1:inc-ns4:inc-ns2:test-ns3:urlobject-view', views.empty_view,
tuple(), {'arg1': '42', 'arg2': '37'}
),
(
'/app-included/test3/inner/42/37/', 'urlobject-view', 'inc-app:testapp', 'inc-app:test-ns3',
'inc-app:test-ns3:urlobject-view', views.empty_view, tuple(), {'arg1': '42', 'arg2': '37'}
),
(
'/app-included/ns-included4/ns-included2/test3/inner/42/37/', 'urlobject-view', 'inc-app:testapp',
'inc-app:inc-ns4:inc-ns2:test-ns3', 'inc-app:inc-ns4:inc-ns2:test-ns3:urlobject-view', views.empty_view,
tuple(), {'arg1': '42', 'arg2': '37'}
),
# Namespaces capturing variables
('/inc70/', 'inner-nothing', '', 'inc-ns5', 'inc-ns5:inner-nothing', views.empty_view, tuple(), {'outer': '70'}),
(
'/inc78/extra/foobar/', 'inner-extra', '', 'inc-ns5', 'inc-ns5:inner-extra', views.empty_view, tuple(),
{'outer': '78', 'extra': 'foobar'}
),
)
test_data = (
('places', '/places/3/', [3], {}),
('places', '/places/3/', ['3'], {}),
('places', NoReverseMatch, ['a'], {}),
('places', NoReverseMatch, [], {}),
('places?', '/place/', [], {}),
('places+', '/places/', [], {}),
('places*', '/place/', [], {}),
('places2?', '/', [], {}),
('places2+', '/places/', [], {}),
('places2*', '/', [], {}),
('places3', '/places/4/', [4], {}),
('places3', '/places/harlem/', ['harlem'], {}),
('places3', NoReverseMatch, ['harlem64'], {}),
('places4', '/places/3/', [], {'id': 3}),
('people', NoReverseMatch, [], {}),
('people', '/people/adrian/', ['adrian'], {}),
('people', '/people/adrian/', [], {'name': 'adrian'}),
('people', NoReverseMatch, ['name with spaces'], {}),
('people', NoReverseMatch, [], {'name': 'name with spaces'}),
('people2', '/people/name/', [], {}),
('people2a', '/people/name/fred/', ['fred'], {}),
('people_backref', '/people/nate-nate/', ['nate'], {}),
('people_backref', '/people/nate-nate/', [], {'name': 'nate'}),
('optional', '/optional/fred/', [], {'name': 'fred'}),
('optional', '/optional/fred/', ['fred'], {}),
('named_optional', '/optional/1/', [1], {}),
('named_optional', '/optional/1/', [], {'arg1': 1}),
('named_optional', '/optional/1/2/', [1, 2], {}),
('named_optional', '/optional/1/2/', [], {'arg1': 1, 'arg2': 2}),
('named_optional_terminated', '/optional/1/2/', [1, 2], {}),
('named_optional_terminated', '/optional/1/2/', [], {'arg1': 1, 'arg2': 2}),
('hardcoded', '/hardcoded/', [], {}),
('hardcoded2', '/hardcoded/doc.pdf', [], {}),
('people3', '/people/il/adrian/', [], {'state': 'il', 'name': 'adrian'}),
('people3', NoReverseMatch, [], {'state': 'il'}),
('people3', NoReverseMatch, [], {'name': 'adrian'}),
('people4', NoReverseMatch, [], {'state': 'il', 'name': 'adrian'}),
('people6', '/people/il/test/adrian/', ['il/test', 'adrian'], {}),
('people6', '/people//adrian/', ['adrian'], {}),
('range', '/character_set/a/', [], {}),
('range2', '/character_set/x/', [], {}),
('price', '/price/$10/', ['10'], {}),
('price2', '/price/$10/', ['10'], {}),
('price3', '/price/$10/', ['10'], {}),
('product', '/product/chocolate+($2.00)/', [], {'price': '2.00', 'product': 'chocolate'}),
('headlines', '/headlines/2007.5.21/', [], dict(year=2007, month=5, day=21)),
(
'windows', r'/windows_path/C:%5CDocuments%20and%20Settings%5Cspam/', [],
dict(drive_name='C', path=r'Documents and Settings\spam')
),
('special', r'/special_chars/~@+%5C$*%7C/', [r'~@+\$*|'], {}),
('special', r'/special_chars/some%20resource/', [r'some resource'], {}),
('special', r'/special_chars/10%25%20complete/', [r'10% complete'], {}),
('special', r'/special_chars/some%20resource/', [], {'chars': r'some resource'}),
('special', r'/special_chars/10%25%20complete/', [], {'chars': r'10% complete'}),
('special', NoReverseMatch, [''], {}),
('mixed', '/john/0/', [], {'name': 'john'}),
('repeats', '/repeats/a/', [], {}),
('repeats2', '/repeats/aa/', [], {}),
('repeats3', '/repeats/aa/', [], {}),
('insensitive', '/CaseInsensitive/fred', ['fred'], {}),
('test', '/test/1', [], {}),
('test2', '/test/2', [], {}),
('inner-nothing', '/outer/42/', [], {'outer': '42'}),
('inner-nothing', '/outer/42/', ['42'], {}),
('inner-nothing', NoReverseMatch, ['foo'], {}),
('inner-extra', '/outer/42/extra/inner/', [], {'extra': 'inner', 'outer': '42'}),
('inner-extra', '/outer/42/extra/inner/', ['42', 'inner'], {}),
('inner-extra', NoReverseMatch, ['fred', 'inner'], {}),
('inner-no-kwargs', '/outer-no-kwargs/42/inner-no-kwargs/1/', ['42', '1'], {}),
('disjunction', NoReverseMatch, ['foo'], {}),
('inner-disjunction', NoReverseMatch, ['10', '11'], {}),
('extra-places', '/e-places/10/', ['10'], {}),
('extra-people', '/e-people/fred/', ['fred'], {}),
('extra-people', '/e-people/fred/', [], {'name': 'fred'}),
('part', '/part/one/', [], {'value': 'one'}),
('part', '/prefix/xx/part/one/', [], {'value': 'one', 'prefix': 'xx'}),
('part2', '/part2/one/', [], {'value': 'one'}),
('part2', '/part2/', [], {}),
('part2', '/prefix/xx/part2/one/', [], {'value': 'one', 'prefix': 'xx'}),
('part2', '/prefix/xx/part2/', [], {'prefix': 'xx'}),
# Tests for nested groups. Nested capturing groups will only work if you
# *only* supply the correct outer group.
('nested-noncapture', '/nested/noncapture/opt', [], {'p': 'opt'}),
('nested-capture', '/nested/capture/opt/', ['opt/'], {}),
('nested-capture', NoReverseMatch, [], {'p': 'opt'}),
('nested-mixedcapture', '/nested/capture/mixed/opt', ['opt'], {}),
('nested-mixedcapture', NoReverseMatch, [], {'p': 'opt'}),
('nested-namedcapture', '/nested/capture/named/opt/', [], {'outer': 'opt/'}),
('nested-namedcapture', NoReverseMatch, [], {'outer': 'opt/', 'inner': 'opt'}),
('nested-namedcapture', NoReverseMatch, [], {'inner': 'opt'}),
('non_path_include', '/includes/non_path_include/', [], {}),
# Tests for #13154
('defaults', '/defaults_view1/3/', [], {'arg1': 3, 'arg2': 1}),
('defaults', '/defaults_view2/3/', [], {'arg1': 3, 'arg2': 2}),
('defaults', NoReverseMatch, [], {'arg1': 3, 'arg2': 3}),
('defaults', NoReverseMatch, [], {'arg2': 1}),
# Security tests
('security', '/%2Fexample.com/security/', ['/example.com'], {}),
)
@override_settings(ROOT_URLCONF='urlpatterns_reverse.no_urls')
class NoURLPatternsTests(SimpleTestCase):
def test_no_urls_exception(self):
"""
RegexURLResolver should raise an exception when no urlpatterns exist.
"""
resolver = RegexURLResolver(r'^$', settings.ROOT_URLCONF)
with self.assertRaisesMessage(
ImproperlyConfigured,
"The included URLconf 'urlpatterns_reverse.no_urls' does not "
"appear to have any patterns in it. If you see valid patterns in "
"the file then the issue is probably caused by a circular import."
):
getattr(resolver, 'url_patterns')
@override_settings(ROOT_URLCONF='urlpatterns_reverse.urls')
class URLPatternReverse(SimpleTestCase):
def test_urlpattern_reverse(self):
for name, expected, args, kwargs in test_data:
try:
got = reverse(name, args=args, kwargs=kwargs)
except NoReverseMatch:
self.assertEqual(expected, NoReverseMatch)
else:
self.assertEqual(got, expected)
def test_reverse_none(self):
# Reversing None should raise an error, not return the last un-named view.
with self.assertRaises(NoReverseMatch):
reverse(None)
@override_script_prefix('/{{invalid}}/')
def test_prefix_braces(self):
self.assertEqual(
'/%7B%7Binvalid%7D%7D/includes/non_path_include/',
reverse('non_path_include')
)
def test_prefix_parenthesis(self):
# Parentheses are allowed and should not cause errors or be escaped
with override_script_prefix('/bogus)/'):
self.assertEqual(
'/bogus)/includes/non_path_include/',
reverse('non_path_include')
)
with override_script_prefix('/(bogus)/'):
self.assertEqual(
'/(bogus)/includes/non_path_include/',
reverse('non_path_include')
)
@override_script_prefix('/bump%20map/')
def test_prefix_format_char(self):
self.assertEqual(
'/bump%2520map/includes/non_path_include/',
reverse('non_path_include')
)
@override_script_prefix('/%7Eme/')
def test_non_urlsafe_prefix_with_args(self):
# Regression for #20022, adjusted for #24013 because ~ is an unreserved
# character. Tests whether % is escaped.
self.assertEqual('/%257Eme/places/1/', reverse('places', args=[1]))
def test_patterns_reported(self):
# Regression for #17076
with self.assertRaisesMessage(NoReverseMatch, r"1 pattern(s) tried: ['people/(?P<name>\\w+)/$']"):
# this url exists, but requires an argument
reverse("people", args=[])
@override_script_prefix('/script:name/')
def test_script_name_escaping(self):
self.assertEqual(
reverse('optional', args=['foo:bar']),
'/script:name/optional/foo:bar/'
)
def test_reverse_returns_unicode(self):
name, expected, args, kwargs = test_data[0]
self.assertIsInstance(
reverse(name, args=args, kwargs=kwargs),
six.text_type
)
class ResolverTests(SimpleTestCase):
@ignore_warnings(category=RemovedInDjango20Warning)
def test_resolver_repr(self):
"""
Test repr of RegexURLResolver, especially when urlconf_name is a list
(#17892).
"""
# Pick a resolver from a namespaced URLconf
resolver = get_resolver('urlpatterns_reverse.namespace_urls')
sub_resolver = resolver.namespace_dict['test-ns1'][1]
self.assertIn('<RegexURLPattern list>', repr(sub_resolver))
def test_reverse_lazy_object_coercion_by_resolve(self):
"""
Verifies lazy object returned by reverse_lazy is coerced to
text by resolve(). Previous to #21043, this would raise a TypeError.
"""
urls = 'urlpatterns_reverse.named_urls'
proxy_url = reverse_lazy('named-url1', urlconf=urls)
resolver = get_resolver(urls)
resolver.resolve(proxy_url)
def test_resolver_reverse(self):
resolver = get_resolver('urlpatterns_reverse.named_urls')
self.assertEqual(resolver.reverse('named-url1'), '')
self.assertEqual(resolver.reverse('named-url2', 'arg'), 'extra/arg/')
self.assertEqual(resolver.reverse('named-url2', extra='arg'), 'extra/arg/')
def test_non_regex(self):
"""
A Resolver404 is raised if resolving doesn't meet the basic
requirements of a path to match - i.e., at the very least, it matches
the root pattern '^/'. Never return None from resolve() to prevent a
        TypeError from occurring later (#10834).
"""
with self.assertRaises(Resolver404):
resolve('')
with self.assertRaises(Resolver404):
resolve('a')
with self.assertRaises(Resolver404):
resolve('\\')
with self.assertRaises(Resolver404):
resolve('.')
def test_404_tried_urls_have_names(self):
"""
The list of URLs that come back from a Resolver404 exception contains
a list in the right format for printing out in the DEBUG 404 page with
both the patterns and URL names, if available.
"""
urls = 'urlpatterns_reverse.named_urls'
# this list matches the expected URL types and names returned when
# you try to resolve a non-existent URL in the first level of included
# URLs in named_urls.py (e.g., '/included/non-existent-url')
url_types_names = [
[{'type': RegexURLPattern, 'name': 'named-url1'}],
[{'type': RegexURLPattern, 'name': 'named-url2'}],
[{'type': RegexURLPattern, 'name': None}],
[{'type': RegexURLResolver}, {'type': RegexURLPattern, 'name': 'named-url3'}],
[{'type': RegexURLResolver}, {'type': RegexURLPattern, 'name': 'named-url4'}],
[{'type': RegexURLResolver}, {'type': RegexURLPattern, 'name': None}],
[{'type': RegexURLResolver}, {'type': RegexURLResolver}],
]
with self.assertRaisesMessage(Resolver404, b'tried' if six.PY2 else 'tried') as cm:
resolve('/included/non-existent-url', urlconf=urls)
e = cm.exception
# make sure we at least matched the root ('/') url resolver:
self.assertIn('tried', e.args[0])
tried = e.args[0]['tried']
self.assertEqual(
len(e.args[0]['tried']),
len(url_types_names),
'Wrong number of tried URLs returned. Expected %s, got %s.' % (
len(url_types_names), len(e.args[0]['tried'])
)
)
for tried, expected in zip(e.args[0]['tried'], url_types_names):
for t, e in zip(tried, expected):
                self.assertIsInstance(t, e['type'], '%s is not an instance of %s' % (t, e['type']))
if 'name' in e:
if not e['name']:
self.assertIsNone(t.name, 'Expected no URL name but found %s.' % t.name)
else:
self.assertEqual(
t.name,
e['name'],
'Wrong URL name. Expected "%s", got "%s".' % (e['name'], t.name)
)
def test_namespaced_view_detail(self):
resolver = get_resolver('urlpatterns_reverse.nested_urls')
self.assertTrue(resolver._is_callback('urlpatterns_reverse.nested_urls.view1'))
self.assertTrue(resolver._is_callback('urlpatterns_reverse.nested_urls.view2'))
self.assertTrue(resolver._is_callback('urlpatterns_reverse.nested_urls.View3'))
self.assertFalse(resolver._is_callback('urlpatterns_reverse.nested_urls.blub'))
@unittest.skipIf(six.PY2, "Python 2 doesn't support __qualname__.")
def test_view_detail_as_method(self):
# Views which have a class name as part of their path.
resolver = get_resolver('urlpatterns_reverse.method_view_urls')
self.assertTrue(resolver._is_callback('urlpatterns_reverse.method_view_urls.ViewContainer.method_view'))
self.assertTrue(resolver._is_callback('urlpatterns_reverse.method_view_urls.ViewContainer.classmethod_view'))
def test_populate_concurrency(self):
"""
RegexURLResolver._populate() can be called concurrently, but not more
than once per thread (#26888).
"""
resolver = RegexURLResolver(r'^/', 'urlpatterns_reverse.urls')
resolver._local.populating = True
thread = threading.Thread(target=resolver._populate)
thread.start()
thread.join()
self.assertNotEqual(resolver._reverse_dict, {})
@override_settings(ROOT_URLCONF='urlpatterns_reverse.reverse_lazy_urls')
class ReverseLazyTest(TestCase):
def test_redirect_with_lazy_reverse(self):
response = self.client.get('/redirect/')
self.assertRedirects(response, "/redirected_to/", status_code=302)
def test_user_permission_with_lazy_reverse(self):
alfred = User.objects.create_user('alfred', 'alfred@example.com', password='testpw')
response = self.client.get('/login_required_view/')
self.assertRedirects(response, "/login/?next=/login_required_view/", status_code=302)
self.client.force_login(alfred)
response = self.client.get('/login_required_view/')
self.assertEqual(response.status_code, 200)
def test_inserting_reverse_lazy_into_string(self):
self.assertEqual(
'Some URL: %s' % reverse_lazy('some-login-page'),
'Some URL: /login/'
)
if six.PY2:
self.assertEqual(
b'Some URL: %s' % reverse_lazy('some-login-page'),
'Some URL: /login/'
)
class ReverseLazySettingsTest(AdminScriptTestCase):
"""
reverse_lazy can be used in settings without causing a circular
import error.
"""
def setUp(self):
self.write_settings('settings.py', extra="""
from django.urls import reverse_lazy
LOGIN_URL = reverse_lazy('login')""")
def tearDown(self):
self.remove_settings('settings.py')
def test_lazy_in_settings(self):
out, err = self.run_manage(['check'])
self.assertNoOutput(err)
@override_settings(ROOT_URLCONF='urlpatterns_reverse.urls')
class ReverseShortcutTests(SimpleTestCase):
def test_redirect_to_object(self):
# We don't really need a model; just something with a get_absolute_url
class FakeObj(object):
def get_absolute_url(self):
return "/hi-there/"
res = redirect(FakeObj())
self.assertIsInstance(res, HttpResponseRedirect)
self.assertEqual(res.url, '/hi-there/')
res = redirect(FakeObj(), permanent=True)
self.assertIsInstance(res, HttpResponsePermanentRedirect)
self.assertEqual(res.url, '/hi-there/')
def test_redirect_to_view_name(self):
res = redirect('hardcoded2')
self.assertEqual(res.url, '/hardcoded/doc.pdf')
res = redirect('places', 1)
self.assertEqual(res.url, '/places/1/')
res = redirect('headlines', year='2008', month='02', day='17')
self.assertEqual(res.url, '/headlines/2008.02.17/')
with self.assertRaises(NoReverseMatch):
redirect('not-a-view')
def test_redirect_to_url(self):
res = redirect('/foo/')
self.assertEqual(res.url, '/foo/')
res = redirect('http://example.com/')
self.assertEqual(res.url, 'http://example.com/')
# Assert that we can redirect using UTF-8 strings
res = redirect('/æøå/abc/')
self.assertEqual(res.url, '/%C3%A6%C3%B8%C3%A5/abc/')
# Assert that no imports are attempted when dealing with a relative path
        # (previously, the below would result in a UnicodeEncodeError from __import__)
res = redirect('/æøå.abc/')
self.assertEqual(res.url, '/%C3%A6%C3%B8%C3%A5.abc/')
res = redirect('os.path')
self.assertEqual(res.url, 'os.path')
def test_no_illegal_imports(self):
# modules that are not listed in urlpatterns should not be importable
redirect("urlpatterns_reverse.nonimported_module.view")
self.assertNotIn("urlpatterns_reverse.nonimported_module", sys.modules)
def test_reverse_by_path_nested(self):
# Views added to urlpatterns using include() should be reversible.
from .views import nested_view
self.assertEqual(reverse(nested_view), '/includes/nested_path/')
def test_redirect_view_object(self):
from .views import absolute_kwargs_view
res = redirect(absolute_kwargs_view)
self.assertEqual(res.url, '/absolute_arg_view/')
with self.assertRaises(NoReverseMatch):
redirect(absolute_kwargs_view, wrong_argument=None)
@override_settings(ROOT_URLCONF='urlpatterns_reverse.namespace_urls')
@ignore_warnings(category=RemovedInDjango20Warning)
class NamespaceTests(SimpleTestCase):
def test_ambiguous_object(self):
"Names deployed via dynamic URL objects that require namespaces can't be resolved"
with self.assertRaises(NoReverseMatch):
reverse('urlobject-view')
with self.assertRaises(NoReverseMatch):
reverse('urlobject-view', args=[37, 42])
with self.assertRaises(NoReverseMatch):
reverse('urlobject-view', kwargs={'arg1': 42, 'arg2': 37})
def test_ambiguous_urlpattern(self):
"Names deployed via dynamic URL objects that require namespaces can't be resolved"
with self.assertRaises(NoReverseMatch):
reverse('inner-nothing')
with self.assertRaises(NoReverseMatch):
reverse('inner-nothing', args=[37, 42])
with self.assertRaises(NoReverseMatch):
reverse('inner-nothing', kwargs={'arg1': 42, 'arg2': 37})
def test_non_existent_namespace(self):
"Non-existent namespaces raise errors"
with self.assertRaises(NoReverseMatch):
reverse('blahblah:urlobject-view')
with self.assertRaises(NoReverseMatch):
reverse('test-ns1:blahblah:urlobject-view')
def test_normal_name(self):
"Normal lookups work as expected"
self.assertEqual('/normal/', reverse('normal-view'))
self.assertEqual('/normal/37/42/', reverse('normal-view', args=[37, 42]))
self.assertEqual('/normal/42/37/', reverse('normal-view', kwargs={'arg1': 42, 'arg2': 37}))
self.assertEqual('/+%5C$*/', reverse('special-view'))
def test_simple_included_name(self):
"Normal lookups work on names included from other patterns"
self.assertEqual('/included/normal/', reverse('inc-normal-view'))
self.assertEqual('/included/normal/37/42/', reverse('inc-normal-view', args=[37, 42]))
self.assertEqual('/included/normal/42/37/', reverse('inc-normal-view', kwargs={'arg1': 42, 'arg2': 37}))
self.assertEqual('/included/+%5C$*/', reverse('inc-special-view'))
def test_namespace_object(self):
"Dynamic URL objects can be found using a namespace"
self.assertEqual('/test1/inner/', reverse('test-ns1:urlobject-view'))
self.assertEqual('/test1/inner/37/42/', reverse('test-ns1:urlobject-view', args=[37, 42]))
self.assertEqual('/test1/inner/42/37/', reverse('test-ns1:urlobject-view', kwargs={'arg1': 42, 'arg2': 37}))
self.assertEqual('/test1/inner/+%5C$*/', reverse('test-ns1:urlobject-special-view'))
def test_app_object(self):
"Dynamic URL objects can return a (pattern, app_name) 2-tuple, and include() can set the namespace"
self.assertEqual('/newapp1/inner/', reverse('new-ns1:urlobject-view'))
self.assertEqual('/newapp1/inner/37/42/', reverse('new-ns1:urlobject-view', args=[37, 42]))
self.assertEqual('/newapp1/inner/42/37/', reverse('new-ns1:urlobject-view', kwargs={'arg1': 42, 'arg2': 37}))
self.assertEqual('/newapp1/inner/+%5C$*/', reverse('new-ns1:urlobject-special-view'))
def test_app_object_default_namespace(self):
"Namespace defaults to app_name when including a (pattern, app_name) 2-tuple"
self.assertEqual('/new-default/inner/', reverse('newapp:urlobject-view'))
self.assertEqual('/new-default/inner/37/42/', reverse('newapp:urlobject-view', args=[37, 42]))
self.assertEqual(
'/new-default/inner/42/37/', reverse('newapp:urlobject-view', kwargs={'arg1': 42, 'arg2': 37})
)
self.assertEqual('/new-default/inner/+%5C$*/', reverse('newapp:urlobject-special-view'))
def test_embedded_namespace_object(self):
"Namespaces can be installed anywhere in the URL pattern tree"
self.assertEqual('/included/test3/inner/', reverse('test-ns3:urlobject-view'))
self.assertEqual('/included/test3/inner/37/42/', reverse('test-ns3:urlobject-view', args=[37, 42]))
self.assertEqual(
'/included/test3/inner/42/37/', reverse('test-ns3:urlobject-view', kwargs={'arg1': 42, 'arg2': 37})
)
self.assertEqual('/included/test3/inner/+%5C$*/', reverse('test-ns3:urlobject-special-view'))
def test_namespace_pattern(self):
"Namespaces can be applied to include()'d urlpatterns"
self.assertEqual('/ns-included1/normal/', reverse('inc-ns1:inc-normal-view'))
self.assertEqual('/ns-included1/normal/37/42/', reverse('inc-ns1:inc-normal-view', args=[37, 42]))
self.assertEqual(
'/ns-included1/normal/42/37/', reverse('inc-ns1:inc-normal-view', kwargs={'arg1': 42, 'arg2': 37})
)
self.assertEqual('/ns-included1/+%5C$*/', reverse('inc-ns1:inc-special-view'))
def test_app_name_pattern(self):
"Namespaces can be applied to include()'d urlpatterns that set an app_name attribute"
self.assertEqual('/app-included1/normal/', reverse('app-ns1:inc-normal-view'))
self.assertEqual('/app-included1/normal/37/42/', reverse('app-ns1:inc-normal-view', args=[37, 42]))
self.assertEqual(
'/app-included1/normal/42/37/', reverse('app-ns1:inc-normal-view', kwargs={'arg1': 42, 'arg2': 37})
)
self.assertEqual('/app-included1/+%5C$*/', reverse('app-ns1:inc-special-view'))
def test_namespace_pattern_with_variable_prefix(self):
"When using an include with namespaces when there is a regex variable in front of it"
self.assertEqual('/ns-outer/42/normal/', reverse('inc-outer:inc-normal-view', kwargs={'outer': 42}))
self.assertEqual('/ns-outer/42/normal/', reverse('inc-outer:inc-normal-view', args=[42]))
self.assertEqual(
'/ns-outer/42/normal/37/4/',
reverse('inc-outer:inc-normal-view', kwargs={'outer': 42, 'arg1': 37, 'arg2': 4})
)
self.assertEqual('/ns-outer/42/normal/37/4/', reverse('inc-outer:inc-normal-view', args=[42, 37, 4]))
self.assertEqual('/ns-outer/42/+%5C$*/', reverse('inc-outer:inc-special-view', kwargs={'outer': 42}))
self.assertEqual('/ns-outer/42/+%5C$*/', reverse('inc-outer:inc-special-view', args=[42]))
def test_multiple_namespace_pattern(self):
"Namespaces can be embedded"
self.assertEqual('/ns-included1/test3/inner/', reverse('inc-ns1:test-ns3:urlobject-view'))
self.assertEqual('/ns-included1/test3/inner/37/42/', reverse('inc-ns1:test-ns3:urlobject-view', args=[37, 42]))
self.assertEqual(
'/ns-included1/test3/inner/42/37/',
reverse('inc-ns1:test-ns3:urlobject-view', kwargs={'arg1': 42, 'arg2': 37})
)
self.assertEqual('/ns-included1/test3/inner/+%5C$*/', reverse('inc-ns1:test-ns3:urlobject-special-view'))
def test_nested_namespace_pattern(self):
"Namespaces can be nested"
self.assertEqual(
'/ns-included1/ns-included4/ns-included1/test3/inner/',
reverse('inc-ns1:inc-ns4:inc-ns1:test-ns3:urlobject-view')
)
self.assertEqual(
'/ns-included1/ns-included4/ns-included1/test3/inner/37/42/',
reverse('inc-ns1:inc-ns4:inc-ns1:test-ns3:urlobject-view', args=[37, 42])
)
self.assertEqual(
'/ns-included1/ns-included4/ns-included1/test3/inner/42/37/',
reverse('inc-ns1:inc-ns4:inc-ns1:test-ns3:urlobject-view', kwargs={'arg1': 42, 'arg2': 37})
)
self.assertEqual(
'/ns-included1/ns-included4/ns-included1/test3/inner/+%5C$*/',
reverse('inc-ns1:inc-ns4:inc-ns1:test-ns3:urlobject-special-view')
)
def test_app_lookup_object(self):
"A default application namespace can be used for lookup"
self.assertEqual('/default/inner/', reverse('testapp:urlobject-view'))
self.assertEqual('/default/inner/37/42/', reverse('testapp:urlobject-view', args=[37, 42]))
self.assertEqual('/default/inner/42/37/', reverse('testapp:urlobject-view', kwargs={'arg1': 42, 'arg2': 37}))
self.assertEqual('/default/inner/+%5C$*/', reverse('testapp:urlobject-special-view'))
def test_app_lookup_object_with_default(self):
"A default application namespace is sensitive to the 'current' app can be used for lookup"
self.assertEqual('/included/test3/inner/', reverse('testapp:urlobject-view', current_app='test-ns3'))
self.assertEqual(
'/included/test3/inner/37/42/',
reverse('testapp:urlobject-view', args=[37, 42], current_app='test-ns3')
)
self.assertEqual(
'/included/test3/inner/42/37/',
reverse('testapp:urlobject-view', kwargs={'arg1': 42, 'arg2': 37}, current_app='test-ns3')
)
self.assertEqual(
'/included/test3/inner/+%5C$*/', reverse('testapp:urlobject-special-view', current_app='test-ns3')
)
def test_app_lookup_object_without_default(self):
"An application namespace without a default is sensitive to the 'current' app can be used for lookup"
self.assertEqual('/other2/inner/', reverse('nodefault:urlobject-view'))
self.assertEqual('/other2/inner/37/42/', reverse('nodefault:urlobject-view', args=[37, 42]))
self.assertEqual('/other2/inner/42/37/', reverse('nodefault:urlobject-view', kwargs={'arg1': 42, 'arg2': 37}))
self.assertEqual('/other2/inner/+%5C$*/', reverse('nodefault:urlobject-special-view'))
self.assertEqual('/other1/inner/', reverse('nodefault:urlobject-view', current_app='other-ns1'))
self.assertEqual(
'/other1/inner/37/42/', reverse('nodefault:urlobject-view', args=[37, 42], current_app='other-ns1')
)
self.assertEqual(
'/other1/inner/42/37/',
reverse('nodefault:urlobject-view', kwargs={'arg1': 42, 'arg2': 37}, current_app='other-ns1')
)
self.assertEqual('/other1/inner/+%5C$*/', reverse('nodefault:urlobject-special-view', current_app='other-ns1'))
def test_special_chars_namespace(self):
self.assertEqual('/+%5C$*/included/normal/', reverse('special:inc-normal-view'))
self.assertEqual('/+%5C$*/included/normal/37/42/', reverse('special:inc-normal-view', args=[37, 42]))
self.assertEqual(
'/+%5C$*/included/normal/42/37/',
reverse('special:inc-normal-view', kwargs={'arg1': 42, 'arg2': 37})
)
self.assertEqual('/+%5C$*/included/+%5C$*/', reverse('special:inc-special-view'))
def test_namespaces_with_variables(self):
"Namespace prefixes can capture variables: see #15900"
self.assertEqual('/inc70/', reverse('inc-ns5:inner-nothing', kwargs={'outer': '70'}))
self.assertEqual(
'/inc78/extra/foobar/', reverse('inc-ns5:inner-extra', kwargs={'outer': '78', 'extra': 'foobar'})
)
self.assertEqual('/inc70/', reverse('inc-ns5:inner-nothing', args=['70']))
self.assertEqual('/inc78/extra/foobar/', reverse('inc-ns5:inner-extra', args=['78', 'foobar']))
def test_nested_app_lookup(self):
"A nested current_app should be split in individual namespaces (#24904)"
self.assertEqual('/ns-included1/test4/inner/', reverse('inc-ns1:testapp:urlobject-view'))
self.assertEqual('/ns-included1/test4/inner/37/42/', reverse('inc-ns1:testapp:urlobject-view', args=[37, 42]))
self.assertEqual(
'/ns-included1/test4/inner/42/37/',
reverse('inc-ns1:testapp:urlobject-view', kwargs={'arg1': 42, 'arg2': 37})
)
self.assertEqual('/ns-included1/test4/inner/+%5C$*/', reverse('inc-ns1:testapp:urlobject-special-view'))
self.assertEqual(
'/ns-included1/test3/inner/',
reverse('inc-ns1:testapp:urlobject-view', current_app='inc-ns1:test-ns3')
)
self.assertEqual(
'/ns-included1/test3/inner/37/42/',
reverse('inc-ns1:testapp:urlobject-view', args=[37, 42], current_app='inc-ns1:test-ns3')
)
self.assertEqual(
'/ns-included1/test3/inner/42/37/',
reverse('inc-ns1:testapp:urlobject-view', kwargs={'arg1': 42, 'arg2': 37}, current_app='inc-ns1:test-ns3')
)
self.assertEqual(
'/ns-included1/test3/inner/+%5C$*/',
reverse('inc-ns1:testapp:urlobject-special-view', current_app='inc-ns1:test-ns3')
)
def test_current_app_no_partial_match(self):
"current_app should either match the whole path or shouldn't be used"
self.assertEqual(
'/ns-included1/test4/inner/',
reverse('inc-ns1:testapp:urlobject-view', current_app='non-existent:test-ns3')
)
self.assertEqual(
'/ns-included1/test4/inner/37/42/',
reverse('inc-ns1:testapp:urlobject-view', args=[37, 42], current_app='non-existent:test-ns3')
)
self.assertEqual(
'/ns-included1/test4/inner/42/37/',
reverse('inc-ns1:testapp:urlobject-view', kwargs={'arg1': 42, 'arg2': 37},
current_app='non-existent:test-ns3')
)
self.assertEqual(
'/ns-included1/test4/inner/+%5C$*/',
reverse('inc-ns1:testapp:urlobject-special-view', current_app='non-existent:test-ns3')
)
@override_settings(ROOT_URLCONF=urlconf_outer.__name__)
class RequestURLconfTests(SimpleTestCase):
def test_urlconf(self):
response = self.client.get('/test/me/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'outer:/test/me/,inner:/inner_urlconf/second_test/')
response = self.client.get('/inner_urlconf/second_test/')
self.assertEqual(response.status_code, 200)
response = self.client.get('/second_test/')
self.assertEqual(response.status_code, 404)
@override_settings(
MIDDLEWARE=[
'%s.ChangeURLconfMiddleware' % middleware.__name__,
]
)
def test_urlconf_overridden(self):
response = self.client.get('/test/me/')
self.assertEqual(response.status_code, 404)
response = self.client.get('/inner_urlconf/second_test/')
self.assertEqual(response.status_code, 404)
response = self.client.get('/second_test/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'outer:,inner:/second_test/')
@override_settings(
MIDDLEWARE=[
'%s.NullChangeURLconfMiddleware' % middleware.__name__,
]
)
def test_urlconf_overridden_with_null(self):
"""
Overriding request.urlconf with None will fall back to the default
URLconf.
"""
response = self.client.get('/test/me/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'outer:/test/me/,inner:/inner_urlconf/second_test/')
response = self.client.get('/inner_urlconf/second_test/')
self.assertEqual(response.status_code, 200)
response = self.client.get('/second_test/')
self.assertEqual(response.status_code, 404)
@override_settings(
MIDDLEWARE=[
'%s.ChangeURLconfMiddleware' % middleware.__name__,
'%s.ReverseInnerInResponseMiddleware' % middleware.__name__,
]
)
def test_reverse_inner_in_response_middleware(self):
"""
        Test reversing a URL from the *overridden* URLconf from inside
a response middleware.
"""
response = self.client.get('/second_test/')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content, b'/second_test/')
@override_settings(
MIDDLEWARE=[
'%s.ChangeURLconfMiddleware' % middleware.__name__,
'%s.ReverseOuterInResponseMiddleware' % middleware.__name__,
]
)
def test_reverse_outer_in_response_middleware(self):
"""
        Test reversing a URL from the *default* URLconf from inside
a response middleware.
"""
message = "Reverse for 'outer' with arguments '()' and keyword arguments '{}' not found."
with self.assertRaisesMessage(NoReverseMatch, message):
self.client.get('/second_test/')
@override_settings(
MIDDLEWARE=[
'%s.ChangeURLconfMiddleware' % middleware.__name__,
'%s.ReverseInnerInStreaming' % middleware.__name__,
]
)
def test_reverse_inner_in_streaming(self):
"""
        Test reversing a URL from the *overridden* URLconf from inside
a streaming response.
"""
response = self.client.get('/second_test/')
self.assertEqual(response.status_code, 200)
self.assertEqual(b''.join(response), b'/second_test/')
@override_settings(
MIDDLEWARE=[
'%s.ChangeURLconfMiddleware' % middleware.__name__,
'%s.ReverseOuterInStreaming' % middleware.__name__,
]
)
def test_reverse_outer_in_streaming(self):
"""
        Test reversing a URL from the *default* URLconf from inside
a streaming response.
"""
message = "Reverse for 'outer' with arguments '()' and keyword arguments '{}' not found."
with self.assertRaisesMessage(NoReverseMatch, message):
self.client.get('/second_test/')
b''.join(self.client.get('/second_test/'))
class ErrorHandlerResolutionTests(SimpleTestCase):
"""Tests for handler400, handler404 and handler500"""
def setUp(self):
urlconf = 'urlpatterns_reverse.urls_error_handlers'
urlconf_callables = 'urlpatterns_reverse.urls_error_handlers_callables'
self.resolver = RegexURLResolver(r'^$', urlconf)
self.callable_resolver = RegexURLResolver(r'^$', urlconf_callables)
def test_named_handlers(self):
handler = (empty_view, {})
self.assertEqual(self.resolver.resolve_error_handler(400), handler)
self.assertEqual(self.resolver.resolve_error_handler(404), handler)
self.assertEqual(self.resolver.resolve_error_handler(500), handler)
def test_callable_handlers(self):
handler = (empty_view, {})
self.assertEqual(self.callable_resolver.resolve_error_handler(400), handler)
self.assertEqual(self.callable_resolver.resolve_error_handler(404), handler)
self.assertEqual(self.callable_resolver.resolve_error_handler(500), handler)
@override_settings(ROOT_URLCONF='urlpatterns_reverse.urls_without_full_import')
class DefaultErrorHandlerTests(SimpleTestCase):
def test_default_handler(self):
"If the urls.py doesn't specify handlers, the defaults are used"
response = self.client.get('/test/')
self.assertEqual(response.status_code, 404)
with self.assertRaisesMessage(ValueError, "I don't think I'm getting good"):
self.client.get('/bad_view/')
@override_settings(ROOT_URLCONF=None)
class NoRootUrlConfTests(SimpleTestCase):
"""Tests for handler404 and handler500 if ROOT_URLCONF is None"""
def test_no_handler_exception(self):
with self.assertRaises(ImproperlyConfigured):
self.client.get('/test/me/')
@override_settings(ROOT_URLCONF='urlpatterns_reverse.namespace_urls')
class ResolverMatchTests(SimpleTestCase):
@ignore_warnings(category=RemovedInDjango20Warning)
def test_urlpattern_resolve(self):
for path, url_name, app_name, namespace, view_name, func, args, kwargs in resolve_test_data:
# Test legacy support for extracting "function, args, kwargs"
match_func, match_args, match_kwargs = resolve(path)
self.assertEqual(match_func, func)
self.assertEqual(match_args, args)
self.assertEqual(match_kwargs, kwargs)
# Test ResolverMatch capabilities.
match = resolve(path)
self.assertEqual(match.__class__, ResolverMatch)
self.assertEqual(match.url_name, url_name)
self.assertEqual(match.app_name, app_name)
self.assertEqual(match.namespace, namespace)
self.assertEqual(match.view_name, view_name)
self.assertEqual(match.func, func)
self.assertEqual(match.args, args)
self.assertEqual(match.kwargs, kwargs)
# ... and for legacy purposes:
self.assertEqual(match[0], func)
self.assertEqual(match[1], args)
self.assertEqual(match[2], kwargs)
@ignore_warnings(category=RemovedInDjango20Warning)
def test_resolver_match_on_request(self):
response = self.client.get('/resolver_match/')
resolver_match = response.resolver_match
self.assertEqual(resolver_match.url_name, 'test-resolver-match')
def test_resolver_match_on_request_before_resolution(self):
request = HttpRequest()
self.assertIsNone(request.resolver_match)
@override_settings(ROOT_URLCONF='urlpatterns_reverse.erroneous_urls')
class ErroneousViewTests(SimpleTestCase):
def test_noncallable_view(self):
# View is not a callable (explicit import; arbitrary Python object)
with self.assertRaisesMessage(TypeError, 'view must be a callable'):
url(r'uncallable-object/$', views.uncallable)
def test_invalid_regex(self):
# Regex contains an error (refs #6170)
msg = '(regex_error/$" is not a valid regular expression'
with self.assertRaisesMessage(ImproperlyConfigured, msg):
reverse(views.empty_view)
class ViewLoadingTests(SimpleTestCase):
def test_view_loading(self):
self.assertEqual(get_callable('urlpatterns_reverse.views.empty_view'), empty_view)
# passing a callable should return the callable
self.assertEqual(get_callable(empty_view), empty_view)
def test_exceptions(self):
# A missing view (identified by an AttributeError) should raise
# ViewDoesNotExist, ...
with self.assertRaisesMessage(ViewDoesNotExist, "View does not exist in"):
get_callable('urlpatterns_reverse.views.i_should_not_exist')
# ... but if the AttributeError is caused by something else don't
# swallow it.
with self.assertRaises(AttributeError):
get_callable('urlpatterns_reverse.views_broken.i_am_broken')
class IncludeTests(SimpleTestCase):
url_patterns = [
url(r'^inner/$', views.empty_view, name='urlobject-view'),
url(r'^inner/(?P<arg1>[0-9]+)/(?P<arg2>[0-9]+)/$', views.empty_view, name='urlobject-view'),
url(r'^inner/\+\\\$\*/$', views.empty_view, name='urlobject-special-view'),
]
app_urls = URLObject('inc-app')
def test_include_app_name_but_no_namespace(self):
msg = "Must specify a namespace if specifying app_name."
with self.assertRaisesMessage(ValueError, msg):
include(self.url_patterns, app_name='bar')
def test_include_urls(self):
self.assertEqual(include(self.url_patterns), (self.url_patterns, None, None))
@ignore_warnings(category=RemovedInDjango20Warning)
def test_include_namespace(self):
# no app_name -> deprecated
self.assertEqual(include(self.url_patterns, 'namespace'), (self.url_patterns, None, 'namespace'))
@ignore_warnings(category=RemovedInDjango20Warning)
def test_include_namespace_app_name(self):
# app_name argument to include -> deprecated
self.assertEqual(
include(self.url_patterns, 'namespace', 'app_name'),
(self.url_patterns, 'app_name', 'namespace')
)
@ignore_warnings(category=RemovedInDjango20Warning)
def test_include_3_tuple(self):
# 3-tuple -> deprecated
self.assertEqual(
include((self.url_patterns, 'app_name', 'namespace')),
(self.url_patterns, 'app_name', 'namespace')
)
def test_include_2_tuple(self):
self.assertEqual(
include((self.url_patterns, 'app_name')),
(self.url_patterns, 'app_name', 'app_name')
)
def test_include_2_tuple_namespace(self):
self.assertEqual(
include((self.url_patterns, 'app_name'), namespace='namespace'),
(self.url_patterns, 'app_name', 'namespace')
)
def test_include_app_name(self):
self.assertEqual(
include(self.app_urls),
(self.app_urls, 'inc-app', 'inc-app')
)
def test_include_app_name_namespace(self):
self.assertEqual(
include(self.app_urls, 'namespace'),
(self.app_urls, 'inc-app', 'namespace')
)
@override_settings(ROOT_URLCONF='urlpatterns_reverse.urls')
class LookaheadTests(SimpleTestCase):
def test_valid_resolve(self):
test_urls = [
'/lookahead-/a-city/',
'/lookbehind-/a-city/',
'/lookahead+/a-city/',
'/lookbehind+/a-city/',
]
for test_url in test_urls:
match = resolve(test_url)
self.assertEqual(match.kwargs, {'city': 'a-city'})
def test_invalid_resolve(self):
test_urls = [
'/lookahead-/not-a-city/',
'/lookbehind-/not-a-city/',
'/lookahead+/other-city/',
'/lookbehind+/other-city/',
]
for test_url in test_urls:
with self.assertRaises(Resolver404):
resolve(test_url)
def test_valid_reverse(self):
url = reverse('lookahead-positive', kwargs={'city': 'a-city'})
self.assertEqual(url, '/lookahead+/a-city/')
url = reverse('lookahead-negative', kwargs={'city': 'a-city'})
self.assertEqual(url, '/lookahead-/a-city/')
url = reverse('lookbehind-positive', kwargs={'city': 'a-city'})
self.assertEqual(url, '/lookbehind+/a-city/')
url = reverse('lookbehind-negative', kwargs={'city': 'a-city'})
self.assertEqual(url, '/lookbehind-/a-city/')
def test_invalid_reverse(self):
with self.assertRaises(NoReverseMatch):
reverse('lookahead-positive', kwargs={'city': 'other-city'})
with self.assertRaises(NoReverseMatch):
reverse('lookahead-negative', kwargs={'city': 'not-a-city'})
with self.assertRaises(NoReverseMatch):
reverse('lookbehind-positive', kwargs={'city': 'other-city'})
with self.assertRaises(NoReverseMatch):
reverse('lookbehind-negative', kwargs={'city': 'not-a-city'})
|
dfunckt/django
|
tests/urlpatterns_reverse/tests.py
|
Python
|
bsd-3-clause
| 50,749
|
from __future__ import with_statement
import pytest
from django.core import mail
from django.db import connection
from pytest_django_test.app.models import Item
# It doesn't matter in which order the _again methods are run; we just need
# to check that the environment remains constant.
# This is possible with some of the testdir magic, but this is the lazy way
# to do it.
def test_mail():
assert len(mail.outbox) == 0
mail.send_mail('subject', 'body', 'from@example.com', ['to@example.com'])
assert len(mail.outbox) == 1
m = mail.outbox[0]
assert m.subject == 'subject'
assert m.body == 'body'
assert m.from_email == 'from@example.com'
assert list(m.to) == ['to@example.com']
def test_mail_again():
test_mail()
@pytest.mark.django_project(extra_settings="""
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
ROOT_URLCONF = 'tpkg.app.urls'
""")
def test_invalid_template_variable(django_testdir):
django_testdir.create_app_file("""
from django.conf.urls import url
from pytest_django_test.compat import patterns
from tpkg.app import views
urlpatterns = patterns(
'',
url(r'invalid_template/', views.invalid_template),
)
""", 'urls.py')
django_testdir.create_app_file("""
from django.shortcuts import render
def invalid_template(request):
return render(request, 'invalid_template.html', {})
""", 'views.py')
django_testdir.create_app_file(
"<div>{{ invalid_var }}</div>",
'templates/invalid_template.html'
)
django_testdir.create_test_module('''
import pytest
def test_for_invalid_template(client):
client.get('/invalid_template/')
@pytest.mark.ignore_template_errors
def test_ignore(client):
client.get('/invalid_template/')
''')
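    # With --fail-on-template-vars the first test should fail on the undefined
    # variable, while the test marked ignore_template_errors passes, hence 'F.'.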
result = django_testdir.runpytest_subprocess('-s', '--fail-on-template-vars')
result.stdout.fnmatch_lines_random([
"tpkg/test_the_test.py F.",
"Undefined template variable 'invalid_var' in 'invalid_template.html'",
])
@pytest.mark.django_project(extra_settings="""
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
ROOT_URLCONF = 'tpkg.app.urls'
""")
def test_invalid_template_variable_opt_in(django_testdir):
django_testdir.create_app_file("""
from django.conf.urls import url
from pytest_django_test.compat import patterns
from tpkg.app import views
urlpatterns = patterns(
'',
url(r'invalid_template/', views.invalid_template),
)
""", 'urls.py')
django_testdir.create_app_file("""
from django.shortcuts import render
def invalid_template(request):
return render(request, 'invalid_template.html', {})
""", 'views.py')
django_testdir.create_app_file(
"<div>{{ invalid_var }}</div>",
'templates/invalid_template.html'
)
django_testdir.create_test_module('''
import pytest
def test_for_invalid_template(client):
client.get('/invalid_template/')
@pytest.mark.ignore_template_errors
def test_ignore(client):
client.get('/invalid_template/')
''')
result = django_testdir.runpytest_subprocess('-s')
result.stdout.fnmatch_lines_random([
"tpkg/test_the_test.py ..",
])
@pytest.mark.django_db
def test_database_rollback():
assert Item.objects.count() == 0
Item.objects.create(name='blah')
assert Item.objects.count() == 1
@pytest.mark.django_db
def test_database_rollback_again():
test_database_rollback()
def test_database_name():
name = connection.settings_dict['NAME']
assert name == ':memory:' or name.startswith('test_')
def test_database_noaccess():
with pytest.raises(pytest.fail.Exception):
Item.objects.count()
class TestrunnerVerbosity:
"""Test that Django's code to setup and teardown the databases uses
pytest's verbosity level."""
@pytest.fixture
def testdir(self, django_testdir):
print("testdir")
django_testdir.create_test_module('''
import pytest
@pytest.mark.django_db
def test_inner_testrunner():
pass
''')
return django_testdir
def test_default(self, testdir):
"""Not verbose by default."""
result = testdir.runpytest_subprocess('-s')
result.stdout.fnmatch_lines([
"tpkg/test_the_test.py ."])
def test_vq_verbosity_0(self, testdir):
"""-v and -q results in verbosity 0."""
result = testdir.runpytest_subprocess('-s', '-v', '-q')
result.stdout.fnmatch_lines([
"tpkg/test_the_test.py ."])
def test_verbose_with_v(self, testdir):
"""Verbose output with '-v'."""
result = testdir.runpytest_subprocess('-s', '-v')
result.stdout.fnmatch_lines_random([
"tpkg/test_the_test.py:*",
"*PASSED*",
"*Destroying test database for alias 'default'...*"])
def test_more_verbose_with_vv(self, testdir):
"""More verbose output with '-v -v'."""
result = testdir.runpytest_subprocess('-s', '-v', '-v')
result.stdout.fnmatch_lines([
"tpkg/test_the_test.py:*Creating test database for alias*",
"*Creating table app_item*",
"*PASSED*Destroying test database for alias 'default' ('*')...*"])
def test_more_verbose_with_vv_and_reusedb(self, testdir):
"""More verbose output with '-v -v', and --create-db."""
result = testdir.runpytest_subprocess('-s', '-v', '-v', '--create-db')
result.stdout.fnmatch_lines([
"tpkg/test_the_test.py:*Creating test database for alias*",
"*PASSED*"])
assert ("*Destroying test database for alias 'default' ('*')...*"
not in result.stdout.str())
|
tomviner/pytest-django
|
tests/test_environment.py
|
Python
|
bsd-3-clause
| 6,155
|
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from sklearn.utils.testing import (assert_array_almost_equal, assert_equal,
assert_greater, assert_almost_equal,
assert_greater_equal,
assert_array_equal,
assert_raises,
ignore_warnings,
assert_warns_message)
from sklearn.datasets import make_classification, make_blobs
from sklearn.naive_bayes import MultinomialNB
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
from sklearn.svm import LinearSVC
from sklearn.linear_model import Ridge
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import Imputer
from sklearn.metrics import brier_score_loss, log_loss
from sklearn.calibration import CalibratedClassifierCV
from sklearn.calibration import _sigmoid_calibration, _SigmoidCalibration
from sklearn.calibration import calibration_curve
@ignore_warnings
def test_calibration():
"""Test calibration objects with isotonic and sigmoid"""
n_samples = 100
X, y = make_classification(n_samples=2 * n_samples, n_features=6,
random_state=42)
sample_weight = np.random.RandomState(seed=42).uniform(size=y.size)
X -= X.min() # MultinomialNB only allows positive X
# split train and test
X_train, y_train, sw_train = \
X[:n_samples], y[:n_samples], sample_weight[:n_samples]
X_test, y_test = X[n_samples:], y[n_samples:]
# Naive-Bayes
clf = MultinomialNB().fit(X_train, y_train, sample_weight=sw_train)
prob_pos_clf = clf.predict_proba(X_test)[:, 1]
pc_clf = CalibratedClassifierCV(clf, cv=y.size + 1)
assert_raises(ValueError, pc_clf.fit, X, y)
# Naive Bayes with calibration
for this_X_train, this_X_test in [(X_train, X_test),
(sparse.csr_matrix(X_train),
sparse.csr_matrix(X_test))]:
for method in ['isotonic', 'sigmoid']:
pc_clf = CalibratedClassifierCV(clf, method=method, cv=2)
# Note that this fit overwrites the fit on the entire training
# set
pc_clf.fit(this_X_train, y_train, sample_weight=sw_train)
prob_pos_pc_clf = pc_clf.predict_proba(this_X_test)[:, 1]
# Check that brier score has improved after calibration
assert_greater(brier_score_loss(y_test, prob_pos_clf),
brier_score_loss(y_test, prob_pos_pc_clf))
# Check invariance against relabeling [0, 1] -> [1, 2]
pc_clf.fit(this_X_train, y_train + 1, sample_weight=sw_train)
prob_pos_pc_clf_relabeled = pc_clf.predict_proba(this_X_test)[:, 1]
assert_array_almost_equal(prob_pos_pc_clf,
prob_pos_pc_clf_relabeled)
# Check invariance against relabeling [0, 1] -> [-1, 1]
pc_clf.fit(this_X_train, 2 * y_train - 1, sample_weight=sw_train)
prob_pos_pc_clf_relabeled = pc_clf.predict_proba(this_X_test)[:, 1]
assert_array_almost_equal(prob_pos_pc_clf,
prob_pos_pc_clf_relabeled)
# Check invariance against relabeling [0, 1] -> [1, 0]
pc_clf.fit(this_X_train, (y_train + 1) % 2,
sample_weight=sw_train)
prob_pos_pc_clf_relabeled = \
pc_clf.predict_proba(this_X_test)[:, 1]
if method == "sigmoid":
assert_array_almost_equal(prob_pos_pc_clf,
1 - prob_pos_pc_clf_relabeled)
else:
# Isotonic calibration is not invariant against relabeling
# but should improve in both cases
assert_greater(brier_score_loss(y_test, prob_pos_clf),
brier_score_loss((y_test + 1) % 2,
prob_pos_pc_clf_relabeled))
# check that calibration can also deal with regressors that have
# a decision_function
clf_base_regressor = CalibratedClassifierCV(Ridge())
clf_base_regressor.fit(X_train, y_train)
clf_base_regressor.predict(X_test)
# Check failure cases:
# only "isotonic" and "sigmoid" should be accepted as methods
clf_invalid_method = CalibratedClassifierCV(clf, method="foo")
assert_raises(ValueError, clf_invalid_method.fit, X_train, y_train)
# base-estimators should provide either decision_function or
# predict_proba (most regressors, for instance, should fail)
clf_base_regressor = \
CalibratedClassifierCV(RandomForestRegressor(), method="sigmoid")
assert_raises(RuntimeError, clf_base_regressor.fit, X_train, y_train)
def test_sample_weight():
n_samples = 100
X, y = make_classification(n_samples=2 * n_samples, n_features=6,
random_state=42)
sample_weight = np.random.RandomState(seed=42).uniform(size=len(y))
X_train, y_train, sw_train = \
X[:n_samples], y[:n_samples], sample_weight[:n_samples]
X_test = X[n_samples:]
for method in ['sigmoid', 'isotonic']:
base_estimator = LinearSVC(random_state=42)
calibrated_clf = CalibratedClassifierCV(base_estimator, method=method)
calibrated_clf.fit(X_train, y_train, sample_weight=sw_train)
probs_with_sw = calibrated_clf.predict_proba(X_test)
# As the weights are used for the calibration, they should still yield
        # different predictions
calibrated_clf.fit(X_train, y_train)
probs_without_sw = calibrated_clf.predict_proba(X_test)
diff = np.linalg.norm(probs_with_sw - probs_without_sw)
assert_greater(diff, 0.1)
def test_calibration_multiclass():
"""Test calibration for multiclass """
# test multi-class setting with classifier that implements
# only decision function
clf = LinearSVC()
X, y_idx = make_blobs(n_samples=100, n_features=2, random_state=42,
centers=3, cluster_std=3.0)
# Use categorical labels to check that CalibratedClassifierCV supports
# them correctly
target_names = np.array(['a', 'b', 'c'])
y = target_names[y_idx]
X_train, y_train = X[::2], y[::2]
X_test, y_test = X[1::2], y[1::2]
clf.fit(X_train, y_train)
for method in ['isotonic', 'sigmoid']:
cal_clf = CalibratedClassifierCV(clf, method=method, cv=2)
cal_clf.fit(X_train, y_train)
probas = cal_clf.predict_proba(X_test)
assert_array_almost_equal(np.sum(probas, axis=1), np.ones(len(X_test)))
# Check that log-loss of calibrated classifier is smaller than
# log-loss of naively turned OvR decision function to probabilities
# via softmax
def softmax(y_pred):
e = np.exp(-y_pred)
return e / e.sum(axis=1).reshape(-1, 1)
uncalibrated_log_loss = \
log_loss(y_test, softmax(clf.decision_function(X_test)))
calibrated_log_loss = log_loss(y_test, probas)
assert_greater_equal(uncalibrated_log_loss, calibrated_log_loss)
# Test that calibration of a multiclass classifier decreases log-loss
# for RandomForestClassifier
X, y = make_blobs(n_samples=100, n_features=2, random_state=42,
cluster_std=3.0)
X_train, y_train = X[::2], y[::2]
X_test, y_test = X[1::2], y[1::2]
clf = RandomForestClassifier(n_estimators=10, random_state=42)
clf.fit(X_train, y_train)
clf_probs = clf.predict_proba(X_test)
loss = log_loss(y_test, clf_probs)
for method in ['isotonic', 'sigmoid']:
cal_clf = CalibratedClassifierCV(clf, method=method, cv=3)
cal_clf.fit(X_train, y_train)
cal_clf_probs = cal_clf.predict_proba(X_test)
cal_loss = log_loss(y_test, cal_clf_probs)
assert_greater(loss, cal_loss)
def test_calibration_prefit():
"""Test calibration for prefitted classifiers"""
n_samples = 50
X, y = make_classification(n_samples=3 * n_samples, n_features=6,
random_state=42)
sample_weight = np.random.RandomState(seed=42).uniform(size=y.size)
X -= X.min() # MultinomialNB only allows positive X
# split train and test
X_train, y_train, sw_train = \
X[:n_samples], y[:n_samples], sample_weight[:n_samples]
X_calib, y_calib, sw_calib = \
X[n_samples:2 * n_samples], y[n_samples:2 * n_samples], \
sample_weight[n_samples:2 * n_samples]
X_test, y_test = X[2 * n_samples:], y[2 * n_samples:]
# Naive-Bayes
clf = MultinomialNB()
clf.fit(X_train, y_train, sw_train)
prob_pos_clf = clf.predict_proba(X_test)[:, 1]
# Naive Bayes with calibration
for this_X_calib, this_X_test in [(X_calib, X_test),
(sparse.csr_matrix(X_calib),
sparse.csr_matrix(X_test))]:
for method in ['isotonic', 'sigmoid']:
pc_clf = CalibratedClassifierCV(clf, method=method, cv="prefit")
for sw in [sw_calib, None]:
pc_clf.fit(this_X_calib, y_calib, sample_weight=sw)
y_prob = pc_clf.predict_proba(this_X_test)
y_pred = pc_clf.predict(this_X_test)
prob_pos_pc_clf = y_prob[:, 1]
assert_array_equal(y_pred,
np.array([0, 1])[np.argmax(y_prob, axis=1)])
assert_greater(brier_score_loss(y_test, prob_pos_clf),
brier_score_loss(y_test, prob_pos_pc_clf))
def test_sigmoid_calibration():
"""Test calibration values with Platt sigmoid model"""
exF = np.array([5, -4, 1.0])
exY = np.array([1, -1, -1])
# computed from my python port of the C++ code in LibSVM
AB_lin_libsvm = np.array([-0.20261354391187855, 0.65236314980010512])
assert_array_almost_equal(AB_lin_libsvm,
_sigmoid_calibration(exF, exY), 3)
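    # Platt scaling models P(y=1 | f) = 1 / (1 + exp(A * f + B)); AB_lin_libsvm
    # holds the (A, B) pair, applied by hand on the next line.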
lin_prob = 1. / (1. + np.exp(AB_lin_libsvm[0] * exF + AB_lin_libsvm[1]))
sk_prob = _SigmoidCalibration().fit(exF, exY).predict(exF)
assert_array_almost_equal(lin_prob, sk_prob, 6)
# check that _SigmoidCalibration().fit only accepts 1d array or 2d column
# arrays
assert_raises(ValueError, _SigmoidCalibration().fit,
np.vstack((exF, exF)), exY)
def test_calibration_curve():
"""Check calibration_curve function"""
y_true = np.array([0, 0, 0, 1, 1, 1])
y_pred = np.array([0., 0.1, 0.2, 0.8, 0.9, 1.])
prob_true, prob_pred = calibration_curve(y_true, y_pred, n_bins=2)
prob_true_unnormalized, prob_pred_unnormalized = \
calibration_curve(y_true, y_pred * 2, n_bins=2, normalize=True)
assert_equal(len(prob_true), len(prob_pred))
assert_equal(len(prob_true), 2)
assert_almost_equal(prob_true, [0, 1])
assert_almost_equal(prob_pred, [0.1, 0.9])
assert_almost_equal(prob_true, prob_true_unnormalized)
assert_almost_equal(prob_pred, prob_pred_unnormalized)
# probabilities outside [0, 1] should not be accepted when normalize
# is set to False
assert_raises(ValueError, calibration_curve, [1.1], [-0.1],
normalize=False)
def test_calibration_nan_imputer():
"""Test that calibration can accept nan"""
X, y = make_classification(n_samples=10, n_features=2,
n_informative=2, n_redundant=0,
random_state=42)
X[0, 0] = np.nan
clf = Pipeline(
[('imputer', Imputer()),
('rf', RandomForestClassifier(n_estimators=1))])
clf_c = CalibratedClassifierCV(clf, cv=2, method='isotonic')
clf_c.fit(X, y)
clf_c.predict(X)
|
toastedcornflakes/scikit-learn
|
sklearn/tests/test_calibration.py
|
Python
|
bsd-3-clause
| 12,016
|
import re
from project.models import PageChunk
contacts_map_coordinates = \
re.compile(
r".*"
r"@(?P<latitude>\-?[\d\.]+),"
r"(?P<longitude>\-?[\d\.]+),"
r"(?P<zoom>[\d\.]+)z"
r".*"
)
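# Illustrative only, using a hypothetical Google-Maps-style URL the pattern is
# meant to pick apart:
#   m = contacts_map_coordinates.match("https://maps.example.com/place/@48.46,35.04,17z")
#   m.group("latitude"), m.group("longitude"), m.group("zoom")  # -> "48.46", "35.04", "17"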
def inject_pagechunks():
chunks = {chunk.name: chunk.text for chunk in PageChunk.query.all()}
return {"pagechunks": chunks}
|
uaprom-summer-2015/Meowth
|
project/utils.py
|
Python
|
bsd-3-clause
| 368
|
from openstatesapi.jurisdiction import make_jurisdiction
J = make_jurisdiction('nd')
J.url = 'http://nd.gov'
|
sunlightlabs/billy
|
billy2pupa/nd.py
|
Python
|
bsd-3-clause
| 110
|
"""
sentry.web.forms.accounts
~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import pytz
import six
from datetime import datetime
from django import forms
from django.conf import settings
from django.contrib.auth import authenticate, get_user_model
from django.db.models import Q
from django.utils.text import capfirst, mark_safe
from django.utils.translation import ugettext_lazy as _
from sentry import newsletter, options
from sentry.auth import password_validation
from sentry.app import ratelimiter
from sentry.constants import LANGUAGES
from sentry.models import (Organization, OrganizationStatus, User, UserOption, UserOptionValue)
from sentry.security import capture_security_activity
from sentry.utils.auth import find_users, logger
from sentry.web.forms.fields import CustomTypedChoiceField, ReadOnlyTextField
from six.moves import range
def _get_timezone_choices():
results = []
for tz in pytz.common_timezones:
now = datetime.now(pytz.timezone(tz))
offset = now.strftime('%z')
results.append((int(offset), tz, '(UTC%s) %s' % (offset, tz)))
results.sort()
for i in range(len(results)):
results[i] = results[i][1:]
return results
TIMEZONE_CHOICES = _get_timezone_choices()
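# Each choice ends up as a (timezone_name, label) 2-tuple, roughly of the form
# ('UTC', '(UTC+0000) UTC'); the numeric offset is only a sort key and is stripped afterwards.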
class AuthenticationForm(forms.Form):
username = forms.CharField(
label=_('Account'),
max_length=128,
widget=forms.TextInput(attrs={
'placeholder': _('username or email'),
'tabindex': 1,
}),
)
password = forms.CharField(
label=_('Password'),
widget=forms.PasswordInput(attrs={
'placeholder': _('password'),
'tabindex': 2,
}),
)
error_messages = {
'invalid_login':
_(
"Please enter a correct %(username)s and password. "
"Note that both fields may be case-sensitive."
),
'rate_limited':
_("You have made too many failed authentication "
"attempts. Please try again later."),
'no_cookies':
_(
"Your Web browser doesn't appear to have cookies "
"enabled. Cookies are required for logging in."
),
'inactive':
_("This account is inactive."),
}
def __init__(self, request=None, *args, **kwargs):
"""
If request is passed in, the form will validate that cookies are
enabled. Note that the request (a HttpRequest object) must have set a
cookie with the key TEST_COOKIE_NAME and value TEST_COOKIE_VALUE before
running this validation.
"""
self.request = request
self.user_cache = None
super(AuthenticationForm, self).__init__(*args, **kwargs)
# Set the label for the "username" field.
UserModel = get_user_model()
self.username_field = UserModel._meta.get_field(UserModel.USERNAME_FIELD)
if not self.fields['username'].label:
self.fields['username'].label = capfirst(self.username_field.verbose_name)
def clean_username(self):
value = (self.cleaned_data.get('username') or '').strip()
if not value:
return
return value.lower()
def is_rate_limited(self):
if self._is_ip_rate_limited():
return True
if self._is_user_rate_limited():
return True
return False
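    # The two helpers below rate limit by source IP ('auth:ip:<addr>') and by the
    # attempted username ('auth:username:<name>') via the shared ratelimiter.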
def _is_ip_rate_limited(self):
limit = options.get('auth.ip-rate-limit')
if not limit:
return False
ip_address = self.request.META['REMOTE_ADDR']
return ratelimiter.is_limited(
'auth:ip:{}'.format(ip_address),
limit,
)
def _is_user_rate_limited(self):
limit = options.get('auth.user-rate-limit')
if not limit:
return False
username = self.cleaned_data.get('username')
if not username:
return False
return ratelimiter.is_limited(
u'auth:username:{}'.format(username),
limit,
)
def clean(self):
username = self.cleaned_data.get('username')
password = self.cleaned_data.get('password')
if not (username and password):
raise forms.ValidationError(
self.error_messages['invalid_login'] %
{'username': self.username_field.verbose_name}
)
if self.is_rate_limited():
logger.info(
'user.auth.rate-limited',
extra={
'ip_address': self.request.META['REMOTE_ADDR'],
'username': username,
}
)
raise forms.ValidationError(self.error_messages['rate_limited'])
self.user_cache = authenticate(username=username, password=password)
if self.user_cache is None:
raise forms.ValidationError(
self.error_messages['invalid_login'] %
{'username': self.username_field.verbose_name}
)
self.check_for_test_cookie()
return self.cleaned_data
def check_for_test_cookie(self):
if self.request and not self.request.session.test_cookie_worked():
raise forms.ValidationError(self.error_messages['no_cookies'])
def get_user_id(self):
if self.user_cache:
return self.user_cache.id
return None
def get_user(self):
return self.user_cache
class PasswordlessRegistrationForm(forms.ModelForm):
name = forms.CharField(
label=_('Name'),
max_length=30,
widget=forms.TextInput(attrs={'placeholder': 'Jane Doe'}),
required=True
)
username = forms.EmailField(
label=_('Email'),
max_length=128,
widget=forms.TextInput(attrs={'placeholder': 'you@example.com'}),
required=True
)
subscribe = CustomTypedChoiceField(
coerce=lambda x: six.text_type(x) == u'1',
label=_("Email updates"),
choices=(
(1, 'Yes, I would like to receive updates via email'),
(0, "No, I'd prefer not to receive these updates"),
),
widget=forms.RadioSelect,
required=True,
initial=False,
)
def __init__(self, *args, **kwargs):
super(PasswordlessRegistrationForm, self).__init__(*args, **kwargs)
if not newsletter.is_enabled():
del self.fields['subscribe']
else:
# NOTE: the text here is duplicated within the ``NewsletterConsent`` component
# in the UI
notice = (
"We'd love to keep you updated via email with product and feature "
"announcements, promotions, educational materials, and events. "
"Our updates focus on relevant information, and we'll never sell "
"your data to third parties. See our "
"<a href=\"{privacy_link}\">Privacy Policy</a> for more details."
)
self.fields['subscribe'].help_text = mark_safe(
notice.format(privacy_link=settings.PRIVACY_URL))
class Meta:
fields = ('username', 'name')
model = User
def clean_username(self):
value = (self.cleaned_data.get('username') or '').strip()
if not value:
return
if User.objects.filter(username__iexact=value).exists():
raise forms.ValidationError(
_('An account is already registered with that email address.'))
return value.lower()
def save(self, commit=True):
user = super(PasswordlessRegistrationForm, self).save(commit=False)
user.email = user.username
if commit:
user.save()
if self.cleaned_data.get('subscribe'):
newsletter.create_or_update_subscriptions(
user, list_ids=newsletter.get_default_list_ids())
return user
class RegistrationForm(PasswordlessRegistrationForm):
password = forms.CharField(
required=True,
widget=forms.PasswordInput(attrs={'placeholder': 'something super secret'}),
)
def clean_password(self):
password = self.cleaned_data['password']
password_validation.validate_password(password)
return password
def save(self, commit=True):
user = super(RegistrationForm, self).save(commit=False)
user.set_password(self.cleaned_data['password'])
if commit:
user.save()
if self.cleaned_data.get('subscribe'):
newsletter.create_or_update_subscriptions(
user, list_ids=newsletter.get_default_list_ids())
return user
class RecoverPasswordForm(forms.Form):
user = forms.CharField(
label=_('Account'),
max_length=128,
widget=forms.TextInput(attrs={'placeholder': _('username or email')}),
)
def clean_user(self):
value = (self.cleaned_data.get('user') or '').strip()
if not value:
return
users = find_users(value, with_valid_password=False)
if not users:
raise forms.ValidationError(_("We were unable to find a matching user."))
users = [u for u in users if not u.is_managed]
if not users:
raise forms.ValidationError(
_(
"The account you are trying to recover is managed and does not support password recovery."
)
)
if len(users) > 1:
raise forms.ValidationError(
_("Multiple accounts were found matching this email address.")
)
return users[0]
class ChangePasswordRecoverForm(forms.Form):
password = forms.CharField(widget=forms.PasswordInput())
def clean_password(self):
password = self.cleaned_data['password']
password_validation.validate_password(password)
return password
class EmailForm(forms.Form):
alt_email = forms.EmailField(
label=_('New Email'),
required=False,
help_text='Designate an alternative email for this account',
)
password = forms.CharField(
label=_('Current password'),
widget=forms.PasswordInput(),
help_text=_('You will need to enter your current account password to make changes.'),
required=True,
)
def __init__(self, user, *args, **kwargs):
self.user = user
super(EmailForm, self).__init__(*args, **kwargs)
needs_password = user.has_usable_password()
if not needs_password:
del self.fields['password']
def clean_password(self):
value = self.cleaned_data.get('password')
if value and not self.user.check_password(value):
raise forms.ValidationError(_('The password you entered is not correct.'))
elif not value:
raise forms.ValidationError(
_('You must confirm your current password to make changes.')
)
return value
class AccountSettingsForm(forms.Form):
name = forms.CharField(required=True, label=_('Name'), max_length=30)
username = forms.CharField(label=_('Username'), max_length=128)
email = forms.EmailField(label=_('Email'))
new_password = forms.CharField(
label=_('New password'),
widget=forms.PasswordInput(),
required=False,
# help_text=password_validation.password_validators_help_text_html(),
)
verify_new_password = forms.CharField(
label=_('Verify new password'),
widget=forms.PasswordInput(),
required=False,
)
password = forms.CharField(
label=_('Current password'),
widget=forms.PasswordInput(),
help_text='You will need to enter your current account password to make changes.',
required=False,
)
def __init__(self, user, request, *args, **kwargs):
self.user = user
self.request = request
super(AccountSettingsForm, self).__init__(*args, **kwargs)
needs_password = user.has_usable_password()
if self.user.is_managed:
# username and password always managed, email and
# name optionally managed
for field in ('email', 'name', 'username'):
if field == 'username' or field in settings.SENTRY_MANAGED_USER_FIELDS:
self.fields[field] = ReadOnlyTextField(label=self.fields[field].label)
if field == 'email':
needs_password = False
del self.fields['new_password']
del self.fields['verify_new_password']
        # don't show username field if it's the same as their email address
if self.user.email == self.user.username:
del self.fields['username']
if not needs_password:
del self.fields['password']
def is_readonly(self):
if self.user.is_managed:
return set(('email', 'name')) == set(settings.SENTRY_MANAGED_USER_FIELDS)
return False
def _clean_managed_field(self, field):
if self.user.is_managed and (
field == 'username' or field in settings.SENTRY_MANAGED_USER_FIELDS
):
return getattr(self.user, field)
return self.cleaned_data[field]
def clean_email(self):
value = self._clean_managed_field('email').lower()
if self.user.email.lower() == value:
return value
if User.objects.filter(Q(email__iexact=value) | Q(username__iexact=value)).exclude(
id=self.user.id
).exists():
raise forms.ValidationError(
_("There was an error adding %s: that email is already in use") %
self.cleaned_data['email']
)
return value
def clean_name(self):
return self._clean_managed_field('name')
def clean_username(self):
value = self._clean_managed_field('username')
if User.objects.filter(username__iexact=value).exclude(id=self.user.id).exists():
raise forms.ValidationError(_("That username is already in use."))
return value
def clean_password(self):
value = self.cleaned_data.get('password')
if value and not self.user.check_password(value):
raise forms.ValidationError('The password you entered is not correct.')
elif not value and (
self.cleaned_data.get('email', self.user.email) != self.user.email or
self.cleaned_data.get('new_password')
):
raise forms.ValidationError('You must confirm your current password to make changes.')
return value
def clean_verify_new_password(self):
new_password = self.cleaned_data.get('new_password')
if new_password:
verify_new_password = self.cleaned_data.get('verify_new_password')
if verify_new_password is None:
raise forms.ValidationError('You must verify your new password.')
if new_password != verify_new_password:
raise forms.ValidationError('Your new password and verify new password must match.')
return verify_new_password
def clean_new_password(self):
new_password = self.cleaned_data.get('new_password')
if new_password:
password_validation.validate_password(new_password)
return new_password
def save(self, commit=True):
if self.cleaned_data.get('new_password'):
self.user.set_password(self.cleaned_data['new_password'])
self.user.refresh_session_nonce(self.request)
capture_security_activity(
account=self.user,
type='password-changed',
actor=self.request.user,
ip_address=self.request.META['REMOTE_ADDR'],
send_email=True,
)
self.user.name = self.cleaned_data['name']
if self.cleaned_data['email'] != self.user.email:
new_username = self.user.email == self.user.username
else:
new_username = False
self.user.email = self.cleaned_data['email']
if self.cleaned_data.get('username'):
self.user.username = self.cleaned_data['username']
elif new_username and not User.objects.filter(username__iexact=self.user.email).exists():
self.user.username = self.user.email
if commit:
self.user.save()
return self.user
class AppearanceSettingsForm(forms.Form):
language = forms.ChoiceField(
label=_('Language'),
choices=LANGUAGES,
required=False,
widget=forms.Select(attrs={'class': 'input-xlarge'})
)
stacktrace_order = forms.ChoiceField(
label=_('Stacktrace order'),
choices=(
('-1', _('Default (let Sentry decide)')), ('1', _('Most recent call last')),
('2', _('Most recent call first')),
),
help_text=_('Choose the default ordering of frames in stacktraces.'),
required=False,
widget=forms.Select(attrs={'class': 'input-xlarge'})
)
timezone = forms.ChoiceField(
label=_('Time zone'),
choices=TIMEZONE_CHOICES,
required=False,
widget=forms.Select(attrs={'class': 'input-xxlarge'})
)
clock_24_hours = forms.BooleanField(
label=_('Use a 24-hour clock'),
required=False,
)
def __init__(self, user, *args, **kwargs):
self.user = user
super(AppearanceSettingsForm, self).__init__(*args, **kwargs)
def save(self):
# Save user language
UserOption.objects.set_value(
user=self.user,
key='language',
value=self.cleaned_data['language'],
)
# Save stacktrace options
UserOption.objects.set_value(
user=self.user,
key='stacktrace_order',
value=self.cleaned_data['stacktrace_order'],
)
# Save time zone options
UserOption.objects.set_value(
user=self.user,
key='timezone',
value=self.cleaned_data['timezone'],
)
# Save clock 24 hours option
UserOption.objects.set_value(
user=self.user,
key='clock_24_hours',
value=self.cleaned_data['clock_24_hours'],
)
return self.user
class NotificationReportSettingsForm(forms.Form):
organizations = forms.ModelMultipleChoiceField(
queryset=Organization.objects.none(),
required=False,
widget=forms.CheckboxSelectMultiple(),
)
def __init__(self, user, *args, **kwargs):
self.user = user
super(NotificationReportSettingsForm, self).__init__(*args, **kwargs)
org_queryset = Organization.objects.filter(
status=OrganizationStatus.VISIBLE,
member_set__user=user,
)
disabled_orgs = set(
UserOption.objects.get_value(
user=user,
key='reports:disabled-organizations',
default=[],
)
)
self.fields['organizations'].queryset = org_queryset
self.fields['organizations'].initial = [
o.id for o in org_queryset if o.id not in disabled_orgs
]
def save(self):
enabled_orgs = set((o.id for o in self.cleaned_data.get('organizations')))
all_orgs = set(self.fields['organizations'].queryset.values_list('id', flat=True))
UserOption.objects.set_value(
user=self.user,
key='reports:disabled-organizations',
value=list(all_orgs.difference(enabled_orgs)),
)
class NotificationDeploySettingsForm(forms.Form):
CHOICES = [
(UserOptionValue.all_deploys, _('All deploys')),
(UserOptionValue.committed_deploys_only,
_('Deploys with your commits')), (UserOptionValue.no_deploys, _('Never'))
]
notifications = forms.ChoiceField(
choices=CHOICES,
required=False,
widget=forms.RadioSelect(),
)
def __init__(self, user, organization, *args, **kwargs):
self.user = user
self.organization = organization
super(NotificationDeploySettingsForm, self).__init__(*args, **kwargs)
self.fields['notifications'].label = "" # hide the label
deploy_setting = UserOption.objects.get_value(
user=user,
organization=self.organization,
key='deploy-emails',
default=UserOptionValue.committed_deploys_only,
)
self.fields['notifications'].initial = deploy_setting
def save(self):
value = self.data.get('{}-notifications'.format(self.prefix), None)
if value is not None:
UserOption.objects.set_value(
user=self.user,
organization=self.organization,
key='deploy-emails',
value=value,
)
class NotificationSettingsForm(forms.Form):
alert_email = forms.EmailField(
label=_('Email'),
help_text=_('Designate an alternative email address to send email notifications to.'),
required=False
)
subscribe_by_default = forms.BooleanField(
label=_('Automatically subscribe to alerts for new projects'),
help_text=_(
"When enabled, you'll automatically subscribe to alerts when you create or join a project."
),
required=False,
)
workflow_notifications = forms.ChoiceField(
label=_('Preferred workflow subscription level for new projects'),
choices=[
(UserOptionValue.all_conversations, "Receive workflow updates for all issues."),
(UserOptionValue.participating_only,
"Receive workflow updates only for issues that I am participating in or have subscribed to."),
(UserOptionValue.no_conversations, "Never receive workflow updates."),
],
help_text=_("This will be automatically set as your subscription preference when you create or join a project. It has no effect on existing projects."),
required=False,
)
self_notifications = forms.BooleanField(
label=_('Receive notifications about my own activity'),
help_text=_(
            "Enable this if you wish to receive emails for your own actions, as well as those of others."
),
required=False,
)
self_assign_issue = forms.BooleanField(
label=_('Claim unassigned issues when resolving them'),
help_text=_(
"When enabled, you'll automatically be assigned to unassigned issues when marking them as resolved."
),
required=False,
)
def __init__(self, user, *args, **kwargs):
self.user = user
super(NotificationSettingsForm, self).__init__(*args, **kwargs)
self.fields['alert_email'].initial = UserOption.objects.get_value(
user=self.user,
key='alert_email',
default=user.email,
)
self.fields['subscribe_by_default'].initial = (
UserOption.objects.get_value(
user=self.user,
key='subscribe_by_default',
default='1',
) == '1'
)
self.fields['workflow_notifications'].initial = UserOption.objects.get_value(
user=self.user,
key='workflow:notifications',
default=UserOptionValue.all_conversations,
project=None,
)
self.fields['self_notifications'].initial = UserOption.objects.get_value(
user=self.user, key='self_notifications', default='0'
) == '1'
self.fields['self_assign_issue'].initial = UserOption.objects.get_value(
user=self.user, key='self_assign_issue', default='0'
) == '1'
def get_title(self):
return "General"
def save(self):
UserOption.objects.set_value(
user=self.user,
key='alert_email',
value=self.cleaned_data['alert_email'],
)
UserOption.objects.set_value(
user=self.user,
key='subscribe_by_default',
value='1' if self.cleaned_data['subscribe_by_default'] else '0',
)
UserOption.objects.set_value(
user=self.user,
key='self_notifications',
value='1' if self.cleaned_data['self_notifications'] else '0',
)
UserOption.objects.set_value(
user=self.user,
key='self_assign_issue',
value='1' if self.cleaned_data['self_assign_issue'] else '0',
)
workflow_notifications_value = self.cleaned_data.get('workflow_notifications')
if not workflow_notifications_value:
UserOption.objects.unset_value(
user=self.user,
key='workflow:notifications',
project=None,
)
else:
UserOption.objects.set_value(
user=self.user,
key='workflow:notifications',
value=workflow_notifications_value,
project=None,
)
class ProjectEmailOptionsForm(forms.Form):
alert = forms.BooleanField(required=False)
workflow = forms.ChoiceField(
choices=[
(UserOptionValue.no_conversations, 'Nothing'),
(UserOptionValue.participating_only, 'Participating'),
(UserOptionValue.all_conversations, 'Everything'),
],
)
email = forms.ChoiceField(label="", choices=(), required=False,
widget=forms.Select())
def __init__(self, project, user, *args, **kwargs):
self.project = project
self.user = user
super(ProjectEmailOptionsForm, self).__init__(*args, **kwargs)
has_alerts = project.is_user_subscribed_to_mail_alerts(user)
# This allows users who have entered an alert_email value or have specified an email
# for notifications to keep their settings
emails = [e.email for e in user.get_verified_emails()]
alert_email = UserOption.objects.get_value(self.user, 'alert_email')
specified_email = UserOption.objects.get_value(self.user, 'mail:email', project=project)
emails.extend([user.email, alert_email, specified_email])
choices = [(email, email) for email in sorted(set(emails)) if email]
self.fields['email'].choices = choices
self.fields['alert'].initial = has_alerts
self.fields['workflow'].initial = UserOption.objects.get_value(
user=self.user,
project=self.project,
key='workflow:notifications',
default=UserOption.objects.get_value(
user=self.user,
project=None,
key='workflow:notifications',
default=UserOptionValue.all_conversations,
),
)
self.fields['email'].initial = specified_email or alert_email or user.email
def save(self):
UserOption.objects.set_value(
user=self.user,
key='mail:alert',
value=int(self.cleaned_data['alert']),
project=self.project,
)
UserOption.objects.set_value(
user=self.user,
key='workflow:notifications',
value=self.cleaned_data['workflow'],
project=self.project,
)
if self.cleaned_data['email']:
UserOption.objects.set_value(
user=self.user,
key='mail:email',
value=self.cleaned_data['email'],
project=self.project,
)
else:
UserOption.objects.unset_value(self.user, self.project, 'mail:email')
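# --- Editor's sketch (not part of the original module): a minimal illustration
# --- of driving ProjectEmailOptionsForm from a view. `request` and `project`
# --- are assumed to be supplied by the surrounding Django/Sentry view
# --- machinery; the helper below is never called at import time.
def _example_project_email_options(request, project):
    form = ProjectEmailOptionsForm(project, request.user, request.POST or None)
    if request.method == 'POST' and form.is_valid():
        # Persists the mail:alert, workflow:notifications and mail:email options.
        form.save()
    return form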
class TwoFactorForm(forms.Form):
otp = forms.CharField(
label=_('Authenticator code'),
max_length=20,
widget=forms.TextInput(
attrs={
'placeholder': _('Code from authenticator'),
'autofocus': True,
}
),
)
class ConfirmPasswordForm(forms.Form):
password = forms.CharField(
label=_('Sentry account password'),
widget=forms.PasswordInput(),
help_text='You will need to enter your current Sentry account password to make changes.',
required=True,
)
def __init__(self, user, *args, **kwargs):
self.user = user
super(ConfirmPasswordForm, self).__init__(*args, **kwargs)
needs_password = user.has_usable_password()
if not needs_password:
del self.fields['password']
def clean_password(self):
value = self.cleaned_data.get('password')
if value and not self.user.check_password(value):
raise forms.ValidationError(_('The password you entered is not correct.'))
elif not value:
raise forms.ValidationError(
_('You must confirm your current password to make changes.')
)
return value
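# --- Editor's sketch (not part of the original module): how ConfirmPasswordForm
# --- might be used to gate a sensitive change. `request` is assumed to be a
# --- normal Django request; the form silently drops its password field when the
# --- account has no usable password.
def _example_confirm_password(request):
    form = ConfirmPasswordForm(request.user, request.POST or None)
    # is_valid() runs clean_password(), which checks the value against the
    # stored hash and rejects empty input.
    return request.method == 'POST' and form.is_valid()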
|
looker/sentry
|
src/sentry/web/forms/accounts.py
|
Python
|
bsd-3-clause
| 29,165
|
from __future__ import print_function
import string
import sys
from collections import deque
import numpy as np
from sklearn.base import BaseEstimator
from sklearn.utils import check_array, check_random_state
from sklearn.utils.validation import check_is_fitted
from . import _hmmc
from .utils import normalize, logsumexp, iter_from_X_lengths
DECODER_ALGORITHMS = frozenset(("viterbi", "map"))
class ConvergenceMonitor(object):
"""Monitors and reports convergence to :data:`sys.stderr`.
Parameters
----------
tol : double
Convergence threshold. EM has converged either if the maximum
number of iterations is reached or the log probability
improvement between the two consecutive iterations is less
than threshold.
n_iter : int
Maximum number of iterations to perform.
verbose : bool
If ``True`` then per-iteration convergence reports are printed,
otherwise the monitor is mute.
Attributes
----------
history : deque
The log probability of the data for the last two training
iterations. If the values are not strictly increasing, the
model did not converge.
iter : int
Number of iterations performed while training the model.
"""
fmt = "{iter:>10d} {logprob:>16.4f} {delta:>+16.4f}"
def __init__(self, tol, n_iter, verbose):
self.tol = tol
self.n_iter = n_iter
self.verbose = verbose
self.history = deque(maxlen=2)
self.iter = 1
def report(self, logprob):
if self.history and self.verbose:
delta = logprob - self.history[-1]
message = self.fmt.format(
iter=self.iter, logprob=logprob, delta=delta)
print(message, file=sys.stderr)
self.history.append(logprob)
self.iter += 1
@property
def converged(self):
return (self.iter == self.n_iter or
(len(self.history) == 2 and
self.history[1] - self.history[0] < self.tol))
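# --- Editor's sketch (not part of the original source): how ConvergenceMonitor
# --- is typically driven from an EM loop. `compute_logprob` is a hypothetical
# --- stand-in for a real E-step that returns the current log likelihood.
def _example_monitor_usage(compute_logprob, tol=1e-2, n_iter=10):
    monitor = ConvergenceMonitor(tol=tol, n_iter=n_iter, verbose=True)
    while not monitor.converged:
        # Each report() appends to the two-element history and bumps the
        # iteration counter; convergence is declared once the improvement
        # drops below tol or the iteration budget is exhausted.
        monitor.report(compute_logprob())
    return monitor.iter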
class _BaseHMM(BaseEstimator):
"""Hidden Markov Model base class.
Representation of a hidden Markov model probability distribution.
This class allows for easy evaluation of, sampling from, and
maximum-likelihood estimation of the parameters of a HMM.
See the instance documentation for details specific to a
particular object.
Parameters
----------
n_components : int
Number of states in the model.
startprob_prior : array, shape (n_components, )
Initial state occupation prior distribution.
transmat_prior : array, shape (n_components, n_components)
Matrix of prior transition probabilities between states.
    algorithm : string, one of the ``DECODER_ALGORITHMS``
        Decoder algorithm.
    random_state : RandomState or an int seed (``None`` by default)
A random number generator instance.
n_iter : int, optional
Maximum number of iterations to perform.
tol : float, optional
Convergence threshold. EM will stop if the gain in log-likelihood
is below this value.
verbose : bool, optional
When ``True`` per-iteration convergence reports are printed
to :data:`sys.stderr`. You can diagnose convergence via the
:attr:`monitor_` attribute.
params : string, optional
Controls which parameters are updated in the training
process. Can contain any combination of 's' for startprob,
't' for transmat, and other characters for subclass-specific
emission parameters. Defaults to all parameters.
init_params : string, optional
Controls which parameters are initialized prior to
training. Can contain any combination of 's' for
startprob, 't' for transmat, and other characters for
subclass-specific emission parameters. Defaults to all
parameters.
Attributes
----------
monitor_ : ConvergenceMonitor
Monitor object used to check the convergence of EM.
startprob_ : array, shape (n_components, )
Initial state occupation distribution.
transmat_ : array, shape (n_components, n_components)
Matrix of transition probabilities between states.
"""
# This class implements the public interface to all HMMs that
# derive from it, including all of the machinery for the
# forward-backward and Viterbi algorithms. Subclasses need only
# implement _generate_sample_from_state(), _compute_log_likelihood(),
# _init(), _initialize_sufficient_statistics(),
# _accumulate_sufficient_statistics(), and _do_mstep(), all of
# which depend on the specific emission distribution.
#
# Subclasses will probably also want to implement properties for
# the emission distribution parameters to expose them publicly.
def __init__(self, n_components=1,
startprob_prior=1.0, transmat_prior=1.0,
algorithm="viterbi", random_state=None,
n_iter=10, tol=1e-2, verbose=False,
params=string.ascii_letters,
init_params=string.ascii_letters):
self.n_components = n_components
self.params = params
self.init_params = init_params
self.startprob_prior = startprob_prior
self.transmat_prior = transmat_prior
self.algorithm = algorithm
self.random_state = random_state
self.n_iter = n_iter
self.tol = tol
self.verbose = verbose
def score_samples(self, X, lengths=None):
"""Compute the log probability under the model and compute posteriors.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Feature matrix of individual samples.
lengths : array-like of integers, shape (n_sequences, ), optional
Lengths of the individual sequences in ``X``. The sum of
these should be ``n_samples``.
Returns
-------
logprob : float
Log likelihood of ``X``.
posteriors : array, shape (n_samples, n_components)
State-membership probabilities for each sample in ``X``.
See Also
--------
score : Compute the log probability under the model.
decode : Find most likely state sequence corresponding to ``X``.
"""
check_is_fitted(self, "startprob_")
self._check()
X = check_array(X)
n_samples = X.shape[0]
logprob = 0
posteriors = np.zeros((n_samples, self.n_components))
for i, j in iter_from_X_lengths(X, lengths):
framelogprob = self._compute_log_likelihood(X[i:j])
logprobij, fwdlattice = self._do_forward_pass(framelogprob)
logprob += logprobij
bwdlattice = self._do_backward_pass(framelogprob)
posteriors[i:j] = self._compute_posteriors(fwdlattice, bwdlattice)
return logprob, posteriors
def score(self, X, lengths=None):
"""Compute the log probability under the model.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Feature matrix of individual samples.
lengths : array-like of integers, shape (n_sequences, ), optional
Lengths of the individual sequences in ``X``. The sum of
these should be ``n_samples``.
Returns
-------
logprob : float
Log likelihood of ``X``.
See Also
--------
score_samples : Compute the log probability under the model and
posteriors.
decode : Find most likely state sequence corresponding to ``X``.
"""
check_is_fitted(self, "startprob_")
self._check()
# XXX we can unroll forward pass for speed and memory efficiency.
logprob = 0
for i, j in iter_from_X_lengths(X, lengths):
framelogprob = self._compute_log_likelihood(X[i:j])
logprobij, _fwdlattice = self._do_forward_pass(framelogprob)
logprob += logprobij
return logprob
def _decode_viterbi(self, X):
framelogprob = self._compute_log_likelihood(X)
return self._do_viterbi_pass(framelogprob)
def _decode_map(self, X):
_, posteriors = self.score_samples(X)
logprob = np.max(posteriors, axis=1).sum()
state_sequence = np.argmax(posteriors, axis=1)
return logprob, state_sequence
def decode(self, X, lengths=None, algorithm=None):
"""Find most likely state sequence corresponding to ``X``.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Feature matrix of individual samples.
lengths : array-like of integers, shape (n_sequences, ), optional
Lengths of the individual sequences in ``X``. The sum of
these should be ``n_samples``.
algorithm : string, one of the ``DECODER_ALGORITHMS``
            Decoder algorithm to be used.
Returns
-------
logprob : float
Log probability of the produced state sequence.
state_sequence : array, shape (n_samples, )
Labels for each sample from ``X`` obtained via a given
decoder ``algorithm``.
See Also
--------
score_samples : Compute the log probability under the model and
posteriors.
score : Compute the log probability under the model.
"""
check_is_fitted(self, "startprob_")
self._check()
algorithm = algorithm or self.algorithm
if algorithm not in DECODER_ALGORITHMS:
raise ValueError("Unknown decoder {0!r}".format(algorithm))
decoder = {
"viterbi": self._decode_viterbi,
"map": self._decode_map
}[algorithm]
X = check_array(X)
n_samples = X.shape[0]
logprob = 0
state_sequence = np.empty(n_samples, dtype=int)
for i, j in iter_from_X_lengths(X, lengths):
# XXX decoder works on a single sample at a time!
logprobij, state_sequenceij = decoder(X[i:j])
logprob += logprobij
state_sequence[i:j] = state_sequenceij
return logprob, state_sequence
def predict(self, X, lengths=None):
"""Find most likely state sequence corresponding to ``X``.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Feature matrix of individual samples.
lengths : array-like of integers, shape (n_sequences, ), optional
Lengths of the individual sequences in ``X``. The sum of
these should be ``n_samples``.
Returns
-------
state_sequence : array, shape (n_samples, )
Labels for each sample from ``X``.
"""
_, state_sequence = self.decode(X, lengths)
return state_sequence
def predict_proba(self, X, lengths=None):
"""Compute the posterior probability for each state in the model.
X : array-like, shape (n_samples, n_features)
Feature matrix of individual samples.
lengths : array-like of integers, shape (n_sequences, ), optional
Lengths of the individual sequences in ``X``. The sum of
these should be ``n_samples``.
Returns
-------
posteriors : array, shape (n_samples, n_components)
State-membership probabilities for each sample from ``X``.
"""
_, posteriors = self.score_samples(X, lengths)
return posteriors
def sample(self, n_samples=1, random_state=None):
"""Generate random samples from the model.
Parameters
----------
n_samples : int
Number of samples to generate.
        random_state : RandomState or an int seed (``None`` by default)
A random number generator instance. If ``None``, the object's
random_state is used.
Returns
-------
X : array, shape (n_samples, n_features)
Feature matrix.
state_sequence : array, shape (n_samples, )
State sequence produced by the model.
"""
check_is_fitted(self, "startprob_")
if random_state is None:
random_state = self.random_state
random_state = check_random_state(random_state)
startprob_cdf = np.cumsum(self.startprob_)
transmat_cdf = np.cumsum(self.transmat_, axis=1)
currstate = (startprob_cdf > random_state.rand()).argmax()
state_sequence = [currstate]
X = [self._generate_sample_from_state(
currstate, random_state=random_state)]
for t in range(n_samples - 1):
currstate = (transmat_cdf[currstate] > random_state.rand()) \
.argmax()
state_sequence.append(currstate)
X.append(self._generate_sample_from_state(
currstate, random_state=random_state))
return np.atleast_2d(X), np.array(state_sequence, dtype=int)
def fit(self, X, lengths=None):
"""Estimate model parameters.
An initialization step is performed before entering the
EM-algorithm. If you want to avoid this step for a subset of
the parameters, pass proper ``init_params`` keyword argument
to estimator's constructor.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Feature matrix of individual samples.
lengths : array-like of integers, shape (n_sequences, )
Lengths of the individual sequences in ``X``. The sum of
these should be ``n_samples``.
Returns
-------
self : object
Returns self.
"""
X = check_array(X)
self._init(X, lengths=lengths, params=self.init_params)
self._check()
self.monitor_ = ConvergenceMonitor(self.tol, self.n_iter, self.verbose)
for iter in range(self.n_iter):
stats = self._initialize_sufficient_statistics()
curr_logprob = 0
for i, j in iter_from_X_lengths(X, lengths):
framelogprob = self._compute_log_likelihood(X[i:j])
logprob, fwdlattice = self._do_forward_pass(framelogprob)
curr_logprob += logprob
bwdlattice = self._do_backward_pass(framelogprob)
posteriors = self._compute_posteriors(fwdlattice, bwdlattice)
self._accumulate_sufficient_statistics(
stats, X[i:j], framelogprob, posteriors, fwdlattice,
bwdlattice, self.params)
self.monitor_.report(curr_logprob)
if self.monitor_.converged:
break
self._do_mstep(stats, self.params)
return self
def _do_viterbi_pass(self, framelogprob):
n_observations, n_components = framelogprob.shape
state_sequence, logprob = _hmmc._viterbi(
n_observations, n_components, np.log(self.startprob_),
np.log(self.transmat_), framelogprob)
return logprob, state_sequence
def _do_forward_pass(self, framelogprob):
n_observations, n_components = framelogprob.shape
fwdlattice = np.zeros((n_observations, n_components))
_hmmc._forward(n_observations, n_components, np.log(self.startprob_),
np.log(self.transmat_), framelogprob, fwdlattice)
return logsumexp(fwdlattice[-1]), fwdlattice
def _do_backward_pass(self, framelogprob):
n_observations, n_components = framelogprob.shape
bwdlattice = np.zeros((n_observations, n_components))
_hmmc._backward(n_observations, n_components, np.log(self.startprob_),
np.log(self.transmat_), framelogprob, bwdlattice)
return bwdlattice
def _compute_posteriors(self, fwdlattice, bwdlattice):
log_gamma = fwdlattice + bwdlattice
# gamma is guaranteed to be correctly normalized by logprob at
# all frames, unless we do approximate inference using pruning.
# So, we will normalize each frame explicitly in case we
# pruned too aggressively.
log_gamma += np.finfo(float).eps
log_gamma -= logsumexp(log_gamma, axis=1)[:, np.newaxis]
out = np.exp(log_gamma)
normalize(out, axis=1)
return out
def _compute_log_likelihood(self, X):
pass
def _generate_sample_from_state(self, state, random_state=None):
pass
def _init(self, X, lengths, params):
init = 1. / self.n_components
if 's' in params or not hasattr(self, "startprob_"):
self.startprob_ = np.full(self.n_components, init)
if 't' in params or not hasattr(self, "transmat_"):
self.transmat_ = np.full((self.n_components, self.n_components),
init)
def _check(self):
self.startprob_ = np.asarray(self.startprob_)
if len(self.startprob_) != self.n_components:
raise ValueError("startprob_ must have length n_components")
if not np.allclose(self.startprob_.sum(), 1.0):
raise ValueError("startprob_ must sum to 1.0 (got {0:.4f})"
.format(self.startprob_.sum()))
self.transmat_ = np.asarray(self.transmat_)
if self.transmat_.shape != (self.n_components, self.n_components):
raise ValueError(
"transmat_ must have shape (n_components, n_components)")
if not np.allclose(self.transmat_.sum(axis=1), 1.0):
raise ValueError("rows of transmat_ must sum to 1.0 (got {0})"
.format(self.transmat_.sum(axis=1)))
# Methods used by self.fit()
def _initialize_sufficient_statistics(self):
stats = {'nobs': 0,
'start': np.zeros(self.n_components),
'trans': np.zeros((self.n_components, self.n_components))}
return stats
def _accumulate_sufficient_statistics(self, stats, seq, framelogprob,
posteriors, fwdlattice, bwdlattice,
params):
stats['nobs'] += 1
if 's' in params:
stats['start'] += posteriors[0]
if 't' in params:
n_observations, n_components = framelogprob.shape
# when the sample is of length 1, it contains no transitions
# so there is no reason to update our trans. matrix estimate
if n_observations <= 1:
return
lneta = np.zeros((n_observations - 1, n_components, n_components))
_hmmc._compute_lneta(n_observations, n_components, fwdlattice,
np.log(self.transmat_),
bwdlattice, framelogprob, lneta)
stats['trans'] += np.exp(logsumexp(lneta, axis=0))
def _do_mstep(self, stats, params):
# Based on Huang, Acero, Hon, "Spoken Language Processing",
# p. 443 - 445
if 's' in params:
self.startprob_ = self.startprob_prior - 1.0 + stats['start']
normalize(self.startprob_)
if 't' in params:
self.transmat_ = self.transmat_prior - 1.0 + stats['trans']
normalize(self.transmat_, axis=1)
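# --- Editor's sketch (not part of base.py): typical use of the public _BaseHMM
# --- interface through a concrete subclass. GaussianHMM is assumed to be the
# --- Gaussian-emission subclass shipped elsewhere in this package; the import
# --- is kept local so base.py itself stays free of that dependency.
def _example_fit_and_decode(X, lengths):
    from .hmm import GaussianHMM  # assumed location of the concrete subclass
    model = GaussianHMM(n_components=3, n_iter=20, tol=1e-3)
    model.fit(X, lengths)                       # EM training (see fit() above)
    logprob, states = model.decode(X, lengths)  # Viterbi decoding by default
    return logprob, states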
|
stringertheory/hmmlearn
|
hmmlearn/base.py
|
Python
|
bsd-3-clause
| 19,415
|
import unittest
import numpy as np
from robo.models.bayesian_linear_regression import BayesianLinearRegression
class TestBayesianLinearRegression(unittest.TestCase):
def setUp(self):
self.X = np.random.rand(10, 1)
y = self.X * 2
self.y = y[:, 0]
self.model = BayesianLinearRegression(alpha=1, beta=1000)
self.model.train(self.X, self.y, do_optimize=False)
def test_predict(self):
X_test = np.random.rand(10, 1)
m, v = self.model.predict(X_test)
assert len(m.shape) == 1
assert m.shape[0] == X_test.shape[0]
assert len(v.shape) == 1
assert v.shape[0] == X_test.shape[0]
np.testing.assert_almost_equal(m, X_test[:, 0] * 2, decimal=2)
np.testing.assert_almost_equal(v, np.ones([v.shape[0]]) / 1000., decimal=3)
def test_marginal_log_likelihood(self):
theta = np.array([np.log(1), np.log(1000)])
mll = self.model.marginal_log_likelihood(theta)
def test_negative_mll(self):
theta = np.array([np.log(1), np.log(1000)])
mll = self.model.negative_mll(theta)
def test_get_incumbent(self):
inc, inc_val = self.model.get_incumbent()
b = np.argmin(self.y)
assert np.all(inc == self.X[b])
assert inc_val == self.y[b]
if __name__ == "__main__":
unittest.main()
|
numairmansur/RoBO
|
test/test_models/test_bayesian_linear_regression.py
|
Python
|
bsd-3-clause
| 1,352
|
"""
Package for handling logical expressions.
"""
from .boolalg import (ITE, And, Equivalent, Implies, Nand, Nor, Not, Or,
POSform, SOPform, Xor, bool_map, false, simplify_logic,
to_cnf, to_dnf, to_nnf, true)
from .inference import satisfiable
__all__ = ('ITE', 'And', 'Equivalent', 'Implies', 'Nand', 'Nor', 'Not', 'Or',
'POSform', 'SOPform', 'Xor', 'bool_map', 'false', 'simplify_logic',
'to_cnf', 'to_dnf', 'to_nnf', 'true', 'satisfiable')
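# --- Editor's sketch (not part of the original __init__): the exported helpers
# --- in action. `symbols` is assumed to be importable from the top-level
# --- package, following the usual SymPy-style API.
def _example_logic_usage():
    from diofant import symbols  # assumed top-level symbol constructor
    x, y = symbols('x y')
    cnf = to_cnf(Or(And(x, y), Not(x)))  # rewrite into conjunctive normal form
    model = satisfiable(And(x, Not(y)))  # a satisfying assignment, e.g. {x: True, y: False}
    return cnf, model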
|
skirpichev/omg
|
diofant/logic/__init__.py
|
Python
|
bsd-3-clause
| 512
|
# -*- coding: utf-8 -*-
"""
    werkzeug.exceptions test
~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2011 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD license.
"""
from nose.tools import assert_raises
from werkzeug import exceptions
from werkzeug.exceptions import Aborter, abort
from werkzeug.wrappers import Response
def test_proxy_exception():
"""Proxy exceptions"""
orig_resp = Response('Hello World')
try:
abort(orig_resp)
except exceptions.HTTPException, e:
resp = e.get_response({})
else:
assert False, 'exception not raised'
assert resp is orig_resp
assert resp.data == 'Hello World'
def test_aborter():
"""Exception aborter"""
assert_raises(exceptions.BadRequest, abort, 400)
assert_raises(exceptions.Unauthorized, abort, 401)
assert_raises(exceptions.Forbidden, abort, 403)
assert_raises(exceptions.NotFound, abort, 404)
assert_raises(exceptions.MethodNotAllowed, abort, 405, ['GET', 'HEAD'])
assert_raises(exceptions.NotAcceptable, abort, 406)
assert_raises(exceptions.RequestTimeout, abort, 408)
assert_raises(exceptions.Gone, abort, 410)
assert_raises(exceptions.LengthRequired, abort, 411)
assert_raises(exceptions.PreconditionFailed, abort, 412)
assert_raises(exceptions.RequestEntityTooLarge, abort, 413)
assert_raises(exceptions.RequestURITooLarge, abort, 414)
assert_raises(exceptions.UnsupportedMediaType, abort, 415)
assert_raises(exceptions.InternalServerError, abort, 500)
assert_raises(exceptions.NotImplemented, abort, 501)
assert_raises(exceptions.BadGateway, abort, 502)
assert_raises(exceptions.ServiceUnavailable, abort, 503)
myabort = Aborter({1: exceptions.NotFound})
assert_raises(LookupError, myabort, 404)
assert_raises(exceptions.NotFound, myabort, 1)
myabort = Aborter(extra={1: exceptions.NotFound})
assert_raises(exceptions.NotFound, myabort, 404)
assert_raises(exceptions.NotFound, myabort, 1)
def test_exception_repr():
"""Repr and unicode of exceptions"""
exc = exceptions.NotFound()
assert unicode(exc) == '404: Not Found'
assert repr(exc) == "<NotFound '404: Not Found'>"
exc = exceptions.NotFound('Not There')
assert unicode(exc) == '404: Not There'
assert repr(exc) == "<NotFound '404: Not There'>"
def test_special_exceptions():
"""Special HTTP exceptions"""
exc = exceptions.MethodNotAllowed(['GET', 'HEAD', 'POST'])
h = dict(exc.get_headers({}))
assert h['Allow'] == 'GET, HEAD, POST'
assert 'The method DELETE is not allowed' in exc.get_description({
'REQUEST_METHOD': 'DELETE'
})
|
r-kitaev/lucid-python-werkzeug
|
tests/test_exceptions.py
|
Python
|
bsd-3-clause
| 2,680
|
import os
import finder
import re
import sys
def makefilter(name, xtrapath=None):
typ, nm, fullname = finder.identify(name, xtrapath)
if typ in (finder.SCRIPT, finder.GSCRIPT, finder.MODULE):
return ModFilter([os.path.splitext(nm)[0]])
if typ == finder.PACKAGE:
return PkgFilter([fullname])
if typ == finder.DIRECTORY:
return DirFilter([fullname])
if typ in (finder.BINARY, finder.PBINARY):
return FileFilter([nm])
return FileFilter([fullname])
class _Filter:
def __repr__(self):
return '<'+self.__class__.__name__+' '+repr(self.elements)+'>'
class _NameFilter(_Filter):
""" A filter mixin that matches (exactly) on name """
def matches(self, res):
return self.elements.get(res.name, 0)
class _PathFilter(_Filter):
""" A filter mixin that matches if the resource is below any of the paths"""
def matches(self, res):
p = os.path.normcase(os.path.abspath(res.path))
while len(p) > 3:
p = os.path.dirname(p)
if self.elements.get(p, 0):
return 1
return 0
class _ExtFilter(_Filter):
""" A filter mixin that matches based on file extensions (either way) """
include = 0
def matches(self, res):
fnd = self.elements.get(os.path.splitext(res.path)[1], 0)
if self.include:
return not fnd
return fnd
class _TypeFilter(_Filter):
""" A filter mixin that matches on resource type (either way) """
include = 0
def matches(self, res):
fnd = self.elements.get(res.typ, 0)
if self.include:
return not fnd
return fnd
class _PatternFilter(_Filter):
""" A filter that matches if re.search succeeds on the resource path """
def matches(self, res):
for regex in self.elements:
if regex.search(res.path):
return 1
return 0
class ExtFilter(_ExtFilter):
""" A file extension filter.
ExtFilter(extlist, include=0)
where extlist is a list of file extensions """
def __init__(self, extlist, include=0):
self.elements = {}
for ext in extlist:
if ext[0:1] != '.':
ext = '.'+ext
self.elements[ext] = 1
self.include = include
class TypeFilter(_TypeFilter):
""" A filter for resource types.
TypeFilter(typlist, include=0)
where typlist is a subset of ['a','b','d','m','p','s','x','z'] """
def __init__(self, typlist, include=0):
self.elements = {}
for typ in typlist:
self.elements[typ] = 1
self.include = include
class FileFilter(_NameFilter):
""" A filter for data files """
def __init__(self, filelist):
self.elements = {}
for f in filelist:
self.elements[f] = 1
class ModFilter(_NameFilter):
""" A filter for Python modules.
ModFilter(modlist) where modlist is eg ['macpath', 'dospath'] """
def __init__(self, modlist):
self.elements = {}
for mod in modlist:
self.elements[mod] = 1
class DirFilter(_PathFilter):
""" A filter based on directories.
DirFilter(dirlist)
dirs may be relative and will be normalized.
Subdirectories of dirs will be excluded. """
def __init__(self, dirlist):
self.elements = {}
for pth in dirlist:
pth = os.path.normcase(os.path.abspath(pth))
self.elements[pth] = 1
class PkgFilter(_PathFilter):
"""At this time, identical to a DirFilter (being lazy) """
def __init__(self, pkglist):
#warning - pkgs are expected to be full directories
self.elements = {}
for pkg in pkglist:
pth = os.path.normcase(os.path.abspath(pkg))
self.elements[pth] = 1
class StdLibFilter(_PathFilter):
""" A filter that excludes anything found in the standard library """
def __init__(self):
pth = os.path.normcase(os.path.join(sys.exec_prefix, 'lib'))
self.elements = {pth:1}
class PatternFilter(_PatternFilter):
""" A filter that excludes if any pattern is found in resource's path """
def __init__(self, patterns):
self.elements = []
for pat in patterns:
self.elements.append(re.compile(pat))
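# --- Editor's sketch (not part of the original module): combining filters
# --- against a resource-like object. `_Res` is a made-up stand-in for whatever
# --- `finder` returns; only the `.name`, `.path` and `.typ` attributes used by
# --- the filters above are assumed.
def _example_filters():
    class _Res:
        def __init__(self, name, path, typ):
            self.name, self.path, self.typ = name, path, typ
    res = _Res('macpath', os.path.join('lib', 'macpath.py'), 'm')
    checks = [ModFilter(['macpath']), ExtFilter(['pyc']), PatternFilter([r'test_'])]
    # Expected truth values for this resource: [True, False, False]
    return [bool(f.matches(res)) for f in checks]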
|
toontownfunserver/Panda3D-1.9.0
|
direct/pyinst/tocfilter.py
|
Python
|
bsd-3-clause
| 4,386
|
from __future__ import absolute_import
from rest_framework.response import Response
from sentry.plugins.base import plugins
from sentry.api.bases.project import ProjectEndpoint
from sentry.api.serializers import serialize
from sentry.api.serializers.models.plugin import PluginSerializer
class ProjectPluginsEndpoint(ProjectEndpoint):
def get(self, request, project):
context = serialize(
[plugin for plugin in plugins.configurable_for_project(project, version=None)],
request.user,
PluginSerializer(project),
)
return Response(context)
|
beeftornado/sentry
|
src/sentry/api/endpoints/project_plugins.py
|
Python
|
bsd-3-clause
| 605
|
# -*- coding: utf-8 -*-
import logging, logging.handlers
from django.conf import settings
def get_logger(name, level=logging.INFO, format='[%(asctime)s] %(message)s', handler=None, filename=None):
new_logger = logging.getLogger(name)
new_logger.setLevel(level)
if not handler:
filename = filename or '%s/logs/%s.log' % (settings.HOME_DIR, name)
handler = logging.FileHandler(filename)
handler.setFormatter(logging.Formatter(format))
new_logger.addHandler(handler)
return new_logger
# `logger` must exist before the module-level check below; previously it was
# referenced without ever being assigned, raising a NameError at import time.
logger = None
if hasattr(settings, 'LOG_FILENAME') and not logger:
    handler = logging.handlers.TimedRotatingFileHandler(settings.LOG_FILENAME, when='midnight')
    logger = get_logger('default', handler=handler)
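# --- Editor's sketch (not part of the original module): typical use of
# --- get_logger. Passing `filename` explicitly avoids depending on
# --- settings.HOME_DIR; the path below is only illustrative.
def _example_logging():
    log = get_logger('payments', level=logging.DEBUG, filename='/tmp/payments.log')
    log.info('charge accepted')
    return log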
|
leliel12/handy
|
handy/logger.py
|
Python
|
bsd-3-clause
| 733
|
from __future__ import absolute_import
import os
from celery import Celery
# set the default Django settings module for the 'celery' program.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'config.settings.production')
from django.conf import settings
app = Celery('tecnoservicio.tareas')
# Using a string here means the worker will not have to
# pickle the object when using Windows.
app.config_from_object('django.conf:settings')
app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)
@app.task(bind=True)
def debug_task(self):
print('Request: {0!r}'.format(self.request))
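# --- Editor's sketch (not part of the original module): enqueueing the task
# --- defined above. delay() only publishes the message; a broker and a running
# --- worker (configured through the Django settings loaded above) are needed
# --- for it to actually execute.
def _example_enqueue():
    return debug_task.delay()  # returns an AsyncResult handle immediately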
|
flipjack/tecnoservicio
|
config/settings/celery.py
|
Python
|
bsd-3-clause
| 588
|