Dataset columns: repo_name (string, lengths 5–100) · ref (string, lengths 12–67) · path (string, lengths 4–244) · copies (string, lengths 1–8) · content (string, lengths 0–1.05M; ⌀ marks a null value).

| repo_name | ref | path | copies | content |
|---|---|---|---|---|
aarticianpc/greenpointtrees
|
refs/heads/master
|
greenpointtrees/bin/pilprint.py
|
2
|
#!/Volumes/SSDATA/www/greenpointtrees/greenpointtrees/bin/python
#
# The Python Imaging Library.
# $Id$
#
# print image files to postscript printer
#
# History:
# 0.1   1996-04-20 fl   Created
# 0.2   1996-10-04 fl   Use draft mode when converting.
# 0.3   2003-05-06 fl   Fixed a typo or two.
#
from __future__ import print_function

VERSION = "pilprint 0.3/2003-05-05"

from PIL import Image
from PIL import PSDraw

letter = (1.0*72, 1.0*72, 7.5*72, 10.0*72)


def description(file, image):
    import os
    title = os.path.splitext(os.path.split(file)[1])[0]
    format = " (%dx%d "
    if image.format:
        format = " (" + image.format + " %dx%d "
    return title + format % image.size + image.mode + ")"

import getopt, os, sys

if len(sys.argv) == 1:
    print("PIL Print 0.2a1/96-10-04 -- print image files")
    print("Usage: pilprint files...")
    print("Options:")
    print("  -c            colour printer (default is monochrome)")
    print("  -p            print via lpr (default is stdout)")
    print("  -P <printer>  same as -p but use given printer")
    sys.exit(1)

try:
    opt, argv = getopt.getopt(sys.argv[1:], "cdpP:")
except getopt.error as v:
    print(v)
    sys.exit(1)

printer = None   # print to stdout
monochrome = 1   # reduce file size for most common case

for o, a in opt:
    if o == "-d":
        # debug: show available drivers
        Image.init()
        print(Image.ID)
        sys.exit(1)
    elif o == "-c":
        # colour printer
        monochrome = 0
    elif o == "-p":
        # default printer channel
        printer = "lpr"
    elif o == "-P":
        # printer channel
        printer = "lpr -P%s" % a

for file in argv:
    try:
        im = Image.open(file)
        title = description(file, im)
        if monochrome and im.mode not in ["1", "L"]:
            im.draft("L", im.size)
            im = im.convert("L")
        if printer:
            fp = os.popen(printer, "w")
        else:
            fp = sys.stdout
        ps = PSDraw.PSDraw(fp)
        ps.begin_document()
        ps.setfont("Helvetica-Narrow-Bold", 18)
        ps.text((letter[0], letter[3]+24), title)
        ps.setfont("Helvetica-Narrow-Bold", 8)
        ps.text((letter[0], letter[1]-30), VERSION)
        ps.image(letter, im)
        ps.end_document()
    except:
        print("cannot print image", end=' ')
        print("(%s:%s)" % (sys.exc_info()[0], sys.exc_info()[1]))
|
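For context, here is a hedged, minimal sketch of the PSDraw sequence pilprint.py performs per file. It is not part of the repo; the input filename is hypothetical, and PSDraw's default output channel is standard output.

```python
# Minimal sketch (assumed input file) of the core PSDraw flow in pilprint.py.
from PIL import Image, PSDraw

letter = (1.0 * 72, 1.0 * 72, 7.5 * 72, 10.0 * 72)  # printable area in points

im = Image.open("example.jpg").convert("L")  # monochrome, as the default path does
ps = PSDraw.PSDraw()                         # defaults to standard output
ps.begin_document()
ps.setfont("Helvetica-Narrow-Bold", 18)
ps.text((letter[0], letter[3] + 24), "example title")
ps.image(letter, im)                         # scale the image into the box
ps.end_document()
```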
WillGuan105/django
|
refs/heads/master
|
tests/m2m_multiple/__init__.py
|
12133432
| |
orwell-int/proxy-simulator
|
refs/heads/master
|
orwell/proxy_simulator/beta_tanks.py
|
1
|
import ode
import orwell.proxy_simulator.tanks as tanks
class Tank3(tanks.BaseTank):
def __init__(self, robot_descriptor):
super(Tank3, self).__init__(robot_descriptor)
def create_objects(self, world):
print 'chassis'
# chassis
density = 10
lx, ly, lz = (8, 0.5, 8)
# Create body
body = ode.Body(world.world)
mass = ode.Mass()
mass.setBox(density, lx, ly, lz)
body.setMass(mass)
# Set parameters for drawing the body
body.shape = "box"
body.boxsize = (lx, ly, lz)
# Create a box geom for collision detection
geom = ode.GeomBox(world.space, lengths=body.boxsize)
geom.setBody(body)
#body.setPosition((0, 3, 0))
world.add_body(body)
world.add_geom(geom)
self._chassis_body = body
density = 1
print 'left wheel'
# left wheel
radius = 1
height = 0.2
px, py, pz = (lx / 2, 0, -(lz / 2))
left_wheel_body = ode.Body(world.world)
wheel_mass = ode.Mass()
#wheel_mass.setSphere(density, radius)
wheel_mass.setCylinder(density, 1, radius, height)
left_wheel_body.setMass(wheel_mass)
#left_wheel_geom = ode.GeomSphere(world.space, radius=radius)
left_wheel_geom = ode.GeomCylinder(world.space, radius=radius,
length=height)
left_wheel_geom.setBody(left_wheel_body)
#left_wheel_body.setPosition((px, py, pz))
left_wheel_body.setRotation((0, 0, 1,
0, 1, 0,
-1, 0, 0))
left_wheel_body.setPosition((px - height / 2, py, pz))
world.add_body(left_wheel_body)
world.add_geom(left_wheel_geom)
print 'right wheel'
# right wheel
#radius = 1
px = -lx / 2
right_wheel_body = ode.Body(world.world)
wheel_mass = ode.Mass()
#wheel_mass.setSphere(density, radius)
wheel_mass.setCylinder(density, 1, radius, height)
right_wheel_body.setMass(wheel_mass)
#right_wheel_geom = ode.GeomSphere(world.space, radius=radius)
right_wheel_geom = ode.GeomCylinder(world.space, radius=radius,
length=height)
right_wheel_geom.setBody(right_wheel_body)
#right_wheel_body.setPosition((px, py, pz))
right_wheel_body.setRotation((0, 0, 1,
0, 1, 0,
-1, 0, 0))
right_wheel_body.setPosition((px - height / 2, py, pz))
world.add_body(right_wheel_body)
world.add_geom(right_wheel_geom)
print 'front wheel'
# front wheel
#radius = 1
px, py, pz = (0, 0, lz / 2)
front_wheel_body = ode.Body(world.world)
wheel_mass = ode.Mass()
wheel_mass.setSphere(density, radius)
front_wheel_body.setMass(wheel_mass)
front_wheel_geom = ode.GeomSphere(world.space, radius=radius)
front_wheel_geom.setBody(front_wheel_body)
front_wheel_body.setPosition((px, py, pz))
world.add_body(front_wheel_body)
world.add_geom(front_wheel_geom)
#left_wheel_joint = ode.Hinge2Joint(world.world)
left_wheel_joint = ode.HingeJoint(world.world)
left_wheel_joint.attach(body, left_wheel_body)
left_wheel_joint.setAnchor(left_wheel_body.getPosition())
left_wheel_joint.setAxis((-1, 0, 0))
#left_wheel_joint.setAxis1((0, 1, 0))
#left_wheel_joint.setAxis2((1, 0, 0))
left_wheel_joint.setParam(ode.ParamFMax, 500000)
#left_wheel_joint.setParam(ode.ParamLoStop, 0)
#left_wheel_joint.setParam(ode.ParamHiStop, 0)
#left_wheel_joint.setParam(ode.ParamFMax2, 0.1)
#left_wheel_joint.setParam(ode.ParamSuspensionERP, 0.2)
#left_wheel_joint.setParam(ode.ParamSuspensionCFM, 0.1)
self._left_wheel_joints.append(left_wheel_joint)
#right_wheel_joint = ode.Hinge2Joint(world.world)
right_wheel_joint = ode.HingeJoint(world.world)
right_wheel_joint.attach(body, right_wheel_body)
right_wheel_joint.setAnchor(right_wheel_body.getPosition())
right_wheel_joint.setAxis((-1, 0, 0))
#right_wheel_joint.setAxis1((0, 1, 0))
#right_wheel_joint.setAxis2((1, 0, 0))
right_wheel_joint.setParam(ode.ParamFMax, 500000)
#right_wheel_joint.setParam(ode.ParamLoStop, 0)
#right_wheel_joint.setParam(ode.ParamHiStop, 0)
#right_wheel_joint.setParam(ode.ParamFMax2, 0.1)
#right_wheel_joint.setParam(ode.ParamSuspensionERP, 0.2)
#right_wheel_joint.setParam(ode.ParamSuspensionCFM, 0.1)
self._right_wheel_joints.append(right_wheel_joint)
front_wheel_joint = ode.BallJoint(world.world)
front_wheel_joint.attach(body, front_wheel_body)
front_wheel_joint.setAnchor(front_wheel_body.getPosition())
front_wheel_joint.setParam(ode.ParamFMax, 5000)
class TankWithSpheres(tanks.BaseTank):
def __init__(self, robot_descriptor):
super(TankWithSpheres, self).__init__(robot_descriptor)
def create_objects(self, world):
print 'chassis'
# chassis
density = 10
lx, ly, lz = (8, 0.5, 8)
# Create body
body = ode.Body(world.world)
mass = ode.Mass()
mass.setBox(density, lx, ly, lz)
body.setMass(mass)
# Set parameters for drawing the body
body.shape = "box"
body.boxsize = (lx, ly, lz)
# Create a box geom for collision detection
geom = ode.GeomBox(world.space, lengths=body.boxsize)
geom.setBody(body)
#body.setPosition((0, 3, 0))
world.add_body(body)
world.add_geom(geom)
self._chassis_body = body
density = 1
print 'left wheel'
# left wheel
radius = 1
height = 0.2
px, py, pz = (lx / 2, 0, -(lz / 2))
left_wheel_body = ode.Body(world.world)
wheel_mass = ode.Mass()
wheel_mass.setSphere(density, radius)
left_wheel_body.setMass(wheel_mass)
left_wheel_geom = ode.GeomSphere(world.space, radius=radius)
left_wheel_geom.setBody(left_wheel_body)
left_wheel_body.setPosition((px, py, pz))
world.add_body(left_wheel_body)
world.add_geom(left_wheel_geom)
print 'right wheel'
# right wheel
#radius = 1
px = -(lx / 2)
right_wheel_body = ode.Body(world.world)
wheel_mass = ode.Mass()
wheel_mass.setSphere(density, radius)
right_wheel_body.setMass(wheel_mass)
right_wheel_geom = ode.GeomSphere(world.space, radius=radius)
right_wheel_geom.setBody(right_wheel_body)
right_wheel_body.setPosition((px, py, pz))
world.add_body(right_wheel_body)
world.add_geom(right_wheel_geom)
print 'front wheel'
# front wheel
#radius = 1
px, py, pz = (0, 0, lz / 2)
front_wheel_body = ode.Body(world.world)
wheel_mass = ode.Mass()
wheel_mass.setSphere(density, radius)
front_wheel_body.setMass(wheel_mass)
front_wheel_geom = ode.GeomSphere(world.space, radius=radius)
front_wheel_geom.setBody(front_wheel_body)
front_wheel_body.setPosition((px, py, pz))
world.add_body(front_wheel_body)
world.add_geom(front_wheel_geom)
left_wheel_joint = ode.HingeJoint(world.world)
left_wheel_joint.attach(body, left_wheel_body)
left_wheel_joint.setAnchor(left_wheel_body.getPosition())
left_wheel_joint.setAxis((-1, 0, 0))
left_wheel_joint.setParam(ode.ParamFMax, 500000)
self._left_wheel_joints.append(left_wheel_joint)
right_wheel_joint = ode.HingeJoint(world.world)
right_wheel_joint.attach(body, right_wheel_body)
right_wheel_joint.setAnchor(right_wheel_body.getPosition())
right_wheel_joint.setAxis((-1, 0, 0))
right_wheel_joint.setParam(ode.ParamFMax, 500000)
self._right_wheel_joints.append(right_wheel_joint)
front_wheel_joint = ode.BallJoint(world.world)
front_wheel_joint.attach(body, front_wheel_body)
front_wheel_joint.setAnchor(front_wheel_body.getPosition())
front_wheel_joint.setParam(ode.ParamFMax, 5000)
|
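Both tank classes above repeat one pattern: give a body a mass and a collision geom, then motorize it against the chassis with a hinge joint. A minimal standalone PyODE sketch of that pattern follows; the values mirror the code (density-10 box chassis, density-1 sphere wheel), but the names and the absence of the repo's world wrapper are ours.

```python
# Standalone PyODE sketch of the chassis-plus-wheel hinge pattern used above.
import ode

world = ode.World()
space = ode.Space()

chassis = ode.Body(world)
m = ode.Mass()
m.setBox(10, 8, 0.5, 8)            # density, lx, ly, lz
chassis.setMass(m)

wheel = ode.Body(world)
wm = ode.Mass()
wm.setSphere(1, 1.0)               # density, radius
wheel.setMass(wm)
wheel.setPosition((4, 0, -4))      # lx/2, 0, -lz/2

geom = ode.GeomSphere(space, radius=1.0)
geom.setBody(wheel)

hinge = ode.HingeJoint(world)
hinge.attach(chassis, wheel)
hinge.setAnchor(wheel.getPosition())
hinge.setAxis((-1, 0, 0))
hinge.setParam(ode.ParamFMax, 500000)  # motor torque cap, as in the code
hinge.setParam(ode.ParamVel, 2.0)      # target angular velocity to drive the wheel
```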
drewis/android_kernel_msm
|
refs/heads/android-msm-mako-3.4-jb-mr1
|
tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/SchedGui.py
|
12980
|
# SchedGui.py - Python extension for perf script, basic GUI code for
# traces drawing and overview.
#
# Copyright (C) 2010 by Frederic Weisbecker <fweisbec@gmail.com>
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
try:
import wx
except ImportError:
raise ImportError, "You need to install the wxpython lib for this script"
class RootFrame(wx.Frame):
Y_OFFSET = 100
RECT_HEIGHT = 100
RECT_SPACE = 50
EVENT_MARKING_WIDTH = 5
def __init__(self, sched_tracer, title, parent = None, id = -1):
wx.Frame.__init__(self, parent, id, title)
(self.screen_width, self.screen_height) = wx.GetDisplaySize()
self.screen_width -= 10
self.screen_height -= 10
self.zoom = 0.5
self.scroll_scale = 20
self.sched_tracer = sched_tracer
self.sched_tracer.set_root_win(self)
(self.ts_start, self.ts_end) = sched_tracer.interval()
self.update_width_virtual()
self.nr_rects = sched_tracer.nr_rectangles() + 1
self.height_virtual = RootFrame.Y_OFFSET + (self.nr_rects * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))
# whole window panel
self.panel = wx.Panel(self, size=(self.screen_width, self.screen_height))
# scrollable container
self.scroll = wx.ScrolledWindow(self.panel)
self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale)
self.scroll.EnableScrolling(True, True)
self.scroll.SetFocus()
# scrollable drawing area
self.scroll_panel = wx.Panel(self.scroll, size=(self.screen_width - 15, self.screen_height / 2))
self.scroll_panel.Bind(wx.EVT_PAINT, self.on_paint)
self.scroll_panel.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
self.scroll_panel.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)
self.scroll.Bind(wx.EVT_PAINT, self.on_paint)
self.scroll.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
self.scroll.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)
self.scroll.Fit()
self.Fit()
self.scroll_panel.SetDimensions(-1, -1, self.width_virtual, self.height_virtual, wx.SIZE_USE_EXISTING)
self.txt = None
self.Show(True)
def us_to_px(self, val):
return val / (10 ** 3) * self.zoom
def px_to_us(self, val):
return (val / self.zoom) * (10 ** 3)
def scroll_start(self):
(x, y) = self.scroll.GetViewStart()
return (x * self.scroll_scale, y * self.scroll_scale)
def scroll_start_us(self):
(x, y) = self.scroll_start()
return self.px_to_us(x)
def paint_rectangle_zone(self, nr, color, top_color, start, end):
offset_px = self.us_to_px(start - self.ts_start)
width_px = self.us_to_px(end - self.ts_start)
offset_py = RootFrame.Y_OFFSET + (nr * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))
width_py = RootFrame.RECT_HEIGHT
dc = self.dc
if top_color is not None:
(r, g, b) = top_color
top_color = wx.Colour(r, g, b)
brush = wx.Brush(top_color, wx.SOLID)
dc.SetBrush(brush)
dc.DrawRectangle(offset_px, offset_py, width_px, RootFrame.EVENT_MARKING_WIDTH)
width_py -= RootFrame.EVENT_MARKING_WIDTH
offset_py += RootFrame.EVENT_MARKING_WIDTH
(r ,g, b) = color
color = wx.Colour(r, g, b)
brush = wx.Brush(color, wx.SOLID)
dc.SetBrush(brush)
dc.DrawRectangle(offset_px, offset_py, width_px, width_py)
def update_rectangles(self, dc, start, end):
start += self.ts_start
end += self.ts_start
self.sched_tracer.fill_zone(start, end)
def on_paint(self, event):
dc = wx.PaintDC(self.scroll_panel)
self.dc = dc
width = min(self.width_virtual, self.screen_width)
(x, y) = self.scroll_start()
start = self.px_to_us(x)
end = self.px_to_us(x + width)
self.update_rectangles(dc, start, end)
def rect_from_ypixel(self, y):
y -= RootFrame.Y_OFFSET
rect = y / (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)
height = y % (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)
if rect < 0 or rect > self.nr_rects - 1 or height > RootFrame.RECT_HEIGHT:
return -1
return rect
def update_summary(self, txt):
if self.txt:
self.txt.Destroy()
self.txt = wx.StaticText(self.panel, -1, txt, (0, (self.screen_height / 2) + 50))
def on_mouse_down(self, event):
(x, y) = event.GetPositionTuple()
rect = self.rect_from_ypixel(y)
if rect == -1:
return
t = self.px_to_us(x) + self.ts_start
self.sched_tracer.mouse_down(rect, t)
def update_width_virtual(self):
self.width_virtual = self.us_to_px(self.ts_end - self.ts_start)
def __zoom(self, x):
self.update_width_virtual()
(xpos, ypos) = self.scroll.GetViewStart()
xpos = self.us_to_px(x) / self.scroll_scale
self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale, xpos, ypos)
self.Refresh()
def zoom_in(self):
x = self.scroll_start_us()
self.zoom *= 2
self.__zoom(x)
def zoom_out(self):
x = self.scroll_start_us()
self.zoom /= 2
self.__zoom(x)
def on_key_press(self, event):
key = event.GetRawKeyCode()
if key == ord("+"):
self.zoom_in()
return
if key == ord("-"):
self.zoom_out()
return
key = event.GetKeyCode()
(x, y) = self.scroll.GetViewStart()
if key == wx.WXK_RIGHT:
self.scroll.Scroll(x + 1, y)
elif key == wx.WXK_LEFT:
self.scroll.Scroll(x - 1, y)
elif key == wx.WXK_DOWN:
self.scroll.Scroll(x, y + 1)
elif key == wx.WXK_UP:
self.scroll.Scroll(x, y - 1)
|
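The zoom logic above hinges on us_to_px and px_to_us being exact inverses at any zoom level; here they are isolated for clarity (the zoom value is assumed, and float inputs avoid the Python 2 integer-division pitfall).

```python
# The pixel <-> microsecond mapping from RootFrame, isolated.
zoom = 0.5

def us_to_px(val):
    return val / (10 ** 3) * zoom

def px_to_us(val):
    return (val / zoom) * (10 ** 3)

assert px_to_us(us_to_px(2000.0)) == 2000.0  # round-trips for any nonzero zoom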
renatoGarcia/tangram
|
refs/heads/master
|
tangram/recipes/__init__.py
|
1
|
# Copyright 2017 The Tangram Developers. See the AUTHORS file at the
# top-level directory of this distribution and at
# https://github.com/renatoGarcia/tangram/blob/master/AUTHORS.
#
# This file is part of Tangram.
#
# Tangram is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Tangram is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Tangram in the COPYING and COPYING.LESSER files.
# If not, see <http://www.gnu.org/licenses/>.
from .imshow import imshow
|
bitcommoditiz/Silverz-XAGz
|
refs/heads/master
|
contrib/seeds/makeseeds.py
|
753
|
#!/usr/bin/env python
#
# Generate pnSeed[] from Pieter's DNS seeder
#

NSEEDS = 600

import re
import sys
from subprocess import check_output


def main():
    lines = sys.stdin.readlines()
    ips = []

    pattern = re.compile(r"^(\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3}):9333")
    for line in lines:
        m = pattern.match(line)
        if m is None:
            continue
        ip = 0
        for i in range(0, 4):
            ip = ip + (int(m.group(i+1)) << (8*(i)))
        if ip == 0:
            continue
        ips.append(ip)

    for row in range(0, min(NSEEDS, len(ips)), 8):
        print " " + ", ".join(["0x%08x" % i for i in ips[row:row+8]]) + ","

if __name__ == '__main__':
    main()
|
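The inner loop above packs the four dotted-quad octets little-endian into one integer. A standalone restatement (the helper name is ours):

```python
# Restatement of the octet packing in makeseeds.py; pack_ip is a name we chose.
def pack_ip(a, b, c, d):
    return a | (b << 8) | (c << 16) | (d << 24)

assert pack_ip(1, 2, 3, 4) == 0x04030201
assert "0x%08x" % pack_ip(127, 0, 0, 1) == "0x0100007f"
```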
Yukarumya/Yukarum-Redfoxes
|
refs/heads/master
|
js/src/gdb/tests/test-GCCellPtr.py
|
2
|
# Tests for GCCellPtr pretty-printing
assert_subprinter_registered('SpiderMonkey', 'JS::GCCellPtr')
run_fragment('GCCellPtr.simple')
assert_pretty('nulll', 'JS::GCCellPtr(nullptr)')
assert_pretty('object', 'JS::GCCellPtr((JSObject*) )')
assert_pretty('string', 'JS::GCCellPtr((JSString*) )')
assert_pretty('symbol', 'JS::GCCellPtr((JS::Symbol*) )')
|
leture/sorl-thumbnail
|
refs/heads/master
|
tests/settings/imagemagick.py
|
28
|
from .default import *
THUMBNAIL_ENGINE = 'sorl.thumbnail.engines.convert_engine.Engine'
THUMBNAIL_CONVERT = 'convert'
|
theshadowx/enigma2
|
refs/heads/master
|
lib/python/Components/Sources/FrontendInfo.py
|
35
|
from enigma import iPlayableService
from Source import Source
from Components.PerServiceDisplay import PerServiceBase


class FrontendInfo(Source, PerServiceBase):
    def __init__(self, service_source=None, frontend_source=None, navcore=None):
        self.navcore = None
        Source.__init__(self)
        if navcore:
            PerServiceBase.__init__(self, navcore,
                {
                    iPlayableService.evTunedIn: self.updateFrontendData,
                    iPlayableService.evEnd: self.serviceEnd
                })
        self.service_source = service_source
        self.frontend_source = frontend_source
        self.updateFrontendData()

    def serviceEnd(self):
        # import pdb
        # pdb.set_trace()
        self.slot_number = self.frontend_type = None
        self.changed((self.CHANGED_CLEAR, ))

    def updateFrontendData(self):
        data = self.getFrontendData()
        if not data:
            self.slot_number = self.frontend_type = None
        else:
            self.slot_number = data.get("tuner_number")
            self.frontend_type = data.get("tuner_type")
        self.changed((self.CHANGED_ALL, ))

    def getFrontendData(self):
        if self.frontend_source:
            frontend = self.frontend_source()
            dict = { }
            if frontend:
                frontend.getFrontendData(dict)
            return dict
        elif self.service_source:
            service = self.navcore and self.service_source()
            feinfo = service and service.frontendInfo()
            return feinfo and feinfo.getFrontendData()
        elif self.navcore:
            service = self.navcore.getCurrentService()
            feinfo = service and service.frontendInfo()
            return feinfo and feinfo.getFrontendData()
        else:
            return None

    def destroy(self):
        if not self.frontend_source and not self.service_source:
            PerServiceBase.destroy(self)
        Source.destroy(self)
|
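The getFrontendData fallbacks above rely on Python's short-circuiting `and` to null-guard each step of the chain. A minimal restatement with hypothetical names:

```python
# Hypothetical restatement of the null-guarded chain in getFrontendData.
class Service:
    def frontendInfo(self):
        return None  # no frontend info available

service = Service()
feinfo = service and service.frontendInfo()  # None if service is falsy
data = feinfo and feinfo.getFrontendData()   # short-circuits: no AttributeError
assert data is None
```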
MaxiNet/netSLS
|
refs/heads/master
|
network_emulator/process_manager.py
|
2
|
"""
Copyright 2015 Malte Splietker
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
import os.path
import subprocess
import threading
import time
import configuration
import network_emulator
import process
import ssh_tools
import utils
logger = logging.getLogger(__name__)
class ProcessManager(object):
"""Watches the execution of processes on MaxiNet nodes.
The ProcessManager polls all MaxiNet workers for the state of invoked
background processes. When a remote process terminates, a callback on the
corresponding Process object is performed. Additionally, if the process
terminated with an exit code other than 0, the process's stdout and stderr
are logged.
Attributes:
__interval: Polling interval in ms.
__running_processes: Mapping of MaxiNet workers to running processes.
"""
def __init__(self, interval):
self.__thread = threading.Thread(target=self.run)
self.__interval = interval
self.__running_processes = dict()
self.__running_processes_lock = threading.Lock()
self.__stop = threading.Event()
def start(self):
self.__thread.start()
def stop(self):
"""Stops the thread and waits for termination."""
self.__stop.set()
if self.__thread.isAlive():
self.__thread.join()
def run(self):
self.__stop.clear()
for worker in network_emulator.NetworkEmulator.get_instance().cluster.worker:
self.__running_processes[worker] = dict()
while not self.__stop.isSet():
for worker in network_emulator.NetworkEmulator.get_instance().cluster.worker:
successful_processes = []
try:
successful_processes = self.__worker_get_pids_from_file(
worker,
os.path.join(configuration.get_worker_working_directory(), "pids_successful"))
logger.debug("Successful processes {!s}".format(successful_processes))
except subprocess.CalledProcessError:
# This is possible if the file pids_successful does not yet exist
pass
failed_processes = []
try:
failed_processes = self.__worker_get_pids_from_file(
worker,
os.path.join(configuration.get_worker_working_directory(), "pids_failed"))
logger.debug("Failed processes {!s}".format(failed_processes))
except subprocess.CalledProcessError:
# This is possible if the file pids_failed does not yet exist
pass
# For every failed process, retrieve and log the process's output from the worker
for pid in failed_processes:
try:
cat_cmd = "cat {1}".format(
worker.hn(),
os.path.join(configuration.get_worker_working_directory(),
"processes", str(pid)))
logfile_content = ssh_tools.worker_ssh(worker, cat_cmd)
logfile_formatted = utils.indent(logfile_content, 2)
logger.error("Process with PID {0} failed:\n{1}".format(
pid, logfile_formatted))
except subprocess.CalledProcessError, err:
logger.error("Failed to retrieve logfile for process with PID %i" % pid)
# Should not happen, as every daemonized process writes to a logfile
raise err
# post-process successful and failed processes
with self.__running_processes_lock:
# all successful transmissions
for pid in successful_processes:
if pid in self.__running_processes[worker]:
self.__running_processes[worker][pid].call_terminated(
process.Process.SUCCESSFUL)
del self.__running_processes[worker][pid]
else:
logger.error("PID of successful transmission not found")
# all unsuccessful transmissions
for pid in failed_processes:
if pid in self.__running_processes[worker]:
self.__running_processes[worker][pid].call_terminated(
process.Process.FAILED)
del self.__running_processes[worker][pid]
time.sleep(self.__interval)
def start_process(self, proc):
"""Starts the given process and adds it to the list of running processes.
Args:
proc: Process to start.
Returns:
True if the process started successfully, False otherwise.
"""
if not proc.start():
return False
with self.__running_processes_lock:
if proc.get_worker() not in self.__running_processes:
self.__running_processes[proc.get_worker()] = dict()
self.__running_processes[proc.get_worker()][proc.pid] = proc
return True
def reset(self):
"""Reset the process manager.
Kill all processes still running and clear the list.
"""
self.__kill_all_processes()
self.__running_processes = dict()
self.__thread = threading.Thread(target=self.run)
def kill(self, pid):
"""Kill running process with the given PID.
Args:
pid: PID of the process to kill.
"""
with self.__running_processes_lock:
# __running_processes maps workers to {pid: process}, so look the PID
# up in each worker's table rather than in the outer mapping.
for processes in self.__running_processes.values():
if pid in processes:
processes[pid].kill()
del processes[pid]
return
def __kill_all_processes(self):
"""Kill all processes that are stil listed as running."""
with self.__running_processes_lock:
if len(self.__running_processes) == 0:
return
for worker, processes in self.__running_processes.items():
kill_cmd = "kill -9"
for pid in processes.keys():
kill_cmd += " {}".format(pid)
ssh_tools.worker_ssh(worker, kill_cmd)
@staticmethod
def __worker_get_pids_from_file(worker, path):
"""Returns a list of PIDs listed in a file on the specified worker node.
Args:
worker: Worker node to read the file on.
path: File containing PIDs.
Returns:
A list of PIDs specified in the file.
"""
# Rotate the pid file, if it exists
mv_cmd = "mv {1} {1}.0 &> /dev/null".format(worker.hn(), path)
ssh_tools.worker_ssh(worker, mv_cmd)
# get rotated file's content
cat_cmd = "cat {1}".format(worker.hn(), "%s.0" % path)
content = ssh_tools.worker_ssh(worker, cat_cmd)
return [int(x) for x in content.split()]
|
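The start/stop/run trio above is a standard event-controlled polling thread. A self-contained sketch of just that skeleton (the class name is hypothetical; here the interval is in seconds, which is what time.sleep expects):

```python
# Self-contained sketch of the stop-event polling skeleton ProcessManager uses.
import threading
import time

class Poller(object):
    def __init__(self, interval):
        self._interval = interval
        self._stop = threading.Event()
        self._thread = threading.Thread(target=self._run)

    def start(self):
        self._thread.start()

    def stop(self):
        """Signal the loop to exit and wait for the thread to finish."""
        self._stop.set()
        if self._thread.is_alive():
            self._thread.join()

    def _run(self):
        while not self._stop.is_set():
            pass  # poll workers here, as ProcessManager.run does
            time.sleep(self._interval)
```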
firerszd/kbengine
|
refs/heads/master
|
kbe/src/lib/python/Lib/plat-darwin/IN.py
|
109
|
# Generated by h2py from /usr/include/netinet/in.h
# Included from sys/appleapiopts.h
# Included from sys/_types.h
# Included from sys/cdefs.h
def __P(protos): return protos
def __STRING(x): return #x
def __P(protos): return ()
def __STRING(x): return "x"
def __attribute__(x): return
def __COPYRIGHT(s): return __IDSTRING(copyright,s)
def __RCSID(s): return __IDSTRING(rcsid,s)
def __SCCSID(s): return __IDSTRING(sccsid,s)
def __PROJECT_VERSION(s): return __IDSTRING(project_version,s)
__DARWIN_UNIX03 = 1
__DARWIN_UNIX03 = 0
__DARWIN_UNIX03 = 0
__DARWIN_UNIX03 = 1
__DARWIN_64_BIT_INO_T = 1
__DARWIN_64_BIT_INO_T = 0
__DARWIN_64_BIT_INO_T = 0
__DARWIN_NON_CANCELABLE = 0
__DARWIN_VERS_1050 = 1
__DARWIN_VERS_1050 = 0
__DARWIN_SUF_UNIX03 = "$UNIX2003"
__DARWIN_SUF_UNIX03_SET = 1
__DARWIN_SUF_UNIX03_SET = 0
__DARWIN_SUF_64_BIT_INO_T = "$INODE64"
__DARWIN_SUF_NON_CANCELABLE = "$NOCANCEL"
__DARWIN_SUF_1050 = "$1050"
__DARWIN_SUF_UNIX03_SET = 0
__DARWIN_SUF_EXTSN = "$DARWIN_EXTSN"
__DARWIN_LONG_DOUBLE_IS_DOUBLE = 0
def __DARWIN_LDBL_COMPAT(x): return
def __DARWIN_LDBL_COMPAT2(x): return
__DARWIN_LONG_DOUBLE_IS_DOUBLE = 1
def __DARWIN_LDBL_COMPAT(x): return
def __DARWIN_LDBL_COMPAT2(x): return
__DARWIN_LONG_DOUBLE_IS_DOUBLE = 0
_DARWIN_FEATURE_LONG_DOUBLE_IS_DOUBLE = 1
_DARWIN_FEATURE_UNIX_CONFORMANCE = 3
_DARWIN_FEATURE_64_BIT_INODE = 1
# Included from machine/_types.h
__PTHREAD_SIZE__ = 1168
__PTHREAD_ATTR_SIZE__ = 56
__PTHREAD_MUTEXATTR_SIZE__ = 8
__PTHREAD_MUTEX_SIZE__ = 56
__PTHREAD_CONDATTR_SIZE__ = 8
__PTHREAD_COND_SIZE__ = 40
__PTHREAD_ONCE_SIZE__ = 8
__PTHREAD_RWLOCK_SIZE__ = 192
__PTHREAD_RWLOCKATTR_SIZE__ = 16
__PTHREAD_SIZE__ = 596
__PTHREAD_ATTR_SIZE__ = 36
__PTHREAD_MUTEXATTR_SIZE__ = 8
__PTHREAD_MUTEX_SIZE__ = 40
__PTHREAD_CONDATTR_SIZE__ = 4
__PTHREAD_COND_SIZE__ = 24
__PTHREAD_ONCE_SIZE__ = 4
__PTHREAD_RWLOCK_SIZE__ = 124
__PTHREAD_RWLOCKATTR_SIZE__ = 12
__DARWIN_NULL = 0
# Included from stdint.h
__WORDSIZE = 64
__WORDSIZE = 32
INT8_MAX = 127
INT16_MAX = 32767
INT32_MAX = 2147483647
INT8_MIN = -128
INT16_MIN = -32768
INT32_MIN = (-INT32_MAX-1)
UINT8_MAX = 255
UINT16_MAX = 65535
INT_LEAST8_MIN = INT8_MIN
INT_LEAST16_MIN = INT16_MIN
INT_LEAST32_MIN = INT32_MIN
INT_LEAST8_MAX = INT8_MAX
INT_LEAST16_MAX = INT16_MAX
INT_LEAST32_MAX = INT32_MAX
UINT_LEAST8_MAX = UINT8_MAX
UINT_LEAST16_MAX = UINT16_MAX
INT_FAST8_MIN = INT8_MIN
INT_FAST16_MIN = INT16_MIN
INT_FAST32_MIN = INT32_MIN
INT_FAST8_MAX = INT8_MAX
INT_FAST16_MAX = INT16_MAX
INT_FAST32_MAX = INT32_MAX
UINT_FAST8_MAX = UINT8_MAX
UINT_FAST16_MAX = UINT16_MAX
INTPTR_MIN = INT32_MIN
INTPTR_MAX = INT32_MAX
PTRDIFF_MIN = INT32_MIN
PTRDIFF_MAX = INT32_MAX
WCHAR_MAX = 0x7fffffff
WCHAR_MIN = 0
WCHAR_MIN = (-WCHAR_MAX-1)
WINT_MIN = INT32_MIN
WINT_MAX = INT32_MAX
SIG_ATOMIC_MIN = INT32_MIN
SIG_ATOMIC_MAX = INT32_MAX
def INT8_C(v): return (v)
def INT16_C(v): return (v)
def INT32_C(v): return (v)
# Included from sys/socket.h
# Included from machine/_param.h
SOCK_STREAM = 1
SOCK_DGRAM = 2
SOCK_RAW = 3
SOCK_RDM = 4
SOCK_SEQPACKET = 5
SO_DEBUG = 0x0001
SO_ACCEPTCONN = 0x0002
SO_REUSEADDR = 0x0004
SO_KEEPALIVE = 0x0008
SO_DONTROUTE = 0x0010
SO_BROADCAST = 0x0020
SO_USELOOPBACK = 0x0040
SO_LINGER = 0x0080
SO_LINGER = 0x1080
SO_OOBINLINE = 0x0100
SO_REUSEPORT = 0x0200
SO_TIMESTAMP = 0x0400
SO_ACCEPTFILTER = 0x1000
SO_DONTTRUNC = 0x2000
SO_WANTMORE = 0x4000
SO_WANTOOBFLAG = 0x8000
SO_SNDBUF = 0x1001
SO_RCVBUF = 0x1002
SO_SNDLOWAT = 0x1003
SO_RCVLOWAT = 0x1004
SO_SNDTIMEO = 0x1005
SO_RCVTIMEO = 0x1006
SO_ERROR = 0x1007
SO_TYPE = 0x1008
SO_NREAD = 0x1020
SO_NKE = 0x1021
SO_NOSIGPIPE = 0x1022
SO_NOADDRERR = 0x1023
SO_NWRITE = 0x1024
SO_REUSESHAREUID = 0x1025
SO_NOTIFYCONFLICT = 0x1026
SO_LINGER_SEC = 0x1080
SO_RESTRICTIONS = 0x1081
SO_RESTRICT_DENYIN = 0x00000001
SO_RESTRICT_DENYOUT = 0x00000002
SO_RESTRICT_DENYSET = (-2147483648)
SO_LABEL = 0x1010
SO_PEERLABEL = 0x1011
SOL_SOCKET = 0xffff
AF_UNSPEC = 0
AF_UNIX = 1
AF_LOCAL = AF_UNIX
AF_INET = 2
AF_IMPLINK = 3
AF_PUP = 4
AF_CHAOS = 5
AF_NS = 6
AF_ISO = 7
AF_OSI = AF_ISO
AF_ECMA = 8
AF_DATAKIT = 9
AF_CCITT = 10
AF_SNA = 11
AF_DECnet = 12
AF_DLI = 13
AF_LAT = 14
AF_HYLINK = 15
AF_APPLETALK = 16
AF_ROUTE = 17
AF_LINK = 18
pseudo_AF_XTP = 19
AF_COIP = 20
AF_CNT = 21
pseudo_AF_RTIP = 22
AF_IPX = 23
AF_SIP = 24
pseudo_AF_PIP = 25
AF_NDRV = 27
AF_ISDN = 28
AF_E164 = AF_ISDN
pseudo_AF_KEY = 29
AF_INET6 = 30
AF_NATM = 31
AF_SYSTEM = 32
AF_NETBIOS = 33
AF_PPP = 34
AF_ATM = 30
pseudo_AF_HDRCMPLT = 35
AF_RESERVED_36 = 36
AF_NETGRAPH = 32
AF_MAX = 37
SOCK_MAXADDRLEN = 255
_SS_MAXSIZE = 128
PF_UNSPEC = AF_UNSPEC
PF_LOCAL = AF_LOCAL
PF_UNIX = PF_LOCAL
PF_INET = AF_INET
PF_IMPLINK = AF_IMPLINK
PF_PUP = AF_PUP
PF_CHAOS = AF_CHAOS
PF_NS = AF_NS
PF_ISO = AF_ISO
PF_OSI = AF_ISO
PF_ECMA = AF_ECMA
PF_DATAKIT = AF_DATAKIT
PF_CCITT = AF_CCITT
PF_SNA = AF_SNA
PF_DECnet = AF_DECnet
PF_DLI = AF_DLI
PF_LAT = AF_LAT
PF_HYLINK = AF_HYLINK
PF_APPLETALK = AF_APPLETALK
PF_ROUTE = AF_ROUTE
PF_LINK = AF_LINK
PF_XTP = pseudo_AF_XTP
PF_COIP = AF_COIP
PF_CNT = AF_CNT
PF_SIP = AF_SIP
PF_IPX = AF_IPX
PF_RTIP = pseudo_AF_RTIP
PF_PIP = pseudo_AF_PIP
PF_NDRV = AF_NDRV
PF_ISDN = AF_ISDN
PF_KEY = pseudo_AF_KEY
PF_INET6 = AF_INET6
PF_NATM = AF_NATM
PF_SYSTEM = AF_SYSTEM
PF_NETBIOS = AF_NETBIOS
PF_PPP = AF_PPP
PF_RESERVED_36 = AF_RESERVED_36
PF_ATM = AF_ATM
PF_NETGRAPH = AF_NETGRAPH
PF_MAX = AF_MAX
NET_MAXID = AF_MAX
NET_RT_DUMP = 1
NET_RT_FLAGS = 2
NET_RT_IFLIST = 3
NET_RT_STAT = 4
NET_RT_TRASH = 5
NET_RT_IFLIST2 = 6
NET_RT_DUMP2 = 7
NET_RT_MAXID = 8
SOMAXCONN = 128
MSG_OOB = 0x1
MSG_PEEK = 0x2
MSG_DONTROUTE = 0x4
MSG_EOR = 0x8
MSG_TRUNC = 0x10
MSG_CTRUNC = 0x20
MSG_WAITALL = 0x40
MSG_DONTWAIT = 0x80
MSG_EOF = 0x100
MSG_WAITSTREAM = 0x200
MSG_FLUSH = 0x400
MSG_HOLD = 0x800
MSG_SEND = 0x1000
MSG_HAVEMORE = 0x2000
MSG_RCVMORE = 0x4000
MSG_NEEDSA = 0x10000
CMGROUP_MAX = 16
SCM_RIGHTS = 0x01
SCM_TIMESTAMP = 0x02
SCM_CREDS = 0x03
SHUT_RD = 0
SHUT_WR = 1
SHUT_RDWR = 2
# Included from machine/endian.h
# Included from sys/_endian.h
def ntohl(x): return (x)
def ntohs(x): return (x)
def htonl(x): return (x)
def htons(x): return (x)
def NTOHL(x): return (x)
def NTOHS(x): return (x)
def HTONL(x): return (x)
def HTONS(x): return (x)
# Included from libkern/_OSByteOrder.h
def __DARWIN_OSSwapConstInt16(x): return \
def __DARWIN_OSSwapConstInt32(x): return \
def __DARWIN_OSSwapConstInt64(x): return \
# Included from libkern/i386/_OSByteOrder.h
def __DARWIN_OSSwapInt16(x): return \
def __DARWIN_OSSwapInt32(x): return \
def __DARWIN_OSSwapInt64(x): return \
def __DARWIN_OSSwapInt16(x): return _OSSwapInt16(x)
def __DARWIN_OSSwapInt32(x): return _OSSwapInt32(x)
def __DARWIN_OSSwapInt64(x): return _OSSwapInt64(x)
def ntohs(x): return __DARWIN_OSSwapInt16(x)
def htons(x): return __DARWIN_OSSwapInt16(x)
def ntohl(x): return __DARWIN_OSSwapInt32(x)
def htonl(x): return __DARWIN_OSSwapInt32(x)
IPPROTO_IP = 0
IPPROTO_HOPOPTS = 0
IPPROTO_ICMP = 1
IPPROTO_IGMP = 2
IPPROTO_GGP = 3
IPPROTO_IPV4 = 4
IPPROTO_IPIP = IPPROTO_IPV4
IPPROTO_TCP = 6
IPPROTO_ST = 7
IPPROTO_EGP = 8
IPPROTO_PIGP = 9
IPPROTO_RCCMON = 10
IPPROTO_NVPII = 11
IPPROTO_PUP = 12
IPPROTO_ARGUS = 13
IPPROTO_EMCON = 14
IPPROTO_XNET = 15
IPPROTO_CHAOS = 16
IPPROTO_UDP = 17
IPPROTO_MUX = 18
IPPROTO_MEAS = 19
IPPROTO_HMP = 20
IPPROTO_PRM = 21
IPPROTO_IDP = 22
IPPROTO_TRUNK1 = 23
IPPROTO_TRUNK2 = 24
IPPROTO_LEAF1 = 25
IPPROTO_LEAF2 = 26
IPPROTO_RDP = 27
IPPROTO_IRTP = 28
IPPROTO_TP = 29
IPPROTO_BLT = 30
IPPROTO_NSP = 31
IPPROTO_INP = 32
IPPROTO_SEP = 33
IPPROTO_3PC = 34
IPPROTO_IDPR = 35
IPPROTO_XTP = 36
IPPROTO_DDP = 37
IPPROTO_CMTP = 38
IPPROTO_TPXX = 39
IPPROTO_IL = 40
IPPROTO_IPV6 = 41
IPPROTO_SDRP = 42
IPPROTO_ROUTING = 43
IPPROTO_FRAGMENT = 44
IPPROTO_IDRP = 45
IPPROTO_RSVP = 46
IPPROTO_GRE = 47
IPPROTO_MHRP = 48
IPPROTO_BHA = 49
IPPROTO_ESP = 50
IPPROTO_AH = 51
IPPROTO_INLSP = 52
IPPROTO_SWIPE = 53
IPPROTO_NHRP = 54
IPPROTO_ICMPV6 = 58
IPPROTO_NONE = 59
IPPROTO_DSTOPTS = 60
IPPROTO_AHIP = 61
IPPROTO_CFTP = 62
IPPROTO_HELLO = 63
IPPROTO_SATEXPAK = 64
IPPROTO_KRYPTOLAN = 65
IPPROTO_RVD = 66
IPPROTO_IPPC = 67
IPPROTO_ADFS = 68
IPPROTO_SATMON = 69
IPPROTO_VISA = 70
IPPROTO_IPCV = 71
IPPROTO_CPNX = 72
IPPROTO_CPHB = 73
IPPROTO_WSN = 74
IPPROTO_PVP = 75
IPPROTO_BRSATMON = 76
IPPROTO_ND = 77
IPPROTO_WBMON = 78
IPPROTO_WBEXPAK = 79
IPPROTO_EON = 80
IPPROTO_VMTP = 81
IPPROTO_SVMTP = 82
IPPROTO_VINES = 83
IPPROTO_TTP = 84
IPPROTO_IGP = 85
IPPROTO_DGP = 86
IPPROTO_TCF = 87
IPPROTO_IGRP = 88
IPPROTO_OSPFIGP = 89
IPPROTO_SRPC = 90
IPPROTO_LARP = 91
IPPROTO_MTP = 92
IPPROTO_AX25 = 93
IPPROTO_IPEIP = 94
IPPROTO_MICP = 95
IPPROTO_SCCSP = 96
IPPROTO_ETHERIP = 97
IPPROTO_ENCAP = 98
IPPROTO_APES = 99
IPPROTO_GMTP = 100
IPPROTO_IPCOMP = 108
IPPROTO_PIM = 103
IPPROTO_PGM = 113
IPPROTO_DIVERT = 254
IPPROTO_RAW = 255
IPPROTO_MAX = 256
IPPROTO_DONE = 257
__DARWIN_IPPORT_RESERVED = 1024
IPPORT_RESERVED = __DARWIN_IPPORT_RESERVED
IPPORT_USERRESERVED = 5000
IPPORT_HIFIRSTAUTO = 49152
IPPORT_HILASTAUTO = 65535
IPPORT_RESERVEDSTART = 600
def IN_CLASSA(i): return (((u_int32_t)(i) & (-2147483648)) == 0)
IN_CLASSA_NET = (-16777216)
IN_CLASSA_NSHIFT = 24
IN_CLASSA_HOST = 0x00ffffff
IN_CLASSA_MAX = 128
def IN_CLASSB(i): return (((u_int32_t)(i) & (-1073741824)) == (-2147483648))
IN_CLASSB_NET = (-65536)
IN_CLASSB_NSHIFT = 16
IN_CLASSB_HOST = 0x0000ffff
IN_CLASSB_MAX = 65536
def IN_CLASSC(i): return (((u_int32_t)(i) & (-536870912)) == (-1073741824))
IN_CLASSC_NET = (-256)
IN_CLASSC_NSHIFT = 8
IN_CLASSC_HOST = 0x000000ff
def IN_CLASSD(i): return (((u_int32_t)(i) & (-268435456)) == (-536870912))
IN_CLASSD_NET = (-268435456)
IN_CLASSD_NSHIFT = 28
IN_CLASSD_HOST = 0x0fffffff
def IN_MULTICAST(i): return IN_CLASSD(i)
def IN_EXPERIMENTAL(i): return (((u_int32_t)(i) & (-268435456)) == (-268435456))
def IN_BADCLASS(i): return (((u_int32_t)(i) & (-268435456)) == (-268435456))
INADDR_NONE = (-1)
def IN_LINKLOCAL(i): return (((u_int32_t)(i) & IN_CLASSB_NET) == IN_LINKLOCALNETNUM)
IN_LOOPBACKNET = 127
INET_ADDRSTRLEN = 16
IP_OPTIONS = 1
IP_HDRINCL = 2
IP_TOS = 3
IP_TTL = 4
IP_RECVOPTS = 5
IP_RECVRETOPTS = 6
IP_RECVDSTADDR = 7
IP_RETOPTS = 8
IP_MULTICAST_IF = 9
IP_MULTICAST_TTL = 10
IP_MULTICAST_LOOP = 11
IP_ADD_MEMBERSHIP = 12
IP_DROP_MEMBERSHIP = 13
IP_MULTICAST_VIF = 14
IP_RSVP_ON = 15
IP_RSVP_OFF = 16
IP_RSVP_VIF_ON = 17
IP_RSVP_VIF_OFF = 18
IP_PORTRANGE = 19
IP_RECVIF = 20
IP_IPSEC_POLICY = 21
IP_FAITH = 22
IP_STRIPHDR = 23
IP_RECVTTL = 24
IP_FW_ADD = 40
IP_FW_DEL = 41
IP_FW_FLUSH = 42
IP_FW_ZERO = 43
IP_FW_GET = 44
IP_FW_RESETLOG = 45
IP_OLD_FW_ADD = 50
IP_OLD_FW_DEL = 51
IP_OLD_FW_FLUSH = 52
IP_OLD_FW_ZERO = 53
IP_OLD_FW_GET = 54
IP_NAT__XXX = 55
IP_OLD_FW_RESETLOG = 56
IP_DUMMYNET_CONFIGURE = 60
IP_DUMMYNET_DEL = 61
IP_DUMMYNET_FLUSH = 62
IP_DUMMYNET_GET = 64
IP_TRAFFIC_MGT_BACKGROUND = 65
IP_FORCE_OUT_IFP = 69
TRAFFIC_MGT_SO_BACKGROUND = 0x0001
TRAFFIC_MGT_SO_BG_SUPPRESSED = 0x0002
IP_DEFAULT_MULTICAST_TTL = 1
IP_DEFAULT_MULTICAST_LOOP = 1
IP_MAX_MEMBERSHIPS = 20
IP_PORTRANGE_DEFAULT = 0
IP_PORTRANGE_HIGH = 1
IP_PORTRANGE_LOW = 2
IPPROTO_MAXID = (IPPROTO_AH + 1)
IPCTL_FORWARDING = 1
IPCTL_SENDREDIRECTS = 2
IPCTL_DEFTTL = 3
IPCTL_DEFMTU = 4
IPCTL_RTEXPIRE = 5
IPCTL_RTMINEXPIRE = 6
IPCTL_RTMAXCACHE = 7
IPCTL_SOURCEROUTE = 8
IPCTL_DIRECTEDBROADCAST = 9
IPCTL_INTRQMAXLEN = 10
IPCTL_INTRQDROPS = 11
IPCTL_STATS = 12
IPCTL_ACCEPTSOURCEROUTE = 13
IPCTL_FASTFORWARDING = 14
IPCTL_KEEPFAITH = 15
IPCTL_GIF_TTL = 16
IPCTL_MAXID = 17
# Included from netinet6/in6.h
__KAME_VERSION = "20010528/apple-darwin"
IPV6PORT_RESERVED = 1024
IPV6PORT_ANONMIN = 49152
IPV6PORT_ANONMAX = 65535
IPV6PORT_RESERVEDMIN = 600
IPV6PORT_RESERVEDMAX = (IPV6PORT_RESERVED-1)
INET6_ADDRSTRLEN = 46
def IN6_IS_ADDR_UNSPECIFIED(a): return \
def IN6_IS_ADDR_LOOPBACK(a): return \
def IN6_IS_ADDR_V4COMPAT(a): return \
def IN6_IS_ADDR_V4MAPPED(a): return \
__IPV6_ADDR_SCOPE_NODELOCAL = 0x01
__IPV6_ADDR_SCOPE_LINKLOCAL = 0x02
__IPV6_ADDR_SCOPE_SITELOCAL = 0x05
__IPV6_ADDR_SCOPE_ORGLOCAL = 0x08
__IPV6_ADDR_SCOPE_GLOBAL = 0x0e
def IN6_IS_ADDR_LINKLOCAL(a): return \
def IN6_IS_ADDR_SITELOCAL(a): return \
def IN6_IS_ADDR_MC_NODELOCAL(a): return \
def IN6_IS_ADDR_MC_LINKLOCAL(a): return \
def IN6_IS_ADDR_MC_SITELOCAL(a): return \
def IN6_IS_ADDR_MC_ORGLOCAL(a): return \
def IN6_IS_ADDR_MC_GLOBAL(a): return \
IPV6_OPTIONS = 1
IPV6_RECVOPTS = 5
IPV6_RECVRETOPTS = 6
IPV6_RECVDSTADDR = 7
IPV6_RETOPTS = 8
IPV6_SOCKOPT_RESERVED1 = 3
IPV6_UNICAST_HOPS = 4
IPV6_MULTICAST_IF = 9
IPV6_MULTICAST_HOPS = 10
IPV6_MULTICAST_LOOP = 11
IPV6_JOIN_GROUP = 12
IPV6_LEAVE_GROUP = 13
IPV6_PORTRANGE = 14
ICMP6_FILTER = 18
IPV6_PKTINFO = 19
IPV6_HOPLIMIT = 20
IPV6_NEXTHOP = 21
IPV6_HOPOPTS = 22
IPV6_DSTOPTS = 23
IPV6_RTHDR = 24
IPV6_PKTOPTIONS = 25
IPV6_CHECKSUM = 26
IPV6_V6ONLY = 27
IPV6_BINDV6ONLY = IPV6_V6ONLY
IPV6_IPSEC_POLICY = 28
IPV6_FAITH = 29
IPV6_FW_ADD = 30
IPV6_FW_DEL = 31
IPV6_FW_FLUSH = 32
IPV6_FW_ZERO = 33
IPV6_FW_GET = 34
IPV6_RTHDR_LOOSE = 0
IPV6_RTHDR_STRICT = 1
IPV6_RTHDR_TYPE_0 = 0
IPV6_DEFAULT_MULTICAST_HOPS = 1
IPV6_DEFAULT_MULTICAST_LOOP = 1
IPV6_PORTRANGE_DEFAULT = 0
IPV6_PORTRANGE_HIGH = 1
IPV6_PORTRANGE_LOW = 2
IPV6PROTO_MAXID = (IPPROTO_PIM + 1)
IPV6CTL_FORWARDING = 1
IPV6CTL_SENDREDIRECTS = 2
IPV6CTL_DEFHLIM = 3
IPV6CTL_DEFMTU = 4
IPV6CTL_FORWSRCRT = 5
IPV6CTL_STATS = 6
IPV6CTL_MRTSTATS = 7
IPV6CTL_MRTPROTO = 8
IPV6CTL_MAXFRAGPACKETS = 9
IPV6CTL_SOURCECHECK = 10
IPV6CTL_SOURCECHECK_LOGINT = 11
IPV6CTL_ACCEPT_RTADV = 12
IPV6CTL_KEEPFAITH = 13
IPV6CTL_LOG_INTERVAL = 14
IPV6CTL_HDRNESTLIMIT = 15
IPV6CTL_DAD_COUNT = 16
IPV6CTL_AUTO_FLOWLABEL = 17
IPV6CTL_DEFMCASTHLIM = 18
IPV6CTL_GIF_HLIM = 19
IPV6CTL_KAME_VERSION = 20
IPV6CTL_USE_DEPRECATED = 21
IPV6CTL_RR_PRUNE = 22
IPV6CTL_MAPPED_ADDR = 23
IPV6CTL_V6ONLY = 24
IPV6CTL_RTEXPIRE = 25
IPV6CTL_RTMINEXPIRE = 26
IPV6CTL_RTMAXCACHE = 27
IPV6CTL_USETEMPADDR = 32
IPV6CTL_TEMPPLTIME = 33
IPV6CTL_TEMPVLTIME = 34
IPV6CTL_AUTO_LINKLOCAL = 35
IPV6CTL_RIP6STATS = 36
IPV6CTL_MAXFRAGS = 41
IPV6CTL_MAXID = 42
|
CompPhysics/ComputationalPhysics
|
refs/heads/master
|
doc/Programs/PythonCodesLectureNotes/ising2dim.py
|
4
|
from __future__ import division
import matplotlib.pyplot as plt
import numpy as np
import math, sys
def periodic (i, limit, add):
"""
Choose correct matrix index with periodic
boundary conditions
Input:
- i: Base index
- limit: Highest \"legal\" index
- add: Number to add or subtract from i
"""
return (i+limit+add) % limit
def monteCarlo(temp, NSpins, MCcycles):
"""
Calculate the energy and magnetization
(\"straight\" and squared) for a given temperature
Input:
- temp: Temperature to calculate for
- NSpins: dimension of square matrix
- MCcycles: Monte-carlo MCcycles (how many times do we
flip the matrix?)
Output:
- E_av: Energy of matrix averaged over MCcycles, normalized to spins**2
- E_variance: Variance of energy, same normalization * temp**2
- M_av: Magnetic field of matrix, averaged over MCcycles, normalized to spins**2
- M_variance: Variance of magnetic field, same normalization * temp
- Mabs: Absolute value of magnetic field, averaged over MCcycles
"""
#Setup spin matrix, initialize to ground state
spin_matrix = np.zeros( (NSpins,NSpins), np.int8) + 1
#Create and initialize variables
E = M = 0
E_av = E2_av = M_av = M2_av = Mabs_av = 0
#Setup array for possible energy changes
w = np.zeros(17,np.float64)
for de in range(-8,9,4): #include +8
w[de+8] = math.exp(-de/temp)
#Calculate initial magnetization:
M = spin_matrix.sum()
#Calculate initial energy
for j in range(NSpins):
for i in range(NSpins):
E -= spin_matrix.item(i,j)*\
(spin_matrix.item(periodic(i,NSpins,-1),j) + spin_matrix.item(i,periodic(j,NSpins,1)))
#Start metropolis MonteCarlo computation
for i in range(MCcycles):
#Metropolis
#Loop over all spins, pick a random spin each time
for s in range(NSpins**2):
x = int(np.random.random()*NSpins)
y = int(np.random.random()*NSpins)
deltaE = 2*spin_matrix.item(x,y)*\
(spin_matrix.item(periodic(x,NSpins,-1), y) +\
spin_matrix.item(periodic(x,NSpins,1), y) +\
spin_matrix.item(x, periodic(y,NSpins,-1)) +\
spin_matrix.item(x, periodic(y,NSpins,1)))
if np.random.random() <= w[deltaE+8]:
#Accept!
spin_matrix[x,y] *= -1
M += 2*spin_matrix[x,y]
E += deltaE
#Update expectation values
E_av += E
E2_av += E**2
M_av += M
M2_av += M**2
Mabs_av += int(math.fabs(M))
#Normalize average values
E_av /= float(MCcycles);
E2_av /= float(MCcycles);
M_av /= float(MCcycles);
M2_av /= float(MCcycles);
Mabs_av /= float(MCcycles);
#Calculate variance and normalize to per-point and temp
E_variance = (E2_av-E_av*E_av)/float(NSpins*NSpins*temp*temp);
M_variance = (M2_av-M_av*M_av)/float(NSpins*NSpins*temp);
#Normalize returned averages to per-point
E_av /= float(NSpins*NSpins);
M_av /= float(NSpins*NSpins);
Mabs_av /= float(NSpins*NSpins);
return (E_av, E_variance, M_av, M_variance, Mabs_av)
# Main program
# temperature steps, initial temperature, final temperature
NumberTsteps = 20
InitialT = 1.5
FinalT = 2.5
Tsteps = (FinalT-InitialT)/NumberTsteps
Temp = np.zeros(NumberTsteps)
for T in range(NumberTsteps):
Temp[T] = InitialT+T*Tsteps
# Declare arrays that hold averages
Energy = np.zeros(NumberTsteps); Magnetization = np.zeros(NumberTsteps)
SpecificHeat = np.zeros(NumberTsteps); Susceptibility = np.zeros(NumberTsteps)
MagnetizationAbs = np.zeros(NumberTsteps)
# Define number of spins
NSpins = 20
# Define number of Monte Carlo cycles
MCcycles = 100000
# Perform the simulations over a range of temperatures
for T in range(NumberTsteps):
(Energy[T], SpecificHeat[T], Magnetization[T], Susceptibility[T], MagnetizationAbs[T]) = monteCarlo(Temp[T],NSpins,MCcycles)
# And finally plot
f = plt.figure(figsize=(18, 10)); # plot the calculated values
sp = f.add_subplot(2, 2, 1 );
plt.plot(Temp, Energy, 'o', color="green");
plt.xlabel("Temperature (T)", fontsize=20);
plt.ylabel("Energy ", fontsize=20);
sp = f.add_subplot(2, 2, 2 );
plt.plot(Temp, abs(Magnetization), 'o', color="red");
plt.xlabel("Temperature (T)", fontsize=20);
plt.ylabel("Magnetization ", fontsize=20);
sp = f.add_subplot(2, 2, 3 );
plt.plot(Temp, SpecificHeat, 'o', color="blue");
plt.xlabel("Temperature (T)", fontsize=20);
plt.ylabel("Specific Heat ", fontsize=20);
sp = f.add_subplot(2, 2, 4 );
plt.plot(Temp, Susceptibility, 'o', color="black");
plt.xlabel("Temperature (T)", fontsize=20);
plt.ylabel("Susceptibility", fontsize=20);
plt.show()
|
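monteCarlo above precomputes the five possible Boltzmann factors once per temperature instead of calling exp() inside the spin loop. The trick in isolation (temperature value assumed):

```python
# The precomputed Metropolis acceptance table from monteCarlo, in isolation.
import math
import numpy as np

temp = 2.0                    # example temperature
w = np.zeros(17, np.float64)
for de in range(-8, 9, 4):    # only dE in {-8, -4, 0, 4, 8} occurs on a square lattice
    w[de + 8] = math.exp(-de / temp)

# Metropolis rule: accept a flip of energy cost dE with probability min(1, exp(-dE/T))
dE = 4
accepted = np.random.random() <= w[dE + 8]
```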
PSU-CAPSTONE-BEEEEEES/Tellagence
|
refs/heads/master
|
test/jshamcrest/doc/conf.py
|
1
|
# -*- coding: utf-8 -*-
#
# JsHamcrest documentation build configuration file, created by
# sphinx-quickstart on Mon Sep 21 23:03:59 2009.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.append(os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'JsHamcrest'
copyright = u'2009-2011, Destaquenet Technology Solutions'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.6'
# The full version, including alpha/beta/rc tags.
release = '0.6.7'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
highlight_language = 'javascript'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'JsHamcrestdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'JsHamcrest.tex', u'JsHamcrest Documentation',
u'Destaquenet Technology Solutions', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
|
noxdafox/see
|
refs/heads/master
|
plugins/screen.py
|
2
|
# Copyright 2015-2017 F-Secure
# Licensed under the Apache License, Version 2.0 (the "License"); you
# may not use this file except in compliance with the License. You may
# obtain a copy of the License at
#     http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.

"""Module for acquiring screenshots of a running VM."""

import os
from io import BytesIO

from see import Hook
from see.context import RUNNING, PAUSED

from .utils import create_folder


class ScreenHook(Hook):
    """
    Screenshot capturing hook.

    On the given event, it captures the Context's screen state
    on a file in the given folder.

    configuration::

        {
          "results_folder": "/folder/where/to/store/the/screenshot/files",
          "screenshot_on_event": ["event_triggering_the_screenshot"]
        }

    The "screenshot_on_event" can be either a string representing the event
    or a list of multiple ones.
    """
    def __init__(self, parameters):
        super().__init__(parameters)
        self.setup_handlers()

    def setup_handlers(self):
        screenshots = self.configuration.get('screenshot_on_event', ())
        events = isinstance(screenshots, str) and [screenshots] or screenshots
        for event in events:
            self.context.subscribe(event, self.screenshot_handler)
            self.logger.debug("Screenshot registered at %s event", event)

    def screenshot_handler(self, event):
        self.logger.debug("Event %s: capturing screenshot.", event)
        screenshot_path = self.screenshot(event)
        self.logger.info("Screenshot %s captured.", screenshot_path)

    def screenshot(self, event):
        self.assert_context_state()
        folder_path = self.configuration['results_folder']
        screenshot_path = os.path.join(folder_path,
                                       "%s_%s.ppm" % (self.identifier, event))
        create_folder(folder_path)
        with open(screenshot_path, 'wb') as screenshot_file:
            screenshot_stream = screenshot(self.context)
            screenshot_file.write(screenshot_stream)
        return screenshot_path  # so screenshot_handler can log the path

    def assert_context_state(self):
        if self.context.domain.state()[0] not in (RUNNING, PAUSED):
            raise RuntimeError("Context must be running or paused")


def screenshot(context):
    """Takes a screenshot of the vnc connection of the guest.

    The resulting image file will be in Portable Pixmap format (PPM).

    @param context: (see.Context) context of the Environment.

    @return: (bytes) binary stream containing the screenshot.
    """
    handler = lambda _, buff, file_handler: file_handler.write(buff)
    string = BytesIO()
    stream = context.domain.connect().newStream(0)
    context.domain.screenshot(stream, 0, 0)
    stream.recvAll(handler, string)
    return string.getvalue()
|
gcd0318/django
|
refs/heads/master
|
django/contrib/gis/maps/__init__.py
|
12133432
| |
joequery/django
|
refs/heads/master
|
tests/migrations/test_migrations_no_changes/__init__.py
|
12133432
| |
broferek/ansible
|
refs/heads/devel
|
lib/ansible/executor/discovery/__init__.py
|
12133432
| |
zdary/intellij-community
|
refs/heads/master
|
python/testData/refactoring/unwrap/whileInWhileUnwrap_after.py
|
80
|
while True:
    # comment
    x = 1<caret>
    y = 2
|
miconof/headphones
|
refs/heads/master
|
lib/requests/packages/urllib3/util/url.py
|
375
|
from collections import namedtuple
from ..exceptions import LocationParseError
url_attrs = ['scheme', 'auth', 'host', 'port', 'path', 'query', 'fragment']
class Url(namedtuple('Url', url_attrs)):
"""
Datastructure for representing an HTTP URL. Used as a return value for
:func:`parse_url`.
"""
__slots__ = ()
def __new__(cls, scheme=None, auth=None, host=None, port=None, path=None,
query=None, fragment=None):
return super(Url, cls).__new__(cls, scheme, auth, host, port, path,
query, fragment)
@property
def hostname(self):
"""For backwards-compatibility with urlparse. We're nice like that."""
return self.host
@property
def request_uri(self):
"""Absolute path including the query string."""
uri = self.path or '/'
if self.query is not None:
uri += '?' + self.query
return uri
@property
def netloc(self):
"""Network location including host and port"""
if self.port:
return '%s:%d' % (self.host, self.port)
return self.host
@property
def url(self):
"""
Convert self into a url
This function should more or less round-trip with :func:`.parse_url`. The
returned url may not be exactly the same as the url inputted to
:func:`.parse_url`, but it should be equivalent by the RFC (e.g., urls
with a blank port will have : removed).
Example: ::
>>> U = parse_url('http://google.com/mail/')
>>> U.url
'http://google.com/mail/'
>>> Url('http', 'username:password', 'host.com', 80,
... '/path', 'query', 'fragment').url
'http://username:password@host.com:80/path?query#fragment'
"""
scheme, auth, host, port, path, query, fragment = self
url = ''
# We use "is not None" because we want things to happen with empty strings (or a 0 port)
if scheme is not None:
url += scheme + '://'
if auth is not None:
url += auth + '@'
if host is not None:
url += host
if port is not None:
url += ':' + str(port)
if path is not None:
url += path
if query is not None:
url += '?' + query
if fragment is not None:
url += '#' + fragment
return url
def __str__(self):
return self.url
def split_first(s, delims):
"""
Given a string and an iterable of delimiters, split on the first found
delimiter. Return two split parts and the matched delimiter.
If not found, then the first part is the full input string.
Example::
>>> split_first('foo/bar?baz', '?/=')
('foo', 'bar?baz', '/')
>>> split_first('foo/bar?baz', '123')
('foo/bar?baz', '', None)
Scales linearly with number of delims. Not ideal for large number of delims.
"""
min_idx = None
min_delim = None
for d in delims:
idx = s.find(d)
if idx < 0:
continue
if min_idx is None or idx < min_idx:
min_idx = idx
min_delim = d
if min_idx is None or min_idx < 0:
return s, '', None
return s[:min_idx], s[min_idx+1:], min_delim
def parse_url(url):
"""
Given a url, return a parsed :class:`.Url` namedtuple. Best-effort is
performed to parse incomplete urls. Fields not provided will be None.
Partly backwards-compatible with :mod:`urlparse`.
Example::
>>> parse_url('http://google.com/mail/')
Url(scheme='http', host='google.com', port=None, path='/mail/', ...)
>>> parse_url('google.com:80')
Url(scheme=None, host='google.com', port=80, path=None, ...)
>>> parse_url('/foo?bar')
Url(scheme=None, host=None, port=None, path='/foo', query='bar', ...)
"""
# While this code has overlap with stdlib's urlparse, it is much
# simplified for our needs and less annoying.
# Additionally, this implementation does silly things to be optimal
# on CPython.
if not url:
# Empty
return Url()
scheme = None
auth = None
host = None
port = None
path = None
fragment = None
query = None
# Scheme
if '://' in url:
scheme, url = url.split('://', 1)
# Find the earliest Authority Terminator
# (http://tools.ietf.org/html/rfc3986#section-3.2)
url, path_, delim = split_first(url, ['/', '?', '#'])
if delim:
# Reassemble the path
path = delim + path_
# Auth
if '@' in url:
# Last '@' denotes end of auth part
auth, url = url.rsplit('@', 1)
# IPv6
if url and url[0] == '[':
host, url = url.split(']', 1)
host += ']'
# Port
if ':' in url:
_host, port = url.split(':', 1)
if not host:
host = _host
if port:
# If given, ports must be integers.
if not port.isdigit():
raise LocationParseError(url)
port = int(port)
else:
# Blank ports are cool, too. (rfc3986#section-3.2.3)
port = None
elif not host and url:
host = url
if not path:
return Url(scheme, auth, host, port, path, query, fragment)
# Fragment
if '#' in path:
path, fragment = path.split('#', 1)
# Query
if '?' in path:
path, query = path.split('?', 1)
return Url(scheme, auth, host, port, path, query, fragment)
def get_host(url):
"""
Deprecated. Use :func:`.parse_url` instead.
"""
p = parse_url(url)
return p.scheme or 'http', p.hostname, p.port
|
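Given the Url and parse_url definitions above, the pieces round-trip as the docstrings promise; for instance:

```python
# Round-trip check using the Url helpers defined in the module above.
u = parse_url('http://user:pw@host.com:80/path?q#frag')
assert u.scheme == 'http' and u.auth == 'user:pw'
assert u.netloc == 'host.com:80'
assert u.request_uri == '/path?q'
assert str(u) == 'http://user:pw@host.com:80/path?q#frag'
```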
nuclear-wizard/moose
|
refs/heads/devel
|
python/mooseutils/tests/test_MooseDataFrame.py
|
12
|
#!/usr/bin/env python3
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import os
import shutil
import unittest
import time
import mooseutils
class TestMooseDataFrame(unittest.TestCase):
"""
Test use of MooseDataFrame for loading/reloading csv files.
"""
def setUp(self):
"""
Define the test filename.
"""
self._filename = '../../test_files/white_elephant_jan_2016.csv'
self._keys = ['air_temp_low_24_hour_set_1', 'snow_depth_set_1']
def testBasic(self):
"""
Test that if a file exists it is loaded w/o error.
"""
# Test basic read
data = mooseutils.MooseDataFrame(self._filename)
self.assertEqual(self._filename, data.filename)
self.assertTrue(data)
# Key Testing
for k in self._keys:
self.assertTrue(k in data)
# Check data
x = data[self._keys]
self.assertEqual(x.loc[10][self._keys[0]], 2.12)
self.assertEqual(x.loc[10][self._keys[1]], 51.00)
def testNoFile(self):
"""
        Test that a missing file does not cause a failure.
"""
filename = 'not_a_file.csv'
data = mooseutils.MooseDataFrame(filename)
self.assertEqual(filename, data.filename)
self.assertFalse(data)
# Key Testing
self.assertFalse('key' in data)
x = data[ ['key1', 'key2'] ]
self.assertTrue(x.empty)
def testEmptyUpdateRemove(self):
"""
Test that data appears when file is loaded.
"""
# Temporary filename
filename = "{}_{}.csv".format(self.__class__.__name__, 'tmp')
if os.path.exists(filename):
os.remove(filename)
# (1) No-file
data = mooseutils.MooseDataFrame(filename)
self.assertEqual(filename, data.filename)
for k in self._keys:
self.assertFalse(k in data)
x = data[self._keys]
self.assertTrue(x.empty)
# (2) Data exists
shutil.copyfile(self._filename, filename)
data.update()
for k in self._keys:
self.assertTrue(k in data)
x = data[self._keys]
self.assertEqual(x.loc[10][self._keys[0]], 2.12)
self.assertEqual(x.loc[10][self._keys[1]], 51.00)
self.assertFalse(x.empty)
# (3) Data remove
os.remove(filename)
data.update()
for k in self._keys:
self.assertFalse(k in data)
x = data[self._keys]
self.assertTrue(x.empty)
def testIndex(self):
"""
Test that the index of the data may be set.
"""
data = mooseutils.MooseDataFrame(self._filename, index='time')
x = data[self._keys]
idx = 29.42
self.assertEqual(x.loc[idx][self._keys[0]], 20.12)
self.assertEqual(x.loc[idx][self._keys[1]], 59.00)
def testOldFile(self):
"""
Test that "old" files do not load.
"""
data = mooseutils.MooseDataFrame(self._filename, index='time')
self.assertTrue(data)
data = mooseutils.MooseDataFrame(self._filename, index='time', run_start_time=time.time())
self.assertFalse(data)
if __name__ == '__main__':
unittest.main(module=__name__, verbosity=2)
|
EderSantana/blocks_contrib
|
refs/heads/master
|
tests/__init__.py
|
12133432
| |
jmankiewicz/odooAddons
|
refs/heads/master
|
fritzbox/controllers/__init__.py
|
12133432
| |
gosquadron/squadron
|
refs/heads/master
|
squadron/fileio/loghandlers/__init__.py
|
12133432
| |
Vixionar/django
|
refs/heads/master
|
django/core/checks/security/__init__.py
|
12133432
| |
magic0704/neutron
|
refs/heads/master
|
neutron/plugins/ml2/drivers/cisco/nexus/__init__.py
|
12133432
| |
nmercier/linux-cross-gcc
|
refs/heads/master
|
win32/bin/Lib/lib2to3/tests/data/fixers/myfixes/__init__.py
|
12133432
| |
ibmsoe/ImpalaPPC
|
refs/heads/Impala2.6-main
|
tests/benchmark/__init__.py
|
12133432
| |
endolith/numpy
|
refs/heads/master
|
numpy/random/tests/data/__init__.py
|
12133432
| |
aginor/wesnoth
|
refs/heads/master
|
data/tools/unit_tree/helpers.py
|
13
|
"""
Various helpers for use by the wmlunits tool.
"""
import sys, os, re, glob, shutil, copy, urllib.request, urllib.error, urllib.parse, subprocess
import wesnoth.wmlparser3 as wmlparser3
def get_datadir(wesnoth_exe):
p = subprocess.Popen([wesnoth_exe, "--path"],
stdout = subprocess.PIPE, stderr = subprocess.PIPE)
out, err = p.communicate()
    # communicate() returns bytes under Python 3; decode before joining paths
    return out.strip().decode("utf-8")
def get_userdir(wesnoth_exe):
p = subprocess.Popen([wesnoth_exe, "--config-path"],
stdout = subprocess.PIPE, stderr = subprocess.PIPE)
out, err = p.communicate()
    return out.strip().decode("utf-8")
class Image:
def __init__(self, id_name, ipath, bases, no_tc):
self.id_name = id_name
self.ipath = ipath # none if it was not found
self.bases = bases
self.no_tc = no_tc
self.addons = set()
class ImageCollector:
"""
A class to collect all the images which need to be copied to the HTML
output folder.
"""
def __init__(self, wesnoth_exe, userdir, datadir):
self.images_by_addon_name = {}
self.images_by_ipath = {}
self.binary_paths_per_addon = {}
self.datadir = datadir
self.userdir = userdir
if not self.datadir: self.datadir = get_datadir(wesnoth_exe)
if not self.userdir: self.userdir = get_userdir(wesnoth_exe)
def add_binary_paths_from_WML(self, addon, WML):
for binpath in WML.get_all(tag = "binary_path"):
path = binpath.get_text_val("path")
if addon not in self.binary_paths_per_addon:
self.binary_paths_per_addon[addon] = []
self.binary_paths_per_addon[addon].append(path)
def _find_image(self, addon, name):
tilde = name.find("~")
if tilde >= 0:
name = name[:tilde]
bases = [os.path.join(self.datadir, "data/core/images")]
binpaths = self.binary_paths_per_addon.get(addon, [])[:]
binpaths.reverse()
for x in binpaths:
for idir in ["images", "images/units"]:
bases.append(os.path.join(self.datadir, x, idir))
bases.append(os.path.join(self.userdir, x, idir))
        bases = [os.path.join(base, name) for base in bases]
for ipath in bases:
if os.path.exists(ipath): return ipath, bases
return None, bases
def add_image_check(self, addon, name, no_tc = False):
if (addon, name) in self.images_by_addon_name:
image = self.images_by_addon_name[(addon, name)]
if addon not in image.addons: image.addons.add(addon)
return image
ipath, bases = self._find_image(addon, name)
if ipath in self.images_by_ipath:
image = self.images_by_ipath[ipath]
if addon not in image.addons: image.addons.add(addon)
return image
def make_name(x):
x = x.strip("./ ")
d = options.config_dir.strip("./ ")
if x.startswith(d): x = x[len(d):]
d = options.data_dir.strip("./ ")
if x.startswith(d): x = x[len(d):]
x = x.strip("./ ")
if x.startswith("data"): x = x[len("data"):]
x = x.strip("./ ")
y = ""
for c in x:
if c == "/": c = "$"
elif not c.isalnum() and c not in ".+-()[]{}": c = "_"
y += c
return y
if ipath:
id_name = make_name(ipath)
else:
id_name = make_name(addon + "/" + name)
image = Image(id_name, ipath, bases, no_tc)
image.addons.add(addon)
self.images_by_addon_name[(addon, name)] = image
if ipath:
self.images_by_ipath[ipath] = image
return image
def add_image(self, addon, path, no_tc = False):
image = self.add_image_check(addon, path, no_tc)
return image.id_name
def copy_and_color_images(self, target_path):
for image in list(self.images_by_ipath.values()):
opath = os.path.join(target_path, "pics", image.id_name)
try:
os.makedirs(os.path.dirname(opath))
except OSError:
pass
no_tc = image.no_tc
ipath = os.path.normpath(image.ipath)
cdir = os.path.normpath(options.config_dir + "/data/add-ons")
if ipath.startswith(cdir):
ipath = os.path.join(options.addons, ipath[len(cdir):].lstrip("/"))
if ipath and os.path.exists(ipath):
if no_tc:
shutil.copy2(ipath, opath)
else:
# We assume TeamColorizer is in the same directory as the
# helpers.py currently executing.
command = os.path.join(os.path.dirname(__file__),
"TeamColorizer")
p = subprocess.Popen([command, ipath, opath])
p.wait()
else:
sys.stderr.write(
"Warning: Required image %s does not exist (referenced by %s).\n" % (
image.id_name, ", ".join(image.addons)))
if options.verbose:
if image.bases:
sys.stderr.write("Warning: Looked at the following locations:\n")
sys.stderr.write("\n".join(image.bases) + "\n")
else:
sys.stderr.write("nowhere\n")
blah = 1
class WesnothList:
"""
Lists various Wesnoth stuff like units, campaigns, terrains, factions...
"""
def __init__(self, wesnoth_exe, config_dir, data_dir, transdir):
self.unit_lookup = {}
self.race_lookup = {}
self.terrain_lookup = {}
self.movetype_lookup = {}
self.era_lookup = {}
self.campaign_lookup = {}
self.parser = wmlparser3.Parser(wesnoth_exe, config_dir,
data_dir)
def add_terrains(self):
"""
We need those for movement costs and defenses.
"""
self.parser.parse_text("{core/terrain.cfg}\n")
n = 0
for terrain in self.parser.get_all(tag = "terrain_type"):
tstring = terrain.get_text_val("string")
self.terrain_lookup[tstring] = terrain
n += 1
return n
def add_languages(self, languages):
"""
Returns a dictionary mapping isocodes to languages.
"""
self.languages_found = {}
parser = wmlparser3.Parser(options.wesnoth, options.config_dir,
options.data_dir)
parser.parse_text("{languages}")
for locale in parser.get_all(tag="locale"):
isocode = locale.get_text_val("locale")
name = locale.get_text_val("name")
if isocode == "ang_GB":
continue
self.languages_found[isocode] = name
def add_era(self, era):
"""
For an era, list all factions and units in it.
"""
eid = era.get_text_val("id")
if not eid: return
self.era_lookup[eid] = era
era.faction_lookup = {}
for multiplayer_side in era.get_all(tag = "multiplayer_side"):
fid = multiplayer_side.get_text_val("id")
if fid == "Random": continue
era.faction_lookup[fid] = multiplayer_side
recruit = multiplayer_side.get_text_val("recruit", "").strip()
leader = multiplayer_side.get_text_val("leader", "").strip()
units = recruit.split(",")
leaders = leader.split(",")
multiplayer_side.units = {}
multiplayer_side.is_leader = {}
for u in units:
uid = u.strip()
if uid:
multiplayer_side.units[uid] = True
for u in leaders:
uid = u.strip()
if uid:
multiplayer_side.units[uid] = True
multiplayer_side.is_leader[uid] = True
return eid
def add_binary_paths(self, addon, image_collector):
image_collector.add_binary_paths_from_WML(addon, self.parser.root)
def add_campaign(self, campaign):
name = campaign.get_text_val("id")
if not name:
global blah
name = "noid%d" % blah
blah += 1
self.campaign_lookup[name] = campaign
return name
def add_mainline_eras(self):
"""
Find all mainline eras.
"""
self.parser.parse_text("{multiplayer/eras.cfg}")
n = 0
for era in self.parser.get_all(tag = "era"):
self.add_era(era)
n += 1
return n
def add_units(self, campaign):
"""
We assume each unit, in mainline and all addons, has one unique id. So
we reference them everywhere by this id, and here can add them all to
one big collection.
"""
addunits = self.parser.get_all(tag = "units")
addunits += self.parser.get_all(tag = "+units")
if not addunits: return 0
def getall(oftype):
            r = []
for units in addunits:
r += units.get_all(tag = oftype)
return r
# Find all unit types.
newunits = getall("unit_type") + getall("unit")
for unit in newunits:
uid = unit.get_text_val("id")
unit.id = uid
if unit.get_text_val("do_not_list", "no") != "no" or\
unit.get_text_val("hide_help", "no") not in ["no", "false"]:
unit.hidden = True
else:
unit.hidden = False
if uid in self.unit_lookup:
unit = self.unit_lookup[uid]
# TODO: We might want to compare the two units
# with the same id and if one is different try
# to do something clever like rename it...
else:
self.unit_lookup[uid] = unit
if not hasattr(unit, "campaigns"): unit.campaigns = []
unit.campaigns.append(campaign)
# Find all races.
newraces = getall("race")
for race in newraces:
rid = race.get_text_val("id")
            if rid is None:
rid = race.get_text_val("name")
self.race_lookup[rid] = race
# Find all movetypes.
newmovetypes = getall("movetype")
for movetype in newmovetypes:
mtname = movetype.get_text_val("name")
self.movetype_lookup[mtname] = movetype
# Store race/movetype/faction of each unit for easier access later.
for unit in newunits:
uid = unit.get_text_val("id")
race = self.get_unit_value(unit, "race")
try:
unit.race = self.race_lookup[race]
unit.rid = unit.race.get_text_val("id", "none")
except KeyError:
unit.race = None
unit.rid = "none"
error_message("Warning: No race \"%s\" found (%s).\n" % (
race, unit.get_text_val("id")))
movetype = self.get_unit_value(unit, "movement_type")
try: unit.movetype = self.movetype_lookup[movetype]
except KeyError: unit.movetype = None
unit.advance = []
advanceto = unit.get_text_val("advances_to")
# Add backwards compatibility for 1.4
if not advanceto:
advanceto = unit.get_text_val("advanceto")
if advanceto and advanceto != "null":
for advance in advanceto.split(","):
auid = advance.strip()
if auid: unit.advance.append(auid)
# level
try:
level = int(self.get_unit_value(unit, "level"))
except TypeError:
level = 0
except ValueError:
level = 0
if level < 0: level = 0
unit.level = level
return len(newunits)
def check_units(self):
"""
Once all units have been added, do some checking.
"""
# handle advancefrom tags
for uid, unit in list(self.unit_lookup.items()):
for advancefrom in unit.get_all(tag = "advancefrom"):
fromid = advancefrom.get_text_val("unit")
if fromid:
try:
fromunit = self.unit_lookup[fromid]
except KeyError:
error_message(
"Error: Unit '%s' references non-existant [advancefrom] unit '%s'" % (
uid, fromid))
continue
if uid not in fromunit.advance:
fromunit.advance.append(uid)
def find_unit_factions(self):
for unit in list(self.unit_lookup.values()):
unit.factions = []
unit.eras = []
for eid, era in list(self.era_lookup.items()):
for fid, multiplayer_side in list(era.faction_lookup.items()):
for uid in multiplayer_side.units:
try:
unit = self.unit_lookup[uid]
except KeyError:
error_message(
("Error: Era '%s' faction '%s' references " +
"non-existant unit id '%s'!\n") % (
eid,
fid,
str(uid)))
continue
                    if eid not in unit.eras:
unit.eras.append(eid)
unit.factions.append((eid, fid))
        # As a special case, add units from this addon (self.cid, set by the
        # caller) that belong to no faction; note this reuses the last eid
        # from the loop above.
for unit in list(self.unit_lookup.values()):
if unit.campaigns[0] == self.cid:
if not unit.factions:
                    if eid not in unit.eras:
unit.eras.append(eid)
unit.factions.append((eid, None))
def get_base_unit(self, unit):
b = unit.get_all(tag = "base_unit")
if b:
b = b[0]
buid = b.get_text_val("id")
try: baseunit = self.unit_lookup[buid]
except KeyError:
error_message(
"Warning: No baseunit \"%s\" for \"%s\".\n" % (
buid, unit.get_text_val("id")))
return None
return baseunit
return None
def get_unit_value(self, unit, attribute, default = None, translation = None):
value = unit.get_text_val(attribute, None, translation)
        if value is None:
baseunit = self.get_base_unit(unit)
if baseunit:
return self.get_unit_value(baseunit, attribute, default, translation)
return default
return value
class UnitForest:
"""
Contains the forest of unit advancement trees.
"""
def __init__(self):
self.trees = {}
self.lookup = {}
def add_node(self, un):
"""
Add a new unit to the forest.
"""
self.lookup[un.id] = un
def create_network(self):
"""
Assuming that each node which has been added to the tree only has a
valid list of children in unit.child_ids, also fill in unit.parent_ids
and update the unit.children shortcut.
"""
# Complete the network
for uid, u in list(self.lookup.items()):
for cid in u.child_ids:
c = self.lookup.get(cid, None)
if not c: continue
u.children.append(c)
                if uid not in c.parent_ids:
c.parent_ids.append(uid)
# Put all roots into the forest
for uid, u in list(self.lookup.items()):
if not u.parent_ids:
self.trees[uid] = u
# Sanity check because some argGRRxxx addons have units who advance to
# themselves.
def recurse(u, already):
already2 = already.copy()
for c in u.children[:]:
already2[c.id] = True
if c.id in already:
error_message(
("Warning: Unit %s advances to unit %s in a loop.\n" %
(u.id, c.id)) +
(" Removing advancement %s.\n" % c.id))
u.children.remove(c)
for c in u.children:
recurse(c, already2)
for u in list(self.trees.values()):
already = {u.id : True}
recurse(u, already)
def update(self):
self.create_network()
self.breadth = sum([x.update_breadth() for x in list(self.trees.values())])
return self.breadth
def get_children(self, uid):
un = self.lookup[uid]
return un.child_ids
def get_parents(self, uid):
un = self.lookup[uid]
return un.parent_ids
class UnitNode:
"""
A node in the advancement trees forest.
"""
def __init__(self, unit):
self.unit = unit
self.children = []
self.id = unit.get_text_val("id")
self.child_ids = []
self.parent_ids = []
self.child_ids.extend(unit.advance)
def update_breadth(self):
if not self.children:
self.breadth = 1
else:
self.breadth = sum([x.update_breadth() for x in self.children])
return self.breadth
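# Illustrative sketch (hypothetical stub, not part of wmlunits) of how the
# forest resolves unit.advance lists into parent/child links:
#
#     class _StubUnit:
#         def __init__(self, uid, advance):
#             self._uid, self.advance = uid, advance
#         def get_text_val(self, key):
#             return self._uid    # only "id" is ever requested here
#
#     forest = UnitForest()
#     forest.add_node(UnitNode(_StubUnit("Peasant", ["Spearman"])))
#     forest.add_node(UnitNode(_StubUnit("Spearman", [])))
#     forest.update()                        # returns total breadth: 1
#     forest.get_children("Peasant")         # ['Spearman']
#     forest.get_parents("Spearman")         # ['Peasant']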
class GroupNode:
def __init__(self, data):
self.data = data
|
linjoahow/w17g
|
refs/heads/master
|
static/Brython3.1.1-20150328-091302/Lib/xml/sax/saxutils.py
|
730
|
"""\
A library of useful helper classes to the SAX classes, for the
convenience of application and driver writers.
"""
import os, urllib.parse, urllib.request
import io
from . import handler
from . import xmlreader
def __dict_replace(s, d):
"""Replace substrings of a string using a dictionary."""
for key, value in d.items():
s = s.replace(key, value)
return s
def escape(data, entities={}):
"""Escape &, <, and > in a string of data.
You can escape other strings of data by passing a dictionary as
the optional entities parameter. The keys and values must all be
strings; each key will be replaced with its corresponding value.
"""
# must do ampersand first
data = data.replace("&", "&")
data = data.replace(">", ">")
data = data.replace("<", "<")
if entities:
data = __dict_replace(data, entities)
return data
def unescape(data, entities={}):
"""Unescape &, <, and > in a string of data.
You can unescape other strings of data by passing a dictionary as
the optional entities parameter. The keys and values must all be
strings; each key will be replaced with its corresponding value.
"""
data = data.replace("<", "<")
data = data.replace(">", ">")
if entities:
data = __dict_replace(data, entities)
# must do ampersand last
return data.replace("&", "&")
def quoteattr(data, entities={}):
"""Escape and quote an attribute value.
Escape &, <, and > in a string of data, then quote it for use as
an attribute value. The \" character will be escaped as well, if
necessary.
You can escape other strings of data by passing a dictionary as
the optional entities parameter. The keys and values must all be
strings; each key will be replaced with its corresponding value.
"""
entities = entities.copy()
entities.update({'\n': ' ', '\r': ' ', '\t':'	'})
data = escape(data, entities)
if '"' in data:
if "'" in data:
data = '"%s"' % data.replace('"', """)
else:
data = "'%s'" % data
else:
data = '"%s"' % data
return data
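# Doctest-style illustrations (added for clarity, not in the original):
#
#     >>> escape('a < b & c')
#     'a &lt; b &amp; c'
#     >>> quoteattr('a "quoted" value')
#     '\'a "quoted" value\''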
def _gettextwriter(out, encoding):
if out is None:
import sys
return sys.stdout
if isinstance(out, io.TextIOBase):
# use a text writer as is
return out
# wrap a binary writer with TextIOWrapper
if isinstance(out, io.RawIOBase):
# Keep the original file open when the TextIOWrapper is
# destroyed
class _wrapper:
__class__ = out.__class__
def __getattr__(self, name):
return getattr(out, name)
buffer = _wrapper()
buffer.close = lambda: None
else:
# This is to handle passed objects that aren't in the
# IOBase hierarchy, but just have a write method
buffer = io.BufferedIOBase()
buffer.writable = lambda: True
buffer.write = out.write
try:
        # TextIOWrapper uses these methods to determine
# if BOM (for UTF-16, etc) should be added
buffer.seekable = out.seekable
buffer.tell = out.tell
except AttributeError:
pass
return io.TextIOWrapper(buffer, encoding=encoding,
errors='xmlcharrefreplace',
newline='\n',
write_through=True)
class XMLGenerator(handler.ContentHandler):
def __init__(self, out=None, encoding="iso-8859-1", short_empty_elements=False):
handler.ContentHandler.__init__(self)
out = _gettextwriter(out, encoding)
self._write = out.write
self._flush = out.flush
self._ns_contexts = [{}] # contains uri -> prefix dicts
self._current_context = self._ns_contexts[-1]
self._undeclared_ns_maps = []
self._encoding = encoding
self._short_empty_elements = short_empty_elements
self._pending_start_element = False
def _qname(self, name):
"""Builds a qualified name from a (ns_url, localname) pair"""
if name[0]:
# Per http://www.w3.org/XML/1998/namespace, The 'xml' prefix is
# bound by definition to http://www.w3.org/XML/1998/namespace. It
# does not need to be declared and will not usually be found in
# self._current_context.
if 'http://www.w3.org/XML/1998/namespace' == name[0]:
return 'xml:' + name[1]
# The name is in a non-empty namespace
prefix = self._current_context[name[0]]
if prefix:
# If it is not the default namespace, prepend the prefix
return prefix + ":" + name[1]
# Return the unqualified name
return name[1]
def _finish_pending_start_element(self,endElement=False):
if self._pending_start_element:
self._write('>')
self._pending_start_element = False
# ContentHandler methods
def startDocument(self):
self._write('<?xml version="1.0" encoding="%s"?>\n' %
self._encoding)
def endDocument(self):
self._flush()
def startPrefixMapping(self, prefix, uri):
self._ns_contexts.append(self._current_context.copy())
self._current_context[uri] = prefix
self._undeclared_ns_maps.append((prefix, uri))
def endPrefixMapping(self, prefix):
self._current_context = self._ns_contexts[-1]
del self._ns_contexts[-1]
def startElement(self, name, attrs):
self._finish_pending_start_element()
self._write('<' + name)
for (name, value) in attrs.items():
self._write(' %s=%s' % (name, quoteattr(value)))
if self._short_empty_elements:
self._pending_start_element = True
else:
self._write(">")
def endElement(self, name):
if self._pending_start_element:
self._write('/>')
self._pending_start_element = False
else:
self._write('</%s>' % name)
def startElementNS(self, name, qname, attrs):
self._finish_pending_start_element()
self._write('<' + self._qname(name))
for prefix, uri in self._undeclared_ns_maps:
if prefix:
self._write(' xmlns:%s="%s"' % (prefix, uri))
else:
self._write(' xmlns="%s"' % uri)
self._undeclared_ns_maps = []
for (name, value) in attrs.items():
self._write(' %s=%s' % (self._qname(name), quoteattr(value)))
if self._short_empty_elements:
self._pending_start_element = True
else:
self._write(">")
def endElementNS(self, name, qname):
if self._pending_start_element:
self._write('/>')
self._pending_start_element = False
else:
self._write('</%s>' % self._qname(name))
def characters(self, content):
if content:
self._finish_pending_start_element()
self._write(escape(content))
def ignorableWhitespace(self, content):
if content:
self._finish_pending_start_element()
self._write(content)
def processingInstruction(self, target, data):
self._finish_pending_start_element()
self._write('<?%s %s?>' % (target, data))
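# Illustrative sketch of driving XMLGenerator by hand (an io.StringIO sink is
# assumed here; not part of the original module):
#
#     >>> import io
#     >>> buf = io.StringIO()
#     >>> gen = XMLGenerator(buf)
#     >>> gen.startDocument()
#     >>> gen.startElement('greeting', {'lang': 'en'})
#     >>> gen.characters('hello')
#     >>> gen.endElement('greeting')
#     >>> print(buf.getvalue())
#     <?xml version="1.0" encoding="iso-8859-1"?>
#     <greeting lang="en">hello</greeting>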
class XMLFilterBase(xmlreader.XMLReader):
"""This class is designed to sit between an XMLReader and the
client application's event handlers. By default, it does nothing
but pass requests up to the reader and events on to the handlers
unmodified, but subclasses can override specific methods to modify
the event stream or the configuration requests as they pass
through."""
def __init__(self, parent = None):
xmlreader.XMLReader.__init__(self)
self._parent = parent
# ErrorHandler methods
def error(self, exception):
self._err_handler.error(exception)
def fatalError(self, exception):
self._err_handler.fatalError(exception)
def warning(self, exception):
self._err_handler.warning(exception)
# ContentHandler methods
def setDocumentLocator(self, locator):
self._cont_handler.setDocumentLocator(locator)
def startDocument(self):
self._cont_handler.startDocument()
def endDocument(self):
self._cont_handler.endDocument()
def startPrefixMapping(self, prefix, uri):
self._cont_handler.startPrefixMapping(prefix, uri)
def endPrefixMapping(self, prefix):
self._cont_handler.endPrefixMapping(prefix)
def startElement(self, name, attrs):
self._cont_handler.startElement(name, attrs)
def endElement(self, name):
self._cont_handler.endElement(name)
def startElementNS(self, name, qname, attrs):
self._cont_handler.startElementNS(name, qname, attrs)
def endElementNS(self, name, qname):
self._cont_handler.endElementNS(name, qname)
def characters(self, content):
self._cont_handler.characters(content)
def ignorableWhitespace(self, chars):
self._cont_handler.ignorableWhitespace(chars)
def processingInstruction(self, target, data):
self._cont_handler.processingInstruction(target, data)
def skippedEntity(self, name):
self._cont_handler.skippedEntity(name)
# DTDHandler methods
def notationDecl(self, name, publicId, systemId):
self._dtd_handler.notationDecl(name, publicId, systemId)
def unparsedEntityDecl(self, name, publicId, systemId, ndata):
self._dtd_handler.unparsedEntityDecl(name, publicId, systemId, ndata)
# EntityResolver methods
def resolveEntity(self, publicId, systemId):
return self._ent_handler.resolveEntity(publicId, systemId)
# XMLReader methods
def parse(self, source):
self._parent.setContentHandler(self)
self._parent.setErrorHandler(self)
self._parent.setEntityResolver(self)
self._parent.setDTDHandler(self)
self._parent.parse(source)
def setLocale(self, locale):
self._parent.setLocale(locale)
def getFeature(self, name):
return self._parent.getFeature(name)
def setFeature(self, name, state):
self._parent.setFeature(name, state)
def getProperty(self, name):
return self._parent.getProperty(name)
def setProperty(self, name, value):
self._parent.setProperty(name, value)
# XMLFilter methods
def getParent(self):
return self._parent
def setParent(self, parent):
self._parent = parent
# --- Utility functions
def prepare_input_source(source, base=""):
"""This function takes an InputSource and an optional base URL and
returns a fully resolved InputSource object ready for reading."""
if isinstance(source, str):
source = xmlreader.InputSource(source)
elif hasattr(source, "read"):
f = source
source = xmlreader.InputSource()
source.setByteStream(f)
if hasattr(f, "name"):
source.setSystemId(f.name)
if source.getByteStream() is None:
sysid = source.getSystemId()
basehead = os.path.dirname(os.path.normpath(base))
sysidfilename = os.path.join(basehead, sysid)
if os.path.isfile(sysidfilename):
source.setSystemId(sysidfilename)
f = open(sysidfilename, "rb")
else:
source.setSystemId(urllib.parse.urljoin(base, sysid))
f = urllib.request.urlopen(source.getSystemId())
source.setByteStream(f)
return source
|
jusdng/odoo
|
refs/heads/8.0
|
addons/purchase/res_config.py
|
357
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (C) 2004-2012 OpenERP S.A. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
class purchase_config_settings(osv.osv_memory):
_name = 'purchase.config.settings'
_inherit = 'res.config.settings'
_columns = {
'default_invoice_method': fields.selection(
[('manual', 'Based on purchase order lines'),
('picking', 'Based on incoming shipments'),
('order', 'Pre-generate draft invoices based on purchase orders'),
], 'Default invoicing control method', required=True, default_model='purchase.order'),
'group_purchase_pricelist':fields.boolean("Manage pricelist per supplier",
implied_group='product.group_purchase_pricelist',
            help='Allows you to manage different prices based on rules per category of supplier.\n'
'Example: 10% for retailers, promotion of 5 EUR on this product, etc.'),
'group_uom':fields.boolean("Manage different units of measure for products",
implied_group='product.group_uom',
help="""Allows you to select and maintain different units of measure for products."""),
'group_costing_method':fields.boolean("Use 'Real Price' or 'Average' costing methods.",
implied_group='stock_account.group_inventory_valuation',
help="""Allows you to compute product cost price based on average cost."""),
'module_warning': fields.boolean("Alerts by products or supplier",
            help='Allows you to configure notifications on products and trigger them when a user wants to purchase a given product or from a given supplier.\n'
'Example: Product: this product is deprecated, do not purchase more than 5.\n'
'Supplier: don\'t forget to ask for an express delivery.'),
'module_purchase_double_validation': fields.boolean("Force two levels of approvals",
help='Provide a double validation mechanism for purchases exceeding minimum amount.\n'
'-This installs the module purchase_double_validation.'),
'module_purchase_requisition': fields.boolean("Manage calls for bids",
help="""Calls for bids are used when you want to generate requests for quotations to several suppliers for a given set of products.
You can configure per product if you directly do a Request for Quotation
to one supplier or if you want a Call for Bids to compare offers from several suppliers."""),
'group_advance_purchase_requisition': fields.boolean("Choose from several bids in a call for bids",
implied_group='purchase.group_advance_bidding',
help="""In the process of a public bidding, you can compare the bid lines and choose for each requested product from which bid you
buy which quantity"""),
'module_purchase_analytic_plans': fields.boolean('Use multiple analytic accounts on purchase orders',
help='Allows the user to maintain several analysis plans. These let you split lines on a purchase order between several accounts and analytic plans.\n'
'-This installs the module purchase_analytic_plans.'),
'group_analytic_account_for_purchases': fields.boolean('Analytic accounting for purchases',
implied_group='purchase.group_analytic_accounting',
help="Allows you to specify an analytic account on purchase orders."),
'module_stock_dropshipping': fields.boolean("Manage dropshipping",
            help='Creates the dropship route and adds more complex tests.\n'
'-This installs the module stock_dropshipping.'),
}
_defaults = {
'default_invoice_method': 'order',
}
def onchange_purchase_analytic_plans(self, cr, uid, ids, module_purchase_analytic_plans, context=None):
""" change group_analytic_account_for_purchases following module_purchase_analytic_plans """
if not module_purchase_analytic_plans:
return {}
return {'value': {'group_analytic_account_for_purchases': module_purchase_analytic_plans}}
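# Illustrative note (added, not in the original module): an onchange handler
# returns a {'value': {...}} mapping that the client writes back to the form,
# e.g.:
#     self.onchange_purchase_analytic_plans(cr, uid, [], True)
#     # => {'value': {'group_analytic_account_for_purchases': True}}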
class account_config_settings(osv.osv_memory):
_inherit = 'account.config.settings'
_columns = {
'module_purchase_analytic_plans': fields.boolean('Use multiple analytic accounts on orders',
help='Allows the user to maintain several analysis plans. These let you split lines on a purchase order between several accounts and analytic plans.\n'
'-This installs the module purchase_analytic_plans.'),
'group_analytic_account_for_purchases': fields.boolean('Analytic accounting for purchases',
implied_group='purchase.group_analytic_accounting',
help="Allows you to specify an analytic account on purchase orders."),
}
def onchange_purchase_analytic_plans(self, cr, uid, ids, module_purchase_analytic_plans, context=None):
""" change group_analytic_account_for_purchases following module_purchase_analytic_plans """
if not module_purchase_analytic_plans:
return {}
return {'value': {'group_analytic_account_for_purchases': module_purchase_analytic_plans}}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
YangChihWei/w16b_test
|
refs/heads/master
|
static/Brython3.1.1-20150328-091302/Lib/_collections.py
|
603
|
# "High performance data structures
# "
# copied from pypy repo
#
# Copied and completed from the sandbox of CPython
# (nondist/sandbox/collections/pydeque.py rev 1.1, Raymond Hettinger)
#
# edited for Brython line 558 : catch ImportError instead of AttributeError
import operator
#try:
# from thread import get_ident as _thread_ident
#except ImportError:
def _thread_ident():
return -1
n = 30
LFTLNK = n
RGTLNK = n+1
BLOCKSIZ = n+2
# The deque's size limit is d.maxlen. The limit can be zero or positive, or
# None. After an item is added to a deque, we check to see if the size has
# grown past the limit. If it has, we get the size back down to the limit by
# popping an item off of the opposite end. The methods that can trigger this
# are append(), appendleft(), extend(), and extendleft().
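# Doctest-style illustration of that behaviour (added, not original):
#
#     >>> d = deque([1, 2, 3], maxlen=3)
#     >>> d.append(4)       # size would exceed maxlen, so popleft() trims it
#     >>> list(d)
#     [2, 3, 4]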
#class deque(object):
class deque:
def __new__(cls, iterable=(), *args, **kw):
        #self = super(deque, cls).__new__(cls, *args, **kw)
        # object.__new__() accepts no extra arguments under Python 3
        self = object.__new__(cls)
self.clear()
return self
def __init__(self, iterable=(), maxlen=None):
object.__init__(self)
self.clear()
if maxlen is not None:
if maxlen < 0:
raise ValueError("maxlen must be non-negative")
self._maxlen = maxlen
add = self.append
for elem in iterable:
add(elem)
@property
def maxlen(self):
return self._maxlen
def clear(self):
self.right = self.left = [None] * BLOCKSIZ
self.rightndx = n//2 # points to last written element
self.leftndx = n//2+1
self.length = 0
self.state = 0
def append(self, x):
self.state += 1
self.rightndx += 1
if self.rightndx == n:
newblock = [None] * BLOCKSIZ
self.right[RGTLNK] = newblock
newblock[LFTLNK] = self.right
self.right = newblock
self.rightndx = 0
self.length += 1
self.right[self.rightndx] = x
if self.maxlen is not None and self.length > self.maxlen:
self.popleft()
def appendleft(self, x):
self.state += 1
self.leftndx -= 1
if self.leftndx == -1:
newblock = [None] * BLOCKSIZ
self.left[LFTLNK] = newblock
newblock[RGTLNK] = self.left
self.left = newblock
self.leftndx = n-1
self.length += 1
self.left[self.leftndx] = x
if self.maxlen is not None and self.length > self.maxlen:
self.pop()
def extend(self, iterable):
if iterable is self:
iterable = list(iterable)
for elem in iterable:
self.append(elem)
def extendleft(self, iterable):
if iterable is self:
iterable = list(iterable)
for elem in iterable:
self.appendleft(elem)
def pop(self):
if self.left is self.right and self.leftndx > self.rightndx:
#raise IndexError, "pop from an empty deque" # does not work in brython
raise IndexError("pop from an empty deque")
x = self.right[self.rightndx]
self.right[self.rightndx] = None
self.length -= 1
self.rightndx -= 1
self.state += 1
if self.rightndx == -1:
prevblock = self.right[LFTLNK]
if prevblock is None:
# the deque has become empty; recenter instead of freeing block
self.rightndx = n//2
self.leftndx = n//2+1
else:
prevblock[RGTLNK] = None
self.right[LFTLNK] = None
self.right = prevblock
self.rightndx = n-1
return x
def popleft(self):
if self.left is self.right and self.leftndx > self.rightndx:
#raise IndexError, "pop from an empty deque"
raise IndexError("pop from an empty deque")
x = self.left[self.leftndx]
self.left[self.leftndx] = None
self.length -= 1
self.leftndx += 1
self.state += 1
if self.leftndx == n:
prevblock = self.left[RGTLNK]
if prevblock is None:
# the deque has become empty; recenter instead of freeing block
self.rightndx = n//2
self.leftndx = n//2+1
else:
prevblock[LFTLNK] = None
self.left[RGTLNK] = None
self.left = prevblock
self.leftndx = 0
return x
def count(self, value):
c = 0
for item in self:
if item == value:
c += 1
return c
def remove(self, value):
# Need to be defensive for mutating comparisons
for i in range(len(self)):
if self[i] == value:
del self[i]
return
raise ValueError("deque.remove(x): x not in deque")
def rotate(self, n=1):
length = len(self)
if length == 0:
return
halflen = (length+1) >> 1
if n > halflen or n < -halflen:
n %= length
if n > halflen:
n -= length
elif n < -halflen:
n += length
while n > 0:
self.appendleft(self.pop())
n -= 1
while n < 0:
self.append(self.popleft())
n += 1
def reverse(self):
"reverse *IN PLACE*"
leftblock = self.left
rightblock = self.right
leftindex = self.leftndx
rightindex = self.rightndx
for i in range(self.length // 2):
# Validate that pointers haven't met in the middle
assert leftblock != rightblock or leftindex < rightindex
# Swap
(rightblock[rightindex], leftblock[leftindex]) = (
leftblock[leftindex], rightblock[rightindex])
# Advance left block/index pair
leftindex += 1
if leftindex == n:
leftblock = leftblock[RGTLNK]
assert leftblock is not None
leftindex = 0
# Step backwards with the right block/index pair
rightindex -= 1
if rightindex == -1:
rightblock = rightblock[LFTLNK]
assert rightblock is not None
rightindex = n - 1
def __repr__(self):
threadlocalattr = '__repr' + str(_thread_ident())
if threadlocalattr in self.__dict__:
return 'deque([...])'
else:
self.__dict__[threadlocalattr] = True
try:
if self.maxlen is not None:
return 'deque(%r, maxlen=%s)' % (list(self), self.maxlen)
else:
return 'deque(%r)' % (list(self),)
finally:
del self.__dict__[threadlocalattr]
def __iter__(self):
return deque_iterator(self, self._iter_impl)
def _iter_impl(self, original_state, giveup):
if self.state != original_state:
giveup()
block = self.left
while block:
l, r = 0, n
if block is self.left:
l = self.leftndx
if block is self.right:
r = self.rightndx + 1
for elem in block[l:r]:
yield elem
if self.state != original_state:
giveup()
block = block[RGTLNK]
def __reversed__(self):
return deque_iterator(self, self._reversed_impl)
def _reversed_impl(self, original_state, giveup):
if self.state != original_state:
giveup()
block = self.right
while block:
l, r = 0, n
if block is self.left:
l = self.leftndx
if block is self.right:
r = self.rightndx + 1
for elem in reversed(block[l:r]):
yield elem
if self.state != original_state:
giveup()
block = block[LFTLNK]
def __len__(self):
#sum = 0
#block = self.left
#while block:
# sum += n
# block = block[RGTLNK]
#return sum + self.rightndx - self.leftndx + 1 - n
return self.length
def __getref(self, index):
if index >= 0:
block = self.left
while block:
l, r = 0, n
if block is self.left:
l = self.leftndx
if block is self.right:
r = self.rightndx + 1
span = r-l
if index < span:
return block, l+index
index -= span
block = block[RGTLNK]
else:
block = self.right
while block:
l, r = 0, n
if block is self.left:
l = self.leftndx
if block is self.right:
r = self.rightndx + 1
negative_span = l-r
if index >= negative_span:
return block, r+index
index -= negative_span
block = block[LFTLNK]
raise IndexError("deque index out of range")
def __getitem__(self, index):
block, index = self.__getref(index)
return block[index]
def __setitem__(self, index, value):
block, index = self.__getref(index)
block[index] = value
def __delitem__(self, index):
length = len(self)
if index >= 0:
if index >= length:
raise IndexError("deque index out of range")
self.rotate(-index)
self.popleft()
self.rotate(index)
else:
            # ~index == -index - 1; spelled arithmetically because bitwise
            # NOT was unavailable in Brython at the time
            index = -index - 1
if index >= length:
raise IndexError("deque index out of range")
self.rotate(index)
self.pop()
self.rotate(-index)
def __reduce_ex__(self, proto):
return type(self), (list(self), self.maxlen)
def __hash__(self):
#raise TypeError, "deque objects are unhashable"
raise TypeError("deque objects are unhashable")
def __copy__(self):
return self.__class__(self, self.maxlen)
# XXX make comparison more efficient
def __eq__(self, other):
if isinstance(other, deque):
return list(self) == list(other)
else:
return NotImplemented
def __ne__(self, other):
if isinstance(other, deque):
return list(self) != list(other)
else:
return NotImplemented
def __lt__(self, other):
if isinstance(other, deque):
return list(self) < list(other)
else:
return NotImplemented
def __le__(self, other):
if isinstance(other, deque):
return list(self) <= list(other)
else:
return NotImplemented
def __gt__(self, other):
if isinstance(other, deque):
return list(self) > list(other)
else:
return NotImplemented
def __ge__(self, other):
if isinstance(other, deque):
return list(self) >= list(other)
else:
return NotImplemented
def __iadd__(self, other):
self.extend(other)
return self
class deque_iterator(object):
def __init__(self, deq, itergen):
self.counter = len(deq)
def giveup():
self.counter = 0
#raise RuntimeError, "deque mutated during iteration"
raise RuntimeError("deque mutated during iteration")
self._gen = itergen(deq.state, giveup)
    def __next__(self):
        res = next(self._gen)
        self.counter -= 1
        return res
    next = __next__   # keep the Python 2-style alias
def __iter__(self):
return self
class defaultdict(dict):
def __init__(self, *args, **kwds):
if len(args) > 0:
default_factory = args[0]
args = args[1:]
if not callable(default_factory) and default_factory is not None:
raise TypeError("first argument must be callable")
else:
default_factory = None
        dict.__init__(self, *args, **kwds)
        self.default_factory = default_factory
#super(defaultdict, self).__init__(*args, **kwds)
#fixme.. had to add this function to get defaultdict working with brython correctly
def __getitem__(self, key):
if self.__contains__(key):
return dict.__getitem__(self,key)
return self.__missing__(key)
def __missing__(self, key):
# from defaultdict docs
if self.default_factory is None:
raise KeyError(key)
self[key] = value = self.default_factory()
return value
def __repr__(self, recurse=set()):
if id(self) in recurse:
return "defaultdict(...)"
try:
recurse.add(id(self))
return "defaultdict(%s, %s)" % (repr(self.default_factory), super(defaultdict, self).__repr__())
finally:
recurse.remove(id(self))
def copy(self):
return type(self)(self.default_factory, self)
def __copy__(self):
return self.copy()
def __reduce__(self):
#
#__reduce__ must return a 5-tuple as follows:
#
# - factory function
# - tuple of args for the factory function
# - additional state (here None)
# - sequence iterator (here None)
# - dictionary iterator (yielding successive (key, value) pairs
# This API is used by pickle.py and copy.py.
#
return (type(self), (self.default_factory,), None, None, self.iteritems())
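# Doctest-style illustration of the __missing__ behaviour (added, not
# original):
#
#     >>> counts = defaultdict(int)
#     >>> counts['x'] += 1      # 'x' is created via default_factory -> int()
#     >>> counts['x']
#     1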
from operator import itemgetter as _itemgetter
from keyword import iskeyword as _iskeyword
import sys as _sys
def namedtuple(typename, field_names, verbose=False, rename=False):
"""Returns a new subclass of tuple with named fields.
>>> Point = namedtuple('Point', 'x y')
>>> Point.__doc__ # docstring for the new class
'Point(x, y)'
>>> p = Point(11, y=22) # instantiate with positional args or keywords
>>> p[0] + p[1] # indexable like a plain tuple
33
>>> x, y = p # unpack like a regular tuple
>>> x, y
(11, 22)
    >>> p.x + p.y # fields also accessible by name
33
>>> d = p._asdict() # convert to a dictionary
>>> d['x']
11
>>> Point(**d) # convert from a dictionary
Point(x=11, y=22)
>>> p._replace(x=100) # _replace() is like str.replace() but targets named fields
Point(x=100, y=22)
"""
# Parse and validate the field names. Validation serves two purposes,
# generating informative error messages and preventing template injection attacks.
if isinstance(field_names, str):
field_names = field_names.replace(',', ' ').split() # names separated by whitespace and/or commas
field_names = tuple(map(str, field_names))
if rename:
names = list(field_names)
seen = set()
for i, name in enumerate(names):
if (not min(c.isalnum() or c=='_' for c in name) or _iskeyword(name)
or not name or name[0].isdigit() or name.startswith('_')
or name in seen):
names[i] = '_%d' % i
seen.add(name)
field_names = tuple(names)
for name in (typename,) + field_names:
if not min(c.isalnum() or c=='_' for c in name):
raise ValueError('Type names and field names can only contain alphanumeric characters and underscores: %r' % name)
if _iskeyword(name):
raise ValueError('Type names and field names cannot be a keyword: %r' % name)
if name[0].isdigit():
raise ValueError('Type names and field names cannot start with a number: %r' % name)
seen_names = set()
for name in field_names:
if name.startswith('_') and not rename:
raise ValueError('Field names cannot start with an underscore: %r' % name)
if name in seen_names:
raise ValueError('Encountered duplicate field name: %r' % name)
seen_names.add(name)
# Create and fill-in the class template
numfields = len(field_names)
argtxt = repr(field_names).replace("'", "")[1:-1] # tuple repr without parens or quotes
reprtxt = ', '.join('%s=%%r' % name for name in field_names)
template = '''class %(typename)s(tuple):
'%(typename)s(%(argtxt)s)' \n
__slots__ = () \n
_fields = %(field_names)r \n
def __new__(_cls, %(argtxt)s):
return tuple.__new__(_cls, (%(argtxt)s)) \n
@classmethod
def _make(cls, iterable, new=tuple.__new__, len=len):
'Make a new %(typename)s object from a sequence or iterable'
result = new(cls, iterable)
if len(result) != %(numfields)d:
raise TypeError('Expected %(numfields)d arguments, got %%d' %% len(result))
return result \n
def __repr__(self):
return '%(typename)s(%(reprtxt)s)' %% self \n
def _asdict(self):
'Return a new dict which maps field names to their values'
return dict(zip(self._fields, self)) \n
def _replace(_self, **kwds):
'Return a new %(typename)s object replacing specified fields with new values'
result = _self._make(map(kwds.pop, %(field_names)r, _self))
if kwds:
raise ValueError('Got unexpected field names: %%r' %% kwds.keys())
return result \n
def __getnewargs__(self):
return tuple(self) \n\n''' % locals()
for i, name in enumerate(field_names):
template += ' %s = _property(_itemgetter(%d))\n' % (name, i)
if verbose:
print(template)
# Execute the template string in a temporary namespace
namespace = dict(_itemgetter=_itemgetter, __name__='namedtuple_%s' % typename,
_property=property, _tuple=tuple)
try:
exec(template,namespace)
except SyntaxError as e:
        raise SyntaxError(str(e) + ':\n' + template)
result = namespace[typename]
# For pickling to work, the __module__ variable needs to be set to the frame
    # where the named tuple is created. Bypass this step in environments where
# sys._getframe is not defined (Jython for example) or sys._getframe is not
# defined for arguments greater than 0 (IronPython).
try:
result.__module__ = _sys._getframe(1).f_globals.get('__name__', '__main__')
except (AttributeError, ValueError):
pass
return result
if __name__ == '__main__':
Point = namedtuple('Point', ['x', 'y'])
p = Point(11, y=22)
print(p[0]+p[1])
x,y=p
print(x,y)
print(p.x+p.y)
print(p)
|
ngrudzinski/sentiment_analysis_437
|
refs/heads/master
|
scraper.py
|
1
|
#Credit to Marco Bonzanini CC-BY 4.0
import tweepy
from tweepy import OAuthHandler
from tweepy import TweepError
consumer_key = 'KGrJyI9GdujKcowMfrheB78dR'
consumer_secret = 'TEa81xn0OZOwebVyNccqQFkY2Qe0qPJbHixz1GZQZlJZ55jY2V'
access_token = '3021600922-wF6n5jmtjnd6Ip16L3CguHxK4p19OblPXjvJYvF'
access_secret = 'fVpGcfRkzJB0C0lyRUEMr7lGOhz8j1cBQ8PUyQx4FWzaK'
def scrape_tweets(user_id, tweets_scraped):
auth = OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_secret)
api = tweepy.API(auth)
# the 10 is an arbitrary value
# using too much bandwidth puts the Twitter Police on you
#for status in tweepy.Cursor(api.home_timeline).items(10):
f = open("scrapings.text", "w+")
try:
timeline = api.user_timeline(screen_name = user_id, include_rts = True, count = tweets_scraped)
except TweepError:
print("Error scraping tweets.\n")
f.close()
return 1
for tweet in timeline:
text = u''.join(tweet.text)
text = text.replace("\n"," ")
        f.write(text + "\n")
f.close()
return 0
if __name__ == "__main__":
scrape_tweets("potus", 3)
|
TheOstrichIO/ostrichlib
|
refs/heads/master
|
ostrich/utils/proc.py
|
1
|
# -*- coding: utf-8 -*-
# pylint: disable=unused-import, too-few-public-methods
"""
proc utils module
An adaptation of Python 3.5 subprocess.run function
source: https://github.com/python/cpython/blob/3.5/Lib/subprocess.py
"""
# Python 2 / Python 3 compatibility fu
# http://python-future.org/compatible_idioms.html
from __future__ import absolute_import
from __future__ import unicode_literals # so strings without u'' are unicode
try:
# Python 3.2 and above - use builtin subprocess module with timeout support
import subprocess
from subprocess import PIPE, Popen, SubprocessError, TimeoutExpired
__timeout__ = True
except ImportError:
try:
# Pre-Python 3.2, try using subprocess32 package if available,
# to gain timeout functionality
import subprocess32 as subprocess
from subprocess import PIPE, Popen, SubprocessError, TimeoutExpired
__timeout__ = True
except ImportError:
# No subprocess32 package, gracefully degrade to no-timeout behavior
import subprocess
from subprocess import PIPE, Popen
__timeout__ = False
# Exception classes used by this module. Only define fallbacks when the
# underlying subprocess module could not provide them; redefining them
# unconditionally would shadow the classes Popen actually raises.
if not __timeout__:
    class SubprocessError(Exception):
        pass
    class TimeoutExpired(SubprocessError):
        pass
class CalledProcessError(SubprocessError):
"""This exception is raised when a process run by run() with check=True
returns a non-zero exit status.
The exit status will be stored in the returncode attribute;
The cmd (run args) will be stored in the cmd attribute;
The output will be stored in output / stdout attribute;
The stderr will be stored in stderr attribute.
"""
def __init__(self, returncode, cmd, output=None, stderr=None):
super(CalledProcessError, self).__init__()
self.returncode = returncode
self.cmd = cmd
self.output = output
self.stderr = stderr
def __str__(self):
return ("Command '{0}' returned non-zero exit status {1}"
.format(self.cmd, self.returncode))
@property
def stdout(self):
"""Alias for output attribute, to match stderr"""
return self.output
class _TimeoutExpired(TimeoutExpired):
"""This exception is raised when the timeout expires while waiting for a
child process."""
def __init__(self, cmd, timeout, output=None, stderr=None):
super(_TimeoutExpired, self).__init__(cmd, timeout)
self.cmd = cmd
self.timeout = timeout
self.output = output
self.stderr = stderr
def __str__(self):
return ("Command '{0}' timed out after {1} seconds"
.format(self.cmd, self.timeout))
@property
def stdout(self):
return self.output
class CompletedProcess(object):
"""A process that has finished running.
This is returned by run().
Attributes:
- args: The list or str args passed to run().
- returncode: The exit code of the process, negative for signals.
- stdout: The standard output (None if not captured).
- stderr: The standard error (None if not captured).
"""
def __init__(self, args, returncode, stdout=None, stderr=None):
self.args = args
self.returncode = returncode
self.stdout = stdout
self.stderr = stderr
def __repr__(self):
args = ['args={0!r}'.format(self.args),
'returncode={0!r}'.format(self.returncode)]
if self.stdout is not None:
args.append('stdout={0!r}'.format(self.stdout))
if self.stderr is not None:
args.append('stderr={0!r}'.format(self.stderr))
return "{0}({1})".format(type(self).__name__, ', '.join(args))
def check_returncode(self):
"""Raise CalledProcessError if the exit code is non-zero."""
if self.returncode:
raise CalledProcessError(self.returncode, self.args, self.stdout,
self.stderr)
def run(*popenargs, **kwargs):
"""Run command with arguments and return a `CompletedProcess` instance.
The returned instance will have attributes args, returncode, stdout and
stderr.
By default, stdout and stderr are not captured, and those attributes
will be None. Pass stdout=PIPE and/or stderr=PIPE in order to capture them.
If `check` is True and the exit code was non-zero, it raises a
CalledProcessError. The CalledProcessError object will have the return code
in the returncode attribute, and output & stderr attributes if those
streams were captured.
If `timeout` is given, and the process takes too long, a TimeoutExpired
exception will be raised, if timeout is supported in the underlying Popen
implementation (e.g. Python >= 3.2, or an available subprocess32 package).
There is an optional argument `input`, allowing you to
pass a string to the subprocess's stdin. If you use this argument
you may not also use the Popen constructor's `stdin` argument, as
it will be used internally.
The other arguments are the same as for the Popen constructor.
If universal_newlines=True is passed, the `input` argument must be a
string and stdout/stderr in the returned object will be strings rather than
bytes.
"""
stdin = kwargs.pop('input', None)
timeout = kwargs.pop('timeout', None)
check = kwargs.pop('check', False)
if stdin is not None:
if 'stdin' in kwargs:
raise ValueError('stdin and input arguments may not both be used.')
kwargs['stdin'] = PIPE
process = Popen(*popenargs, **kwargs)
try:
if __timeout__:
stdout, stderr = process.communicate(stdin, timeout=timeout)
else:
stdout, stderr = process.communicate(stdin)
except TimeoutExpired:
# this will never happen if __timeout__ is False
process.kill()
stdout, stderr = process.communicate()
# pylint: disable=no-member
raise _TimeoutExpired(process.args, timeout, output=stdout,
stderr=stderr)
except:
process.kill()
process.wait()
raise
retcode = process.poll()
if check and retcode:
raise CalledProcessError(retcode, popenargs,
output=stdout, stderr=stderr)
return CompletedProcess(popenargs, retcode, stdout, stderr)
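# Minimal usage sketch (assumes a POSIX `echo` binary on PATH; illustrative,
# not part of the module):
#
#     >>> result = run(['echo', 'hello'], stdout=PIPE)
#     >>> result.returncode
#     0
#     >>> result.stdout.strip()
#     b'hello'
#     >>> result.check_returncode()   # no-op when the exit status is zero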
|
tommyip/zulip
|
refs/heads/master
|
zerver/management/commands/send_test_email.py
|
1
|
from typing import Any
from django.conf import settings
from django.core.mail import mail_admins, mail_managers, send_mail
from django.core.management import CommandError
from django.core.management.commands import sendtestemail
from zerver.lib.send_email import FromAddress
class Command(sendtestemail.Command):
def handle(self, *args: Any, **kwargs: str) -> None:
if settings.WARN_NO_EMAIL:
raise CommandError("Outgoing email not yet configured, see\n "
"https://zulip.readthedocs.io/en/latest/production/email.html")
if len(kwargs['email']) == 0:
raise CommandError("Usage: /home/zulip/deployments/current/manage.py "
"send_test_email username@example.com")
print("If you run into any trouble, read:")
print()
print(" https://zulip.readthedocs.io/en/latest/production/email.html#troubleshooting")
print()
print("The most common error is not setting `ADD_TOKENS_TO_NOREPLY_ADDRESS=False` when")
print("using an email provider that doesn't support that feature.")
print()
print("Sending 2 test emails from:")
message = ("Success! If you receive this message (and a second with a different subject), "
"you've successfully configured sending emails from your Zulip server. "
"Remember that you need to restart "
"the Zulip server with /home/zulip/deployments/current/scripts/restart-server "
"after changing the settings in /etc/zulip before your changes will take effect.")
sender = FromAddress.SUPPORT
print(" * %s" % (sender,))
send_mail("Zulip email test", message, sender, kwargs['email'])
noreply_sender = FromAddress.tokenized_no_reply_address()
print(" * %s" % (noreply_sender,))
send_mail("Zulip noreply email test", message, noreply_sender, kwargs['email'])
print()
print("Successfully sent 2 emails to %s!" % (", ".join(kwargs['email']),))
if kwargs['managers']:
mail_managers("Zulip manager email test", "This email was sent to the site managers.")
if kwargs['admins']:
mail_admins("Zulip admins email test", "This email was sent to the site admins.")
|
TemplateVoid/mapnik
|
refs/heads/master
|
scons/scons-local-2.3.1/SCons/Platform/sunos.py
|
11
|
"""engine.SCons.Platform.sunos
Platform-specific initialization for Sun systems.
There normally shouldn't be any need to import this module directly. It
will usually be imported through the generic SCons.Platform.Platform()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Platform/sunos.py 2014/03/02 14:18:15 garyo"
import posix
def generate(env):
posix.generate(env)
# Based on sunSparc 8:32bit
# ARG_MAX=1048320 - 3000 for environment expansion
env['MAXLINELENGTH'] = 1045320
env['PKGINFO'] = 'pkginfo'
env['PKGCHK'] = '/usr/sbin/pkgchk'
env['ENV']['PATH'] = env['ENV']['PATH'] + ':/opt/SUNWspro/bin:/usr/ccs/bin'
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
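# Hedged sketch (not part of this file): the docstring above says this module
# is normally loaded via the generic selector rather than imported directly.
def _example_platform_selection():
    # Guarded import, since SCons may not be installed in every environment.
    try:
        import SCons.Platform
    except ImportError:
        return None
    return SCons.Platform.Platform('sunos')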
|
marcoconstancio/yanta
|
refs/heads/master
|
actions/insert_image/insert_image.py
|
1
|
# -*- coding: utf-8 -*-
import base64
import os
from PyQt5.QtWidgets import QFileDialog
class insert_image:
def __init__(self):
pass
@staticmethod
def process(data, args):
image_path, extra = QFileDialog.getOpenFileName(None,
'Select Image',
data['functions'].session('current_note_location'),
"All Images (*.jpg *.jpeg *.png *.gif *.bmp)")
if image_path:
embed_images = data['functions'].config('Embed Images in notes')
if embed_images:
data['note_viewer'].call_function('insert_embedded_image', image_path)
else:
data['note_viewer'].call_function('insert_image', image_path, data['functions'].session('current_note_location'))
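# Hedged sketch (names inferred from the call sites above, not from yanta's
# actual API): minimal stand-ins showing the 'data' contract that process()
# relies on -- a functions object with config()/session() accessors and a
# note viewer exposing call_function().
class _StubFunctions(object):
    def config(self, key):
        return key == 'Embed Images in notes'    # pretend embedding is enabled
    def session(self, key):
        return '/tmp/notes'                      # hypothetical note location
class _StubNoteViewer(object):
    def call_function(self, name, *args):
        print(name)                              # record the requested call
# process() could then be driven (modulo the Qt file dialog) with:
# insert_image.process({'functions': _StubFunctions(),
#                       'note_viewer': _StubNoteViewer()}, None)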
|
randynobx/ansible
|
refs/heads/devel
|
test/integration/targets/module_utils/other_mu_dir/mork.py
|
294
|
data = 'mork'
|
iancrossfield/aries_reduce
|
refs/heads/master
|
analysis.py
|
1
|
""" My module for various data analysis tasks.
:REQUIREMENTS: :doc:`numpy`, :doc:`tools` (for :func:`errxy`)
2008-07-25 16:20 IJC: Created.
2009-12-08 11:31 IJC: Updated transit flag in planet objects and
:func:`rveph` function.
2010-02-18 14:06 IJC: Added :func:`medianfilter`
2010-08-03 15:38 IJC: Merged versions.
2010-10-28 11:53 IJMC: Updated documentation strings for Sphinx;
moved pylab import inside individual
function.
2011-04-13 14:29 IJMC: Added keyword functionality to :func:`fmin`
(taken from scipy.optimize).
2011-04-14 09:48 IJMC: Added a few fundamental constants.
2011-04-22 14:48 IJMC: Added :func:`trueanomaly` and
:func:`eccentricanomaly`.
2011-06-21 13:15 IJMC: Added :func:`get_t23` and :func:`get_t14` to
planet objects.
"""
import numpy as np
from numpy import ones, std, sum, mean, median, array, linalg, tile, concatenate, floor, Inf, arange, meshgrid, zeros, sin, cos, tan, arctan, sqrt, exp, log, nan, max
import pdb
from pylab import find
from scipy import optimize
import scipy.odr as odr
c = 299792458 # speed of light, m/s
h = 6.626068e-34 # SI units: Planck's constant
k = 1.3806503e-23 # SI units: Boltzmann constant, J/K
G = 6.67300e-11 # SI units: Gravitational constant
sigma = 5.670373e-8 # SI units: Stefan-Boltzmann constant
qe = 1.602176565e-19 # Electron charge, in Coulombs
me = 9.11e-31 # electron mass, in kg
ev = 1.602176565e-19 # electron Volt, in Joules
amu = 1.66053886e-27 # atomic mass unit, in kg
mh = 1.008 * amu # mass of hydrogen atom, in kg
pi = 3.14159265358979
AU = 149597870691.0 # AU in meters
day = 86400.0 # seconds in a Julian Day
rsun = 6.95508e8 # Solar mean radius, in m
msun = 1.9891e30 # solar mass in kg
rearth = 6.378136e6 # Earth's equatorial radius in m; [Allen's]
mearth = 5.9737e24 # in kg; [Allen's]
rjup = 7.1492e7 # Jupiter equatorial radius in m
mjup = 1898.7e24 # Jupiter mass in kg
pc = 3.08568025e16 # parsec in meters
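# Hedged consistency check (not part of the original module): the constants
# above reproduce Jupiter's surface gravity to within rounding.
def _check_constants():
    g_jup = G * mjup / rjup**2   # ~24.8 m/s^2
    return g_jup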
class planet:
"""Very handy planet object.
Best initialized using :func:`getobj`.
:REQUIREMENTS: Database file `exoplanets.csv` from http://exoplanets.org/
"""
# 2010-03-07 22:21 IJC: Created
# 2011-01-22 17:15 IJMC: Updated format b/c CPS folks keep changing
# their database file format
# 2011-05-19 16:11 IJMC: Updated format b/c CPS folks keep
# changing their database file format -- but
# it's almost worth it to finally have
# stellar radii.
# 2014-11-21 15:31 IJMC: Simplified check on number of keys vs. args.
def __init__(self, *args):
keys = ['name','comp','ncomp','mult','discmeth','firstref','firsturl','date','jsname','etdname','per','uper','t0','ut0','ecc','uecc','ueccd','om','uom','k','uk','msini','umsini','a','ua','orbref','orburl','transit','t14','ut14','tt','utt','ar','uar','uard','i','ui','uid','b','ub','ubd','depth','udepth','udepthd','r','ur','density','udensity','gravity','ugravity','transitref','transiturl','trend','dvdt','udvdt','freeze_ecc','rms','chi2','nobs','star','hd','hr','hipp','sao','gl','othername','sptype','binary','v','bmv','j','h','ks','ra','dec','ra_string','dec_string','rstar', 'urstar', 'urstard', 'rstarref', 'rstarurl','mstar','umstar','umstard','teff','uteff','vsini','uvsini','fe','ufe','logg','ulogg','shk','rhk','par','upar','distance','udistance','lambd', 'ulambd', 'massref','massurl','specref','specurl','distref','disturl','simbadname','nstedid','binaryref', 'binaryurl']
if len(keys)>len(args):
print "Incorrect number of input arguments (%i, but should be %i)" % (len(args), len(keys))
return None
for key,arg in zip(keys, args):
try:
temp = float(arg)+1
isnumber = True
except:
isnumber = False
            if isnumber:
                # setattr is safer than exec for arbitrary CSV field values
                setattr(self, key, float(arg))
            else:
                setattr(self, key, arg)
return None
def get_t23(self, *args):
"""Compute full transit duration (in days) for a transiting planet.
Returns:
nan if required fields are missing.
Using Eq. 15 of J. Winn's chapter in S. Seager's book "Exoplanets."
:SEE ALSO:
:func:`get_t14`
"""
# 2011-06-21 12:53 IJMC: Created
# Get necessary parameters:
per, ra, k, b, inc, ecc, om = self.per, 1./self.ar, sqrt(self.depth), self.b, self.i, self.ecc, self.om
inc *= np.pi/180.
om *= np.pi/180.
ret = 0.
# Check that no parameters are empty:
if per=='' or k=='':
ret = nan
if b=='':
try:
b = (cos(inc)/ra) * (1. - ecc**2) / (1. + ecc * sin(om))
except:
ret = nan
elif ra=='':
try:
ra = (cos(inc)/b) * (1. - ecc**2) / (1. + ecc * sin(om))
except:
ret = nan
elif inc=='':
try:
inc = np.arccos((b * ra) / ((1. - ecc**2) / (1. + ecc * sin(om))))
except:
ret = nan
if np.isnan(ret):
print "Could not compute t_23. Parameters are:"
print "period>>", per
print "r_*/a>>", ra
print "r_p/r_*>>", k
print "impact parameter (b)>>", b
print "inclination>>", inc*180./np.pi, " deg"
print "eccentricity>>", ecc
print "omega>>", om*180./np.pi, " deg"
else:
ret = (per/np.pi) * np.arcsin(ra * np.sqrt((1. - k)**2 - b**2) / np.sin(inc))
return ret
def get_t14(self, *args):
"""Compute total transit duration (in days) for a transiting planet.
Returns:
nan if required fields are missing.
Using Eq. 14 of J. Winn's chapter in S. Seager's book "Exoplanets."
:SEE ALSO:
:func:`get_t23`
"""
# 2011-06-21 12:53 IJMC: Created
# Get necessary parameters:
per, ra, k, b, inc, ecc, om = self.per, 1./self.ar, sqrt(self.depth), self.b, self.i, self.ecc, self.om
inc *= np.pi/180.
om *= np.pi/180.
ret = 0.
# Check that no parameters are empty:
if per=='' or k=='':
ret = nan
if b=='':
try:
b = (cos(inc)/ra) * (1. - ecc**2) / (1. + ecc * sin(om))
except:
ret = nan
elif ra=='':
try:
ra = (cos(inc)/b) * (1. - ecc**2) / (1. + ecc * sin(om))
except:
ret = nan
elif inc=='':
try:
inc = np.arccos((b * ra) / ((1. - ecc**2) / (1. + ecc * sin(om))))
except:
ret = nan
if np.isnan(ret):
print "Could not compute t_14. Parameters are:"
print "period>>", per
print "r_*/a>>", ra
print "r_p/r_*>>", k
print "impact parameter (b)>>", b
print "inclination>>", inc*180./np.pi, " deg"
print "eccentricity>>", ecc
print "omega>>", om*180./np.pi, " deg"
else:
ret = (per/np.pi) * np.arcsin(ra * np.sqrt((1. + k)**2 - b**2) / np.sin(inc))
return ret
def get_scaleheight(self, ab=0.2, f=0.33, mu=2.3):
"""Compute atmospheric scale height (in m).
:INPUTS:
ab : scalar, 0 <= ab <= 1
Bond albedo.
        f : scalar, 0.25 <= f <= 2/3.
Recirculation efficiency. A value of 0.25 indicates full
redistribution of incident heat, while 2/3 indicates zero
redistribution.
mu : scalar
Mean atmospheric ('molecular') mass, in amu.
"""
# 2016-07-14 10:29 IJMC: Created
teq = self.get_teq(ab=ab, f=f)
try:
mass = mjup * self.msini / np.sin(self.i * np.pi/180.)
except:
mass = self.msini * mjup
g = G * mass / (self.r * rjup)**2
return k*teq / (mu*amu * g)
def get_teq(self, ab, f, reterr=False):
"""Compute equilibrium temperature.
:INPUTS:
ab : scalar, 0 <= ab <= 1
Bond albedo.
        f : scalar, 0.25 <= f <= 2/3.
Recirculation efficiency. A value of 0.25 indicates full
redistribution of incident heat, while 2/3 indicates zero
redistribution.
:EXAMPLE:
::
import analysis
planet = analysis.getobj('HD 189733 b')
planet.get_teq(0.0, 0.25) # zero albedo, full recirculation
:REFERENCE:
S. Seager, "Exoplanets," 2010. Eq. 3.9
"""
# 2012-09-07 16:24 IJMC: Created
return self.teff/np.sqrt(self.ar) * (f * (1. - ab))**0.25
def rv(self, **kw):
"""Compute radial velocity on a planet object for given Julian Date.
:EXAMPLE:
::
import analysis
p = analysis.getobj('HD 189733 b')
jd = 2400000.
print p.rv(jd)
refer to function `analysis.rv` for full documentation.
SEE ALSO: :func:`analysis.rv`, :func:`analysis.mjd`
"""
return rv(self, **kw)
def rveph(self, jd):
"""Compute the most recently elapsed RV emphemeris of a given
planet at a given JD. RV ephemeris is defined by the having
radial velocity equal to zero.
refer to :func:`analysis.rv` for full documentation.
SEE ALSO: :func:`analysis.getobj`, :func:`analysis.phase`
"""
return rveph(self, jd)
def phase(self, hjd):
"""Get phase of an orbiting planet.
refer to function `analysis.getorbitalphase` for full documentation.
SEE ALSO: :func:`analysis.getorbitalphase`, :func:`analysis.mjd`
"""
# 2009-09-28 14:07 IJC: Implemented object-oriented version
return getorbitalphase(self, hjd)
def writetext(self, filename, **kw):
"""See :func:`analysis.planettext`"""
return planettext(self, filename, **kw)
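# Hedged worked example (not in the original file): the equilibrium-temperature
# formula used by planet.get_teq() above, evaluated standalone for a
# hypothetical hot Jupiter (teff=5000 K, a/R* = 10, zero albedo, f=0.25).
def _demo_teq(teff=5000., ar=10., ab=0., f=0.25):
    return teff/np.sqrt(ar) * (f * (1. - ab))**0.25   # ~1118 K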
def loaddata(filelist, path='', band=1):
"""Load a set of reduced spectra into a single data file.
datalist = ['file1.fits', 'file2.fits']
datapath = '~/data/'
data = loaddata(datalist, path=datapath, band=1)
The input can also be the name of an IRAF-style file list.
"""
# 2008-07-25 16:26 IJC: Created
# 2010-01-20 12:58 IJC: Made function work for IRTF low-res data.
# Replaced 'warn' command with 'print'.
# 2011-04-08 11:42 IJC: Updated; moved inside analysis.py.
# 2011-04-12 09:57 IJC: Fixed misnamed imports
try:
from astropy.io import fits as pyfits
except:
import pyfits
    import nsdata as ns  # provides ns.file2list below; not imported at module level
    data = array([])
if filelist.__class__==str or isinstance(filelist,np.string_):
filelist = ns.file2list(filelist)
elif filelist.__class__<>list:
print('Input to loaddata must be a python list or string')
return data
num = len(filelist)
# Load one file just to get the dimensions right.
irspec = pyfits.getdata(filelist[0])
ii = 0
for element in filelist:
irspec = pyfits.getdata(element)
if ii==0:
irsh = irspec.shape
data = zeros((num,)+irsh[1::], float)
if len(irsh)>2:
for jj in range(irsh[1]):
data[ii, jj, :] = irspec[band-1, jj, :]
else:
data[ii,:] = irspec[band-1,:]
ii=ii+1
return data
def getobj(*args, **kw):
"""Get data for a specified planet.
:INPUTS: (str) -- planet name, e.g. "HD 189733 b"
:OPTIONAL INPUTS:
datafile : str
datafile name
verbose : bool
verbosity flag
:EXAMPLE:
::
p = getobj('55cnce')
p.period ---> 2.81705
The attributes of the returned object are many and varied, and
can be listed using the 'dir' command on the returned object.
This looks up data from the local datafile, which could be out
of date.
SEE ALSO: :func:`rv`"""
# 2008-07-30 16:56 IJC: Created
# 2010-03-07 22:24 IJC: Updated w/new exoplanets.org data table!
# 2010-03-11 10:01 IJC: If planet name not found, return list of
# planet names. Restructured input format.
# 2010-11-01 13:30 IJC: Added "import os"
# 2011-05-19 15:56 IJC: Modified documentation.
import os
if kw.has_key('datafile'):
datafile=kw['datafile']
else:
datafile=os.path.expanduser('~/python/exoplanets.csv')
if kw.has_key('verbose'):
verbose = kw['verbose']
else:
verbose=False
if len(args)==0:
inp = 'noplanetname'
else:
inp = args[0]
if verbose: print "datafile>>" + datafile
f = open(datafile, 'r')
data = f.readlines()
f.close()
data = data[1::] # remove header line
names = array([line.split(',')[0] for line in data])
foundobj = (names==inp)
if (not foundobj.any()):
if verbose: print "could not find desired planet; returning names of known planets"
return names
else:
planetind = int(find(foundobj))
pinfo = data[planetind].strip().split(',')
myplanet = planet(*pinfo)
return myplanet
def getorbitalphase(planet, hjd, **kw):
"""Get phase of an orbiting planet.
INPUT:
planet -- a planet from getobj; e.g., getobj('55cnce')
hjd
OUTPUT:
orbital phase -- from 0 to 1.
NOTES:
If planet.transit==True, phase is based on the transit time ephemeris.
If planet.transit==False, phase is based on the RV ephemeris as
computed by function rveph
SEE ALSO: :func:`getobj`, :func:`mjd`, :func:`rveph`
"""
hjd = array(hjd).copy()
if bool(planet.transit)==True:
thiseph = planet.tt
else:
thiseph = planet.rveph(hjd.max())
orbphase = ((hjd - thiseph) ) / planet.per
orbphase -= int(orbphase.mean())
return orbphase
def mjd(date):
"""Convert Julian Date to Modified Julian Date, or vice versa.
if date>=2400000.5, add 2400000.5
if date<2400000.5, subtract 2400000.5
"""
# 2009-09-24 09:54 IJC: Created
date = array(date, dtype=float, copy=True, subok=True)
offset = 2400000.5
if (date<offset).all():
date += offset
elif (date>=offset).all():
date -= offset
else:
print "Input contains date both below and above %f" % offset
return date
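# Hedged usage sketch: round-tripping a date through mjd() above.
def _demo_mjd():
    jd = 2454693.0
    m = mjd(jd)        # -> 54692.5 (JD to MJD)
    return mjd(m)      # -> 2454693.0 (back to JD)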
def rveph(p, jd):
"""Compute the most recently elapsed RV emphemeris of a given
planet at a given JD. RV ephemeris is defined by the having
radial velocity equal to zero.
:EXAMPLE:
::
from analysis import getobj, rveph
jd = 2454693 # date: 2008/08/14
p = getobj('55cnce') # planet: 55 Cnc e
t = rveph(p, jd)
returns t ~
SEE ALSO: :func:`getobj`, :func:`phase`
"""
# 2009-12-08 11:20 IJC: Created. Ref: Beauge et al. 2008 in
# "Extrasolar Planets," R. Dvorak ed.
# 2010-03-12 09:34 IJC: Updated for new planet-style object.
from numpy import cos, arccos, arctan, sqrt, tan, pi, sin, int
if p.__class__<>planet:
raise Exception, "First input must be a 'planet' object."
omega = p.om*pi/180
tau = p.t0
ecc = p.ecc
per = p.per
f = arccos(-ecc * cos(omega)) - omega # true anomaly
u = 2.*arctan(sqrt((1-ecc)/(1+ecc))*tan(f/2.)) # eccentric anomaly
n = 2*pi/per
time0 = tau+ (u-ecc*sin(u))/n
norb = int((time0-jd)/per)
time = time0-norb*per
return time
def rv(p, jd=None, e=None, reteanom=False, tol=1e-8):
""" Compute unprojected astrocentric RV of a planet for a given JD in m/s.
:INPUTS:
p : planet object, or 5- or 6-sequence
planet object: see :func:`get_obj`
OR:
sequence: [period, t_peri, ecc, a, long_peri, gamma]
(if gamma is omitted, it is set to zero)
(long_peri should be in radians!)
jd : NumPy array
Dates of observation (in same time system as t_peri).
e : NumPy array
Eccentric Anomaly of observations (can be precomputed to
save time)
:EXAMPLE:
::
jd = 2454693 # date: 2008/08/14
p = getobj('55 Cnc e') # planet: 55 Cnc e
vp = rv(p, jd)
returns vp ~ 1.47e5 [m/s]
The result will need to be multiplied by the sine of the
inclination angle (i.e., "sin i"). Positive radial velocities
are directed _AWAY_ from the observer.
To compute the barycentric radial velocity of the host star,
scale the returned values by the mass ratio -m/(m+M).
SEE ALSO: :func:`getobj`, :func:`rvstar`
"""
# 2008-08-13 12:59 IJC: Created with help from Debra Fischer,
# Murray & Dermott, and Beauge et al. 2008 in "Extrasolar
# Planets," R. Dvorak ed.
# 2008-09-25 12:55 IJC: Updated documentation to be more clear.
# 2009-10-01 10:25 IJC: Moved "import" statement within func.
# 2010-03-12 09:13 IJC: Updated w/new planet-type objects
# 2012-10-15 17:20 IJMC: First input can now be a list of
# elements. Added option to pass in
# eccentric anomaly.
# 2016-10-21 10:49 IJMC: Now include gamma in calculation
jd = array(jd, copy=True, subok=True)
if jd.shape==():
singlevalueinput = True
jd = array([jd])
else:
singlevalueinput = False
if ((jd-2454692) > 5000).any():
raise Exception, "Implausible Julian Date entered."
if hasattr(p, '__iter__'):
if len(p)==5:
per, tau, ecc, a, omega = p
gamma = 0.
else:
per, tau, ecc, a, omega, gamma = p[0:6]
if ecc > 1: # Reset eccentricity directly
ecc = 1. - tol
p[2] = ecc
elif ecc < 0:
ecc = np.abs(ecc)
p[2] = ecc
else:
if p.__class__<>planet:
raise Exception, "First input must be a 'planet' object."
try:
per = p.per
tau = p.t0
ecc = p.ecc
a = p.a
omega = p.om * pi/180.0
gamma = 0.
except:
raise Exception, "Could not load all desired planet parameters."
if ecc < 0:
ecc = np.abs(ecc)
if ecc > 1:
ecc = 1. - tol
if e is None:
n = 2.*pi/per # mean motion
m = n*(jd - tau) # mean anomaly
e = []
for element in m: # compute eccentric anomaly
def kep(e): return element - e + ecc*sin(e)
e.append(optimize.brentq(kep, element-1, element+1, xtol=tol, disp=False))
#e.append(optimize.newton(kep, 0, tol=tol))
else:
pass
e = array(e, copy=False)
f = 2. * arctan( sqrt((1+ecc)/(1.-ecc)) * tan(e/2.) )
#r = a*(1-ecc*cos(e))
#x = r*cos(f)
#y = r*sin(f)
K = n * a / sqrt(1-ecc**2)
vz = -K * ( cos(f+omega) + ecc*cos(omega) )
vzms = vz * AU/day # convert to m/s
vzms += gamma
if singlevalueinput:
vzms = vzms[0]
if reteanom:
ret = vzms, e
else:
ret = vzms
return ret
def rvstar(p, jd=None, e=None, reteanom=False, tol=1e-8):
""" Compute radial velocity of a star which has an orbiting planet.
:INPUTS:
p : planet object, or 5- or 6-sequence
planet object: see :func:`get_obj`
OR:
sequence: [period, t_peri, ecc, K, long_peri, gamma]
(if gamma is omitted, it is set to zero)
jd : NumPy array
Dates of observation (in same time system as t_peri).
e : NumPy array
Eccentric Anomaly of observations (can be precomputed to
save time)
:EXAMPLE:
::
jd = 2454693 # date: 2008/08/14
p = getobj('55 Cnc e') # planet: 55 Cnc e
vp = rv(p, jd)
Positive radial velocities are directed _AWAY_ from the
observer.
:SEE_ALSO: :func:`rv`, :func:`getobj`
"""
# 2012-10-15 22:34 IJMC: Created from function 'rv'
jd = array(jd, copy=True, subok=True)
if jd.shape==():
singlevalueinput = True
jd = array([jd])
else:
singlevalueinput = False
if ((jd-2454692) > 5000).any():
raise Exception, "Implausible Julian Date entered."
if hasattr(p, '__iter__'):
if len(p)==5:
per, tau, ecc, k, omega = p
gamma = 0.
else:
per, tau, ecc, k, omega, gamma = p[0:6]
omega *= np.pi/180.
if ecc < 0:
ecc = np.abs(ecc)
p[2] = ecc
elif ecc > 1:
ecc = 1. - tol
p[2] = ecc
else:
if p.__class__<>planet:
raise Exception, "First input must be a 'planet' object."
try:
per = p.per
tau = p.t0
ecc = p.ecc
k = p.k
omega = p.om * pi/180.0
gamma = 0.
except:
raise Exception, "Could not load all desired planet parameters."
if ecc < 0:
ecc = np.abs(ecc)
if ecc > 1:
ecc = 1. - tol
if e is None:
n = 2.*pi/per # mean motion
m = n*(jd - tau) # mean anomaly
e = np.zeros(m.shape)
for ii,element in enumerate(m): # compute eccentric anomaly
def kep(e): return element - e + ecc*sin(e)
e[ii] = optimize.brentq(kep, element-1, element+1, xtol=tol, disp=False)
#e.append(optimize.newton(kep, 0, tol=tol))
else:
pass
# Compute true anomaly:
f = 2. * arctan( sqrt((1+ecc)/(1.-ecc)) * tan(e/2.) )
vrstar = k * (np.cos(f + omega) + ecc*np.cos(omega)) + gamma
if singlevalueinput:
vrstar = vrstar[0]
if reteanom:
ret = vrstar, e
else:
ret = vrstar
return ret
def dopspec(starspec, planetspec, starrv, planetrv, disp, starphase=[], planetphase=[], wlscale=True):
""" Generate combined time series spectra using planet and star
models, planet and star RV profiles.
D = dopspec(sspec, pspec, sRV, pRV, disp, sphase=[], pphase=[])
:INPUTS:
sspec, pspec: star, planet spectra; must be on a common
logarithmic wavelength grid
sRV, pRV: star, planet radial velocities in m/s
disp: constant logarithmic dispersion of the wavelength
grid: LAMBDA_i/LAMBDA_(i-1)
:OPTIONAL INPUTS:
sphase, pphase: normalized phase functions of star and planet.
The inputs sspec and pspec will be scaled
by these values for each observation.
wlscale: return relative wavelength scale for new data
NOTE: Input spectra must be linearly spaced in log wavelength and increasing:
that is, they must have [lambda_i / lambda_(i-1)] = disp =
constant > 1
Positive velocities are directed AWAY from the observer."""
#2008-08-19 16:30 IJC: Created
# Options: 1. chop off ends or not? 2. phase function.
# 4. Noise level 5. telluric? 6. hold star RV constant
# Initialize:
starspec = array(starspec ).ravel()
planetspec = array(planetspec).ravel()
starrv = array(starrv ).ravel()
planetrv = array(planetrv ).ravel()
ns = len(starspec)
nr = len(starrv)
starphase = array(starphase ).ravel()
planetphase = array(planetphase).ravel()
if len(starphase)==0:
starphase = ones(nr, float)
if len(planetphase)==0:
planetphase = ones(nr, float)
# Check inputs:
if ns<>len(planetspec):
raise Exception, "Star and planet spectra must be same length."
if nr<>len(planetrv):
raise Exception, "Star and planet RV profiles must be same length."
logdisp = log(disp)
# Calculate wavelength shift limits for each RV point
    sshift = ( log(1.0+starrv /c) / logdisp ).round().astype(int)
    pshift = ( log(1.0+planetrv/c) / logdisp ).round().astype(int)
limlo = int( concatenate((sshift, pshift)).min() )
limhi = int( concatenate((sshift, pshift)).max() )
ns2 = ns + (limhi - limlo)
data = zeros((nr, ns2), float)
# Iterate over RV values, constructing spectra
for ii in range(nr):
data[ii, (sshift[ii]-limlo):(ns+sshift[ii]-limlo)] = \
starphase[ii] * starspec
data[ii, (pshift[ii]-limlo):(ns+pshift[ii]-limlo)] = \
data[ii, (pshift[ii]-limlo):(ns+pshift[ii]-limlo)] + \
planetphase[ii] * planetspec
if wlscale:
data = (data, disp**(arange(ns2) + limlo))
return data
def loadatran(filename, wl=True, verbose=False):
""" Load ATRAN atmospheric transmission data file.
t = loadatran(filename, wl=True)
INPUT:
filename -- filename of the ATRAN file. This should be an
ASCII array where the second column is
wavelength and the third is the atmospheric
transmission.
(This can also be a list of filenames!)
:OPTIONAL INPUT:
wl -- if True (DEFAULT) also return the wavelength scale.
This can take up to twice as long for large files.
RETURNS:
if wl==True: returns a 2D array, with columns [lambda, transmission]
if wl==False: returns a 1D Numpy array of the transmission
NOTE: A list of these return values is created if
'filename' is actually an input list."""
# 2008-08-21 09:42 IJC: Created to save myself a bit of time
# 2008-08-25 10:08 IJC: Read in all lines at once; should go
# faster with sufficient memory
# 2008-09-09 13:56 IJC: Only convert the wavelength and flux
# columns(#1 & #2) -- speed up slightly.
if filename.__class__==list:
returnlist = []
for element in filename:
returnlist.append(loadatran(element, wl=wl))
return returnlist
f = open(filename, 'r')
dat = f.readlines()
f.close()
if verbose:
print dat[0]
print dat[0].split()
print dat[0].split()[1:3]
print dat[0].split()[2]
if wl:
data = array([map(float, line.split()[1:3]) for line in dat])
else:
data = array([float(line.split()[2]) for line in dat])
return data
def poly2cheby(cin):
"""Convert straight monomial coefficients to chebychev coefficients.
INPUT: poly coefficients (e.g., for use w/polyval)
OUTPUT: chebyt coefficients
SEE ALSO: :func:`gpolyval`; scipy.special.chebyt
"""
# 2009-07-07 09:41 IJC: Created
    from numpy import poly1d   # poly1d moved from scipy.special to numpy (see note in gpolyval)
    from scipy.special import chebyt
    cin = poly1d(cin)
cout = []
ord = cin.order
for ii in range(ord+1):
chebyii = chebyt(ord-ii)
cout.append(cin.coeffs[0]/chebyii.coeffs[0])
cin -= chebyii*cout[ii]
return cout
def cheby2poly(cin):
"""Convert chebychev coefficients to 'normal' polyval coefficients .
:INPUT: chebyt coefficients
:OUTPUT: poly coefficients (e.g., for use w/polyval)
:SEE ALSO: :func:`poly2cheby`, :func:`gpolyval`; scipy.special.chebyt
"""
# 2009-10-22 22:19 IJC: Created
    from numpy import poly1d   # poly1d now lives in numpy, not scipy.special
    from scipy.special import chebyt
    cin = poly1d(cin)
cout = poly1d(0)
ord = cin.order
for ii in range(ord+1):
cout += chebyt(ii)*cin[ii]
return cout
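# Hedged spot check (not in the original): one unit of T_2 and nothing else,
# given highest-order-first input, should come back as 2x^2 - 1.
def _demo_cheby2poly():
    return cheby2poly([1., 0., 0.]).coeffs   # ~ array([ 2., 0., -1.])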
def gpolyval(c,x, mode='poly', retp=False):
"""Generic polynomial evaluator.
INPUT:
c (1D array) -- coefficients of polynomial to evaluate,
from highest-order to lowest.
x (1D array) -- pixel values at which to evaluate C
    OPTIONAL INPUTS:
MODE='poly' -- 'poly' uses standard monomial coefficients
as accepted by, e.g., polyval. Other
modes -- 'cheby' (1st kind) and 'legendre'
(P_n) -- convert input 'x' to a normalized
[-1,+1] domain
RETP=False -- Return coefficients as well as evaluated poly.
OUTPUT:
y -- array of shape X; evaluated polynomial.
(y, p) (if retp=True)
SEE ALSO: :func:`poly2cheby`
"""
# 2009-06-17 15:42 IJC: Created
# 2011-12-29 23:11 IJMC: Fixed normalization of the input 'x' array
# 2014-12-18 22:55 IJMC: poly1d has moved from scipy.special to NumPy
from scipy import special
from numpy import zeros, polyval, concatenate, poly1d
c = array(c).copy()
nc = len(c)
if mode=='poly':
totalcoef = c
ret = polyval(totalcoef, x)
elif mode=='cheby':
x = 2. * (x - 0.5*(x.max() + x.min())) / (x.max() - x.min())
totalcoef = poly1d([0])
for ii in range(nc):
totalcoef += c[ii]*special.chebyt(nc-ii-1)
ret = polyval(totalcoef, x)
elif mode=='legendre':
x = 2. * (x - 0.5*(x.max() + x.min())) / (x.max() - x.min())
totalcoef = poly1d([0])
for ii in range(nc):
totalcoef += c[ii]*special.legendre(nc-ii-1)
ret = polyval(totalcoef, x)
if retp:
return (ret, totalcoef)
else:
return ret
return -1
def stdr(x, nsigma=3, niter=Inf, finite=True, verbose=False, axis=None):
"""Return the standard deviation of an array after removing outliers.
:INPUTS:
x -- (array) data set to find std of
:OPTIONAL INPUT:
nsigma -- (float) number of standard deviations for clipping
niter -- number of iterations.
finite -- if True, remove all non-finite elements (e.g. Inf, NaN)
axis -- (int) axis along which to compute the mean.
:EXAMPLE:
::
from numpy import *
from analysis import stdr
x = concatenate((randn(200),[1000]))
print std(x), stdr(x, nsigma=3)
x = concatenate((x,[nan,inf]))
print std(x), stdr(x, nsigma=3)
SEE ALSO: :func:`meanr`, :func:`medianr`, :func:`removeoutliers`,
:func:`numpy.isfinite`
"""
# 2010-02-16 14:57 IJC: Created from mear
# 2010-07-01 14:06 IJC: ADded support for higher dimensions
from numpy import array, isfinite, zeros, swapaxes
x = array(x, copy=True)
xshape = x.shape
ndim = len(xshape)
if ndim==0:
return x
if ndim==1 or axis is None:
# "1D" array
x = x.ravel()
if finite:
x = x[isfinite(x)]
x = removeoutliers(x, nsigma, niter=Inf, verbose=verbose)
return x.std()
else:
newshape = list(xshape)
oldDimension = newshape.pop(axis)
ret = zeros(newshape, float)
        # Prevent us from taking the action along the axis of primary indices:
if axis==0:
x=swapaxes(x,0,1)
if axis>1:
nextaxis = axis-1
else:
nextaxis = 0
for ii in range(newshape[0]):
#print 'x.shape>>',x.shape, 'newshape>>',newshape, 'x[ii].shape>>',x[ii].shape, 'ret[ii].shape>>',ret[ii].shape,'ii>>',ii
ret[ii] = stdr(x[ii], nsigma=nsigma,niter=niter,finite=finite,\
verbose=verbose, axis=nextaxis)
return ret
def meanr(x, nsigma=3, niter=Inf, finite=True, verbose=False,axis=None):
"""Return the mean of an array after removing outliers.
:INPUTS:
x -- (array) data set to find mean of
:OPTIONAL INPUT:
nsigma -- (float) number of standard deviations for clipping
niter -- number of iterations.
finite -- if True, remove all non-finite elements (e.g. Inf, NaN)
axis -- (int) axis along which to compute the mean.
:EXAMPLE:
::
from numpy import *
from analysis import meanr
x = concatenate((randn(200),[1000]))
print mean(x), meanr(x, nsigma=3)
x = concatenate((x,[nan,inf]))
print mean(x), meanr(x, nsigma=3)
SEE ALSO: :func:`medianr`, :func:`stdr`, :func:`removeoutliers`,
:func:`numpy.isfinite`
"""
# 2009-10-01 10:44 IJC: Created
# 2010-07-01 13:52 IJC: Now handles higher dimensions.
from numpy import array, isfinite, zeros, swapaxes
x = array(x, copy=True)
xshape = x.shape
ndim = len(xshape)
if ndim==0:
return x
if ndim==1 or axis is None:
# "1D" array
x = x.ravel()
if finite:
x = x[isfinite(x)]
x = removeoutliers(x, nsigma, niter=Inf, verbose=verbose)
return x.mean()
else:
newshape = list(xshape)
oldDimension = newshape.pop(axis)
ret = zeros(newshape, float)
        # Prevent us from taking the mean along the axis of primary indices:
if axis==0:
x=swapaxes(x,0,1)
if axis>1:
nextaxis = axis-1
else:
nextaxis = 0
for ii in range(newshape[0]):
#print 'x.shape>>',x.shape, 'newshape>>',newshape, 'x[ii].shape>>',x[ii].shape, 'ret[ii].shape>>',ret[ii].shape,'ii>>',ii
ret[ii] = meanr(x[ii], nsigma=nsigma,niter=niter,finite=finite,\
verbose=verbose, axis=nextaxis)
return ret
def medianr(x, nsigma=3, niter=Inf, finite=True, verbose=False,axis=None):
"""Return the median of an array after removing outliers.
:INPUTS:
x -- (array) data set to find median of
:OPTIONAL INPUT:
nsigma -- (float) number of standard deviations for clipping
niter -- number of iterations.
finite -- if True, remove all non-finite elements (e.g. Inf, NaN)
axis -- (int) axis along which to compute the mean.
:EXAMPLE:
::
from numpy import *
from analysis import medianr
x = concatenate((randn(200),[1000]))
print median(x), medianr(x, nsigma=3)
x = concatenate((x,[nan,inf]))
print median(x), medianr(x, nsigma=3)
SEE ALSO: :func:`meanr`, :func:`stdr`, :func:`removeoutliers`,
:func:`numpy.isfinite`
"""
# 2009-10-01 10:44 IJC: Created
#2010-07-01 14:04 IJC: Added support for higher dimensions
from numpy import array, isfinite, zeros, median, swapaxes
x = array(x, copy=True)
xshape = x.shape
ndim = len(xshape)
if ndim==0:
return x
if ndim==1 or axis is None:
# "1D" array
x = x.ravel()
if finite:
x = x[isfinite(x)]
x = removeoutliers(x, nsigma, niter=Inf, verbose=verbose)
return median(x)
else:
newshape = list(xshape)
oldDimension = newshape.pop(axis)
ret = zeros(newshape, float)
        # Prevent us from taking the action along the axis of primary indices:
if axis==0:
x=swapaxes(x,0,1)
if axis>1:
nextaxis = axis-1
else:
nextaxis = 0
for ii in range(newshape[0]):
#print 'x.shape>>',x.shape, 'newshape>>',newshape, 'x[ii].shape>>',x[ii].shape, 'ret[ii].shape>>',ret[ii].shape,'ii>>',ii
ret[ii] = medianr(x[ii], nsigma=nsigma,niter=niter,finite=finite,\
verbose=verbose, axis=nextaxis)
return ret
def amedian(a, axis=None):
"""amedian(a, axis=None)
Median the array over the given axis. If the axis is None,
median over all dimensions of the array.
Think of this as normal Numpy median, but preserving dimensionality.
"""
# 2008-07-24 14:12 IJC: Copied from
# http://projects.scipy.org/scipy/numpy/ticket/558#comment:3,
# with a few additions of my own (i.e., keeping the same
# dimensionality).
# 2011-04-08 11:49 IJC: Moved to "analysis.py"
sorted = array(a, subok=True, copy=True)
if axis is None:
return median(sorted.ravel())
ash = list(sorted.shape)
ash[axis] = 1
sorted = np.rollaxis(sorted, axis)
sorted.sort(axis=0)
index = int(sorted.shape[0]/2)
if sorted.shape[0] % 2 == 1:
returnval = sorted[index]
else:
returnval = (sorted[index-1]+sorted[index])/2.0
return returnval.reshape(ash)
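# Hedged usage sketch: unlike numpy.median, amedian() above keeps the reduced
# axis as a length-1 dimension.
def _demo_amedian():
    a = np.arange(12.).reshape(3, 4)
    return amedian(a, axis=0).shape   # -> (1, 4)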
def putvecsinarray(vecs):
"""Take a tuple, list, or array of 1D arrays and always return their
Vstacked combination. (Just a helper function, folks!)
"""
# 2017-04-19 12:28 IJMC: Created
if isinstance(vecs,tuple) or isinstance(vecs,list):
vecs = np.vstack(vecs).transpose()
elif isinstance(vecs, np.ndarray) and vecs.ndim < 2:
vecs = vecs.reshape(len(vecs),1)
else:
vecs = np.array(vecs, copy=False)
return vecs
def polyfitr(x, y, N, s, fev=100, w=None, diag=False, clip='both', \
verbose=False, plotfit=False, plotall=False, eps=1e-13, catchLinAlgError=False, xerr=None, yerr=None, retodr=False, checkvals=True):
"""Matplotlib's polyfit with weights and sigma-clipping rejection.
:DESCRIPTION:
    Do a best fit polynomial of order N of y to x. Points whose fit
    residuals exceed s standard deviations are rejected and the fit is
    recalculated. Return value is a vector of polynomial
coefficients [pk ... p1 p0].
:OPTIONS:
w: a set of weights for the data; uses CARSMath's weighted
polynomial fitting routine instead of numpy's standard
polyfit.
NOTE: if using errors in both x and y ("orthogonal
distance regression") then don't set w --- instead set
xerr and yerr (see below).
fev: number of function evaluations to call before stopping
'diag'nostic flag: Return the tuple (p, chisq, n_iter)
clip: 'both' -- remove outliers +/- 's' sigma from fit
'above' -- remove outliers 's' sigma above fit
'below' -- remove outliers 's' sigma below fit
'None'/None -- no outlier removal
xerr/yerr : one-sigma uncertainties in x and y. If these are
set, you are committing to an "orthogonal distance regression"
retodr : bool
If True, return the tuple (parameters, scipy_ODR_object)
catchLinAlgError : bool
If True, don't bomb on LinAlgError; instead, return [0, 0, ... 0].
:REQUIREMENTS:
:doc:`CARSMath`
:NOTES:
Iterates so long as n_newrejections>0 AND n_iter<fev.
"""
# 2008-10-01 13:01 IJC: Created & completed
# 2009-10-01 10:23 IJC: 1 year later! Moved "import" statements within func.
# 2009-10-22 14:01 IJC: Added 'clip' options for continuum fitting
# 2009-12-08 15:35 IJC: Automatically clip all non-finite points
# 2010-10-29 09:09 IJC: Moved pylab imports inside this function
# 2012-08-20 16:47 IJMC: Major change: now only reject one point per iteration!
# 2012-08-27 10:44 IJMC: Verbose < 0 now resets to 0
# 2013-05-21 23:15 IJMC: Added catchLinAlgError
# 2017-05-12 11:37 IJMC: Added option for orthogonal distance regression
from CARSMath import polyfitw
from numpy import polyfit, polyval, isfinite, ones
from numpy.linalg import LinAlgError
from pylab import plot, legend, title, figure
import scipy.odr as odr
if verbose < 0:
verbose = 0
xx = array(x, copy=False)
yy = array(y, copy=False)
noweights = (w is None) and (xerr is None) and (yerr is None)
if noweights:
ww = ones(xx.shape, float)
else:
ww = array(w, copy=False)
fitxy = False
if xerr is None and yerr is not None:
w = 1./np.array(yerr)**2
elif xerr is not None and yerr is not None:
fitxy = True
xerr = putvecsinarray(xerr).ravel()
yerr = putvecsinarray(yerr).ravel()
ii = 0
nrej = 1
if checkvals:
goodind = isfinite(xx)*isfinite(yy)
if noweights:
pass
elif fitxy:
goodind *= (np.isfinite(xerr) * np.isfinite(yerr))
else:
goodind *= isfinite(ww)
xx2 = xx[goodind]
yy2 = yy[goodind]
if fitxy:
xerr2 = xerr[goodind]
yerr2 = yerr[goodind]
else:
ww2 = ww[goodind]
if fitxy:
poly_model = odr.Model(np.polyval)
guess = polyfitr(xx2,yy2, N, s=s, fev=fev, w=1./yerr2**2)
while (ii<fev and (nrej<>0)):
if noweights:
p = polyfit(xx2,yy2,N)
residual = yy2 - polyval(p,xx2)
stdResidual = std(residual)
clipmetric = s * stdResidual
else:
if fitxy:
data = odr.RealData(xx2, yy2, sx=xerr2, sy=yerr2)
odrobj = odr.ODR(data, poly_model, beta0=guess, maxit=10000)
odrout = odrobj.run()
p = odrout.beta
residual = np.sqrt((odrout.delta / xerr2)**2 + (odrout.eps / yerr2)**2)
guess = p.copy()
else:
if catchLinAlgError:
try:
p = polyfitw(xx2,yy2, ww2, N)
except LinAlgError:
p = np.zeros(N+1, dtype=float)
else:
p = polyfitw(xx2,yy2, ww2, N)
p = p[::-1] # polyfitw uses reverse coefficient ordering
residual = (yy2 - polyval(p,xx2)) * np.sqrt(ww2)
clipmetric = s
if clip=='both':
worstOffender = abs(residual).max()
#pdb.set_trace()
if worstOffender <= clipmetric or worstOffender < eps:
ind = ones(residual.shape, dtype=bool)
else:
ind = abs(residual) < worstOffender
elif clip=='above':
worstOffender = residual.max()
if worstOffender <= clipmetric:
ind = ones(residual.shape, dtype=bool)
else:
ind = residual < worstOffender
elif clip=='below':
worstOffender = residual.min()
if worstOffender >= -clipmetric:
ind = ones(residual.shape, dtype=bool)
else:
ind = residual > worstOffender
else:
ind = ones(residual.shape, dtype=bool)
xx2 = xx2[ind]
yy2 = yy2[ind]
if fitxy:
xerr2 = xerr2[ind]
yerr2 = yerr2[ind]
elif (not noweights):
ww2 = ww2[ind]
ii = ii + 1
nrej = len(residual) - len(xx2)
if plotall:
figure()
plot(x,y, '.', xx2,yy2, 'x', x, polyval(p, x), '--')
legend(['data', 'fit data', 'fit'])
title('Iter. #' + str(ii) + ' -- Close all windows to continue....')
if verbose:
print str(len(x)-len(xx2)) + ' points rejected on iteration #' + str(ii)
if (plotfit or plotall):
figure()
plot(x,y, '.', xx2,yy2, 'x', x, polyval(p, x), '--')
legend(['data', 'fit data', 'fit'])
title('Close window to continue....')
if diag:
chisq = ( (residual)**2 / yy2 ).sum()
p = (p, chisq, ii)
if retodr:
ret = p, odrout
else:
ret = p
return ret
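# Hedged usage sketch: polyfitr() above on a clean quadratic with no outliers.
# Note the unconditional 'from CARSMath import polyfitw' inside the function,
# so the CARSMath module named in :REQUIREMENTS: must be importable.
def _demo_polyfitr():
    x = np.linspace(-1., 1., 50)
    y = 3.*x**2 - 2.
    return polyfitr(x, y, 2, 5.)   # ~ array([ 3., 0., -2.])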
def spliner(x, y, k=3, sig=5, s=None, fev=100, w=None, clip='both', \
verbose=False, plotfit=False, plotall=False, diag=False):
"""Matplotlib's polyfit with sigma-clipping rejection.
Do a scipy.interpolate.UnivariateSpline of order k of y to x.
Points whose fit residuals exeed s standard deviations are
rejected and the fit is recalculated. Returned is a spline object.
Iterates so long as n_newrejections>0 AND n_iter<fev.
:OPTIONAL INPUTS:
err: a set of errors for the data
fev: number of function evaluations to call before stopping
'diag'nostic flag: Return the tuple (p, chisq, n_iter)
clip: 'both' -- remove outliers +/- 's' sigma from fit
'above' -- remove outliers 's' sigma above fit
'below' -- remove outliers 's' sigma below fit
"""
# 2010-07-05 13:51 IJC: Adapted from polyfitr
from numpy import polyfit, polyval, isfinite, ones
from scipy import interpolate
from pylab import plot, legend, title
xx = array(x, copy=True)
yy = array(y, copy=True)
noweights = (w is None)
if noweights:
ww = ones(xx.shape, float)
else:
ww = array(w, copy=True)
#ww = 1./err**2
ii = 0
nrej = 1
goodind = isfinite(xx)*isfinite(yy)*isfinite(ww)
#xx = xx[goodind]
#yy = yy[goodind]
#ww = ww[goodind]
while (ii<fev and (nrej<>0)):
spline = interpolate.UnivariateSpline(xx[goodind],yy[goodind],w=ww[goodind],s=s,k=k)
residual = yy[goodind] - spline(xx[goodind])
stdResidual = std(residual)
#if verbose: print stdResidual
if clip=='both':
ind = abs(residual) <= (sig*stdResidual)
elif clip=='above':
ind = residual < sig*stdResidual
elif clip=='below':
ind = residual > -sig*stdResidual
else:
ind = ones(residual.shape, bool)
goodind *= ind
#xx = xx[ind]
#yy = yy[ind]
#ww = ww[ind]
ii += 1
        nrej = len(residual) - goodind.sum()   # points rejected this iteration
if plotall:
plot(x,y, '.', xx[goodind],yy[goodind], 'x', x, spline(x), '--')
legend(['data', 'fit data', 'fit'])
title('Iter. #' + str(ii) + ' -- Close all windows to continue....')
if verbose:
print str(len(x)-len(xx[goodind])) + ' points rejected on iteration #' + str(ii)
if (plotfit or plotall):
plot(x,y, '.', xx[goodind],yy[goodind], 'x', x, spline(x), '--')
legend(['data', 'fit data', 'fit'])
title('Close window to continue....')
if diag:
chisq = ( (residual)**2 / yy )[goodind].sum()
spline = (spline, chisq, ii, goodind)
return spline
def neworder(N):
"""Generate a random order the integers [0, N-1] inclusive.
"""
#2009-03-03 15:15 IJC: Created
from numpy import random, arange, int
from pylab import find
neworder = arange(N)
random.shuffle(neworder)
#print N
return neworder
def im2(data1, data2, xlab='', ylab='', tit='', bar=False, newfig=True, \
cl=None, x=[], y=[], fontsize=16):
"""Show two images; title will only be drawn for the top one."""
from pylab import figure, subplot, colorbar, xlabel, ylabel, title, clim
from nsdata import imshow
if newfig: figure()
subplot(211)
imshow(data1,x=x,y=y);
    if cl<>None: clim(cl)
if bar: colorbar()
xlabel(xlab, fontsize=fontsize); ylabel(ylab, fontsize=fontsize)
title(tit)
subplot(212)
imshow(data2,x=x,y=y);
    if cl<>None: clim(cl)
if bar: colorbar()
xlabel(xlab, fontsize=fontsize); ylabel(ylab, fontsize=fontsize)
return
def dumbconf(vec, sig, type='central', mid='mean', verbose=False):
"""
Determine two-sided and one-sided confidence limits, using sorting.
:INPUTS:
vec : sequence
1D Vector of data values, for which confidence levels will be
computed.
sig : scalar
Confidence level, 0 < sig < 1. If type='central', we return
the value X for which the range (mid-X, mid+x) encloses a
fraction sig of the data values.
:OPTIONAL INPUTS:
type='central' -- 'upper', 'lower', or 'central' confidence limits
mid='mean' -- compute middle with mean or median
:SEE_ALSO:
:func:`confmap` for 2D distributions
:EXAMPLES:
::
from numpy import random
from analysis import dumbconf
x = random.randn(10000)
dumbconf(x, 0.683) # ---> 1.0 (one-sigma)
dumbconf(3*x, 0.954) # ---> 6.0 (two-sigma)
dumbconf(x+2, 0.997, type='lower') # ---> -0.74
dumbconf(x+2, 0.997, type='upper') # ---> 4.7
Some typical values for a Normal (Gaussian) distribution:
========= ================
type confidence level
========= ================
one-sigma 0.6826895
2 sigma 0.9544997
3 sigma 0.9973002
4 sigma 0.9999366
5 sigma 0.9999994
========= ================
"""
#2009-03-25 22:47 IJC: A Better way to do it (using bisector technique)
# 2009-03-26 15:37 IJC: Forget bisecting -- just sort.
# 2013-04-25 12:05 IJMC: Return zero if vector input is empty.
# 2013-05-15 07:30 IJMC: Updated documentation.
from numpy import sort, array
vec = array(vec).copy()
sig = array([sig]).ravel()
if vec.size==0: return array([0])
#vec = sort(vec)
N = len(vec)
Ngoal = sig*N
if mid=='mean':
mid = vec.mean()
elif mid=='median':
mid = median(vec)
else:
        try:
            mid = mid + 0.0
        except TypeError:   # non-numeric 'mid' that isn't 'mean' or 'median'
            print "mid must be median, mean, or numeric in value!"
return -1
if type =='central':
vec2 = sort(abs(vec-mid))
elif type=='upper':
vec2 = sort(vec)
elif type=='lower':
vec2 = -sort(-vec)
else:
print "Invalid type -- must be central, upper, or lower"
return -1
ret = []
for ii in range(len(sig)):
ret.append(vec2[int(Ngoal[ii])])
return ret
def error_dropoff(data):
""" Calculates the dropoff in measurement uncertainty with increasing
number of samples (a random and uncorrelated set of data should
drop of as 1/sqrt[n] ).
E(0), the first returned element, always returns the uncertainty
in a single measurement (i.e., the standard deviation).
:EXAMPLE: Compute the errors on an array of 3 column vectors
::
data = randn(1000,3)
e = error_dropoff(data)
plot(e[1], 1./sqrt(e[1]), '-', e[1], e[0], 'x')
xlim([0,30])
legend(['Theoretical [1/sqrt(N)]', 'Computed errors'])
"""
# 2009-05-05 08:58 IJC: Adapted to Python from Matlab.
# 2006/06/06 IJC: Made it work with arrays of column vectors.
# Added the '--nomean' option.
# PARSE INPUTS
data = array(data).copy()
#interval = max([1 round(extract_from_options('--interval=', 1, options))]);
if len(data)==len(data.ravel()):
data = data.ravel()
data = data.reshape(len(data),1)
nsets = data.shape[1]
npts_vec = arange(data.shape[0]/2)+1.0
errors = zeros((data.shape[0]/2, nsets))
# LOOP AND CALCULATE STUFF
    for ii in range(len(npts_vec)):
        npts = int(npts_vec[ii])                 # number of points we average over
        nsamp = int(floor(data.shape[0]/npts))   # number of subsamples
        # (nsamp, npts, nsets): each row holds one npts-point subsample, so
        # mean(1) below averages within subsamples before taking the std.
        dre = data[0:nsamp*npts,:].reshape(nsamp, npts, nsets)
error_values = std(dre.mean(1))
errors[ii,:] = error_values
return (errors, npts_vec)
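# Hedged usage sketch: for uncorrelated Gaussian noise, the errors returned by
# error_dropoff() above should track 1/sqrt(N).
def _demo_error_dropoff():
    data = np.random.randn(1000)
    errors, npts = error_dropoff(data)
    return errors[0], npts[0]   # errors[0] ~ std(data) ~ 1.0; npts[0] == 1.0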
def binarray(img,ndown, axis=None):
"""downsample a 2D image
Takes a 1D vector or 2D array and reduce resolution by an integer factor
"ndown". This is done by binning the array -- i.e., integrating
over square blocks of pixels of width "ndown"
If keyword "axis" is None, bin over all axes. Otherwise, bin over
the single specified axis.
Note that 'ndown' can also be a sequence: e.g., [2, 1]
:EXAMPLE:
::
[img_ds]=binarray(img,ndown)
"""
# Renamed (and re-commented) by IJC 2007/01/31 from "downsample.m" to
# "binarray.m"
# 2009-05-06 14:15 IJC: Converted from Matlab to Python
# 2009-09-27 14:37 IJC: Added 1D vector support.
# 2010-09-27 16:08 IJC: ADded axis support.
# 2010-10-26 10:55 IJC: Much simpler algorithm for 1D case
# 2011-05-21 19:46 IJMC: Actually got axis support to work (2D
# only). Fixed indexing bug (in x, y meshgrid)
# 2012-09-08 18:50 IJMC: Big change -- 2D mode is now ~20x faster,
# owing to emphasis on array-based
# calculations.
# 2013-03-11 14:43 IJMC: Cast 'ndown' to int.
# 2013-04-04 11:48 IJMC: ndown can now be a sequence!
# 2014-03-13 16:24 IJMC: Now discard partially-filled rows &
# columns in bindown.
# 2014-09-04 11:20 IJMC: Totally overhauled 2D case to use
# array-only manipulations.
# 2014-09-24 23:15 IJMC: Fixed len-2 'ndown' input order.
if ndown==1:
return img
if not isinstance(img, np.ndarray):
img = np.array(img, copy=False)
if hasattr(ndown, '__iter__') and len(ndown)>1:
ndownx, ndowny = ndown[0:2]
else:
ndown, ndownx, ndowny = int(ndown), int(ndown), int(ndown)
if axis==0:
ndowny = 1
elif axis==1:
ndownx = 1
#if axis is None:
if img.ndim==2:
nrows0, ncols0 = img.shape
        nrows = int(ndownx * np.floor(nrows0/ndownx))
        ncols = int(ndowny * np.floor(ncols0/ndowny))
        nel = nrows*ncols/ndownx/ndowny
        img2 = img[0:nrows,0:ncols].reshape(nel,ndownx,ndowny).sum(2)
        img_ds = img2.reshape(nrows/ndownx,ndownx, ncols/ndowny).sum(1)
    elif img.ndim==1:
        niter = int(np.floor(img.size / ndown))
        img_ds = img[0:niter*ndown].reshape(niter, ndown).sum(1)
return img_ds
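# Hedged usage sketch: binning a 4x4 array of ones by a factor of 2 sums
# each 2x2 block.
def _demo_binarray():
    img = np.ones((4, 4))
    return binarray(img, 2)   # -> 2x2 array, all elements 4.0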
def fixval(arr, repval, retarr=False):
"""Fix non-finite values in a numpy array, and replace them with repval.
:INPUT:
arr -- numpy array, with values to be replaced.
repval -- value to replace non-finite elements with
:OPTIONAL INPUT:
retarr -- if False, changes values in arr directly (more
efficient). if True, returns a fixed copy of the input array,
which is left unchanged.
:example:
::
fixval(arr, -1)
"""
# 2009-09-02 14:07 IJC: Created
# 2012-12-23 11:49 IJMC: Halved run time.
if retarr:
arr2 = arr.ravel().copy()
else:
arr2 = arr.ravel()
    finiteIndex = np.isfinite(arr2)
    if not finiteIndex.all():   # i.e., if any element is non-finite
        badIndex = find((1-finiteIndex))
        arr2[badIndex] = repval
if retarr:
return arr2.reshape(arr.shape)
else:
return
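# Hedged usage sketch: in-place NaN replacement with fixval() above.
def _demo_fixval():
    a = np.array([1., np.nan, 3.])
    fixval(a, -1.)
    return a   # -> array([ 1., -1., 3.])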
def removeoutliers(data, nsigma, remove='both', center='mean', niter=Inf, retind=False, verbose=False):
"""Strip outliers from a dataset, iterating until converged.
:INPUT:
data -- 1D numpy array. data from which to remove outliers.
nsigma -- positive number. limit defining outliers: number of
standard deviations from center of data.
:OPTIONAL INPUTS:
remove -- ('min'|'max'|'both') respectively removes outliers
below, above, or on both sides of the limits set by
nsigma.
center -- ('mean'|'median'|value) -- set central value, or
method to compute it.
niter -- number of iterations before exit; defaults to Inf,
which can occasionally result in empty arrays returned
retind -- (bool) whether to return index of good values as
second part of a 2-tuple.
:EXAMPLE:
::
from numpy import hist, linspace, randn
from analysis import removeoutliers
data = randn(1000)
hbins = linspace(-5,5,50)
d2 = removeoutliers(data, 1.5, niter=1)
hist(data, hbins)
hist(d2, hbins)
"""
# 2009-09-04 13:24 IJC: Created
# 2009-09-24 17:34 IJC: Added 'retind' feature. Tricky, but nice!
# 2009-10-01 10:40 IJC: Added check for stdev==0
# 2009-12-08 15:42 IJC: Added check for isfinite
from numpy import median, ones, isfinite
def getcen(data, method):
"Get central value of a 1D array (helper function)"
if method.__class__==str:
if method=='median':
cen = median(data)
else:
cen = data.mean()
else:
cen = method
return cen
def getgoodindex(data, nsigma, center, stdev, remove):
"Get number of outliers (helper function!)"
if stdev==0:
distance = data*0.0
else:
distance = (data-center)/stdev
if remove=='min':
goodind = distance>-nsigma
elif remove=='max':
goodind = distance<nsigma
else:
goodind = abs(distance)<=nsigma
return goodind
data = data.ravel().copy()
ndat0 = len(data)
ndat = len(data)
iter=0
goodind = ones(data.shape,bool)
goodind *= isfinite(data)
while ((ndat0<>ndat) or (iter==0)) and (iter<niter) and (ndat>0) :
ndat0 = len(data[goodind])
cen = getcen(data[goodind], center)
stdev = data[goodind].std()
thisgoodind = getgoodindex(data[goodind], nsigma, cen, stdev, remove)
goodind[find(goodind)] = thisgoodind
if verbose:
print "cen>>",cen
print "std>>",stdev
ndat = len(data[goodind])
iter +=1
if verbose:
print ndat0, ndat
if retind:
ret = data[goodind], goodind
else:
ret = data[goodind]
return ret
def xcorr2_qwik(img0, img1):
"""
Perform quick 2D cross-correlation between two images.
Images must be the same size.
Computed via squashing the images along each dimension and
computing 1D cross-correlations.
"""
# 2009-12-17 10:13 IJC: Created. Based on idea by J. Johnson.
from numpy import zeros, max, min, sum
im00 = img0.sum(0)
im01 = img0.sum(1)
im10 = img1.sum(0)
im11 = img1.sum(1)
n0 = len(im00)
n1 = len(im01)
noffsets0 = 2*n0-1
noffsets1 = 2*n1-1
corr0 = zeros(noffsets0,float)
corr1 = zeros(noffsets1,float)
for ii in range(noffsets0):
firstind0 = max((ii-n0+1,0))
lastind0 = min((n0, ii+1))
firstind1 = max((n0-ii-1,0))
lastind1 = min((2*n0-ii-1,n0))
corr0[ii] = sum(im00[firstind0:lastind0]*im10[firstind1:lastind1])
    for jj in range(noffsets1):   # use n1, not n0, so non-square images work
        firstind0 = max((jj-n1+1,0))
        lastind0 = min((n1, jj+1))
        firstind1 = max((n1-jj-1,0))
        lastind1 = min((2*n1-jj-1,n1))
        corr1[jj] = sum(im01[firstind0:lastind0]*im11[firstind1:lastind1])
    ret = find([corr0==corr0.max()])-n0+1, find([corr1==corr1.max()])-n1+1
return ret
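# Hedged usage sketch: recovering a small integer shift between two images of
# the same size; the sign follows the convention coded above.
def _demo_xcorr2_qwik():
    img0 = np.zeros((32, 32)); img0[10, 12] = 1.
    img1 = np.zeros((32, 32)); img1[13, 14] = 1.
    return xcorr2_qwik(img0, img1)   # offsets of magnitude (2, 3)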
def lsq(x, z, w=None, xerr=None, zerr=None, retcov=False, checkvals=True):
"""Do weighted least-squares fitting.
:INPUTS:
x : sequence
tuple of 1D vectors of equal lengths N, or the transposed
numpy.vstack of this tuple
z : sequence
vector of length N; data to fit to.
w : sequence
Either an N-vector or NxN array of weights (e.g., 1./sigma_z**2)
retcov : bool.
If True, also return covariance matrix.
checkvals : bool
If True, check that all values are finite values. This is
safer, but the array-based indexing slows down the function.
:RETURNS:
the tuple of (coef, coeferrs, {cov_matrix})"""
# 2010-01-13 18:36 IJC: Created
# 2010-02-08 13:04 IJC: Works for lists or tuples of x
# 2012-06-05 20:04 IJMC: Finessed the initial checking of 'x';
# updated documentation, cleared namespace.
# 2013-01-24 15:48 IJMC: Explicitly cast 'z' as type np.ndarray
# 2014-08-28 09:17 IJMC: Added 'checkvals' option.
# 2017-04-19 10:28 IJMC: Added option for errors in X and Y
# from numpy import vstack, dot, sqrt, isfinite,diag,ones,float, array, ndarray
# from numpy.linalg import pinv
fitxy = False
if xerr is None and zerr is not None:
w = 1./np.array(zerr)**2
elif xerr is not None and zerr is not None:
fitxy = True
xerr = putvecsinarray(xerr)
zerr = putvecsinarray(zerr)
if isinstance(x,tuple) or isinstance(x,list):
Xmat = np.vstack(x).transpose()
elif isinstance(x, np.ndarray) and x.ndim < 2:
Xmat = x.reshape(len(x),1)
else:
Xmat = np.array(x, copy=False)
z = np.array(z, copy=False)
if w is None:
w = np.diag(np.ones(Xmat.shape[0],float))
else:
w = np.array(w,copy=True)
if w.ndim < 2:
w = np.diag(w)
    if checkvals:
        goodind = np.isfinite(Xmat.sum(1))*np.isfinite(z)*np.isfinite(np.diag(w))
    else:
        goodind = np.ones(Xmat.shape[0], dtype=bool)   # goodind is also used below
nelem, nvec = Xmat.shape
def linear_lsq_model(p, Xmatvec):
Xmat0 = Xmatvec.reshape(nelem, nvec)
return np.tile(np.dot(Xmat0, p), nvec)
if fitxy:
# Create a model for fitting.
lsq_model = odr.Model(linear_lsq_model)
tileZflat = np.tile(z, nvec)
tilegoodind = np.tile(goodind, nvec)
etileZflat = np.tile(zerr, nvec)
etileZflat[nelem:] *= ((etileZflat.max())*1e9)
# Create a RealData object using our initiated data from above.
if checkvals:
data = odr.RealData(Xmat.flatten()[tilegoodind], tileZflat[tilegoodind], sx=xerr.flatten()[tilegoodind], sy=etileZflat[tilegoodind])
else:
data = odr.RealData(Xmat.flatten(), tileZflat, sx=xerr.flatten(), sy=etileZflat)
        guess, eguess = lsq(Xmat[goodind], z[goodind], checkvals=checkvals)
        # Set up ODR with the model and data; use a distinct name so we don't
        # shadow the scipy.odr module imported above (assigning to 'odr' here
        # would make the earlier odr.Model call raise UnboundLocalError).
        odrfit = odr.ODR(data, lsq_model, beta0=guess)
        # Run the regression.
        out = odrfit.run()
fitcoef, covmat = out.beta, out.cov_beta
else:
if checkvals:
Wmat = w[goodind][:,goodind]
XtW = np.dot(Xmat[goodind,:].transpose(),Wmat)
fitcoef = np.dot(np.dot(np.linalg.pinv(np.dot(XtW,Xmat[goodind,:])),XtW), z[goodind])
covmat = (np.linalg.pinv(np.dot(XtW, Xmat[goodind,:])))
else:
Wmat = w
XtW = np.dot(Xmat.transpose(),Wmat)
fitcoef = np.dot(np.dot(np.linalg.pinv(np.dot(XtW,Xmat)),XtW), z)
covmat = (np.linalg.pinv(np.dot(XtW, Xmat)))
efitcoef = np.sqrt(np.diag(covmat))
if retcov:
return fitcoef, efitcoef, covmat
else:
return fitcoef, efitcoef
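# Hedged usage sketch: fitting y = 2x + 1 with lsq() above, passing the design
# columns (x, constant) as a tuple.
def _demo_lsq():
    x = np.arange(10.)
    z = 2.*x + 1.
    coef, ecoef = lsq((x, np.ones(10)), z)
    return coef   # ~ array([ 2., 1.])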
def lsqsp(x, z, w=None, retcov=False):
"""Do weighted least-squares fitting on sparse matrices.
:INPUTS:
x : sparse matrix, shape N x M
data used in the least-squares fitting, a set of N rows of M
elements each.
z : sequence (shape N) or sparse matrix (shape N x 1)
data to fit to.
w : sequence (shape N) or sparse matrix (shape N x N)
Data weights and/or inverse covariances (e.g., 1./sigma_z**2)
#retcov : bool.
# If True, also return covariance matrix.
:RETURNS:
the tuple of (coef, coeferrs, {cov_matrix})
:SEE_ALSO:
:func:`lsq`
:REQUIREMENTS:
SciPy's `sparse` module.
"""
# 2012-09-17 14:57 IJMC: Created from lsq
from scipy import sparse
M, N = x.shape
#x2 = sparse.dia_matrix(x)
#pdb.set_trace()
if w is None:
w = sparse.dia_matrix((1./np.ones(M), 0), shape=(M, M))
if max(w.shape)==np.prod(w.shape): # w is 1D:
w = sparse.dia_matrix((w, 0), shape=(M, M))
z = sparse.csr_matrix(z)
if z.shape[0]==1:
z = z.transpose()
w = sparse.csr_matrix(w)
XtW = np.dot(x.transpose(), w)
pinv0 = sparse.csr_matrix(np.linalg.pinv(np.dot(XtW, x).todense()))
fitcoef = np.array(np.dot(np.dot(pinv0, XtW), z).todense()).squeeze()
covmat = np.linalg.pinv(np.dot(XtW, x).todense())
    efitcoef = np.sqrt(np.diag(np.asarray(covmat)))   # per-coefficient errors, as in lsq()
if retcov:
return fitcoef, efitcoef, covmat
else:
return fitcoef, efitcoef
def medianfilter(data, filtersize, threshold=None,verbose=False):
""" For now, we assume FILTERSIZE is odd, and that DATA is square!
filt = medianfilter(data, filtersize)
Note that filtersize can be a scalar (e.g., 5) to equally
median-filter along both axes, or a 2-vector (e.g., [5, 1]) to
apply a rectangular median-filter.
This is about the slowest way to do it, but it was easy to write.
"""
# 2006/02/01 IJC at the Jet Propulsion Laboratory
# 2010-02-18 13:52 IJC: Converted to python
# 2014-05-22 10:55 IJMC: fixed call to numpy.isfinite, 2D
# filtersize input handling.
from numpy import zeros, median, abs, std
print "Just use scipy.signal.medfilt !!!"
print "Just use scipy.signal.medfilt !!!"
print "Just use scipy.signal.medfilt !!!"
    if not hasattr(filtersize, '__iter__'):   # allow scalar filtersize, as documented
        filtersize = [filtersize]
    if len(filtersize)<1:
        print 'medianfilter requires that filtersize be a 1- or 2-element vector'
        return -1
    elif len(filtersize)==1:
        filtersize = [filtersize[0], filtersize[0]]
else:
filtersize = filtersize[0:2]
npix = data.shape[0]
npiy = data.shape[1]
bigsize = npix+2*(filtersize[0]-1)
bigdata = zeros((npix+filtersize[0],npiy+filtersize[1]),float)
#ind = filtersize[0]-1
#if ind==0:
# bigdata = data
#else:
bigdata[filtersize[0]/2:filtersize[0]/2+npix, filtersize[1]/2:filtersize[1]/2+npiy] = data
# FOR NOW, WE ASSUME FILTERSIZE IS ODD!!
# AND THAT DATA IS SQUARE!
niter_x = npix + (filtersize[0]-1)
niter_y = npiy + (filtersize[1]-1)
filt = zeros((niter_x,niter_y), float)
for ii in range(niter_x):
for jj in range(niter_y):
if verbose>1:
print "ii,jj>>",ii,jj
if filtersize[0]==1:
indi = 0
else:
indi = filtersize[0]-1
if filtersize[1]==1:
indj = 0
else:
indj = filtersize[1]-1
select = bigdata[ii:(ii+indi+1),jj:(jj+indj+1)].ravel()
select = select[np.isfinite(select)]
#residualSelection = abs(select - median(select))
if verbose:
print "select.shape>>",select.shape
print "threshold>>",threshold
if threshold is not None:
if threshold >= 0: # raw threshold
doFilter = abs(bigdata[ii,jj]-median(select))/std(select)>=threshold
elif threshold<0: # remove outliers before applying threshold
npts_init = len(select)
select = removeoutliers(select, abs(threshold), center='median')
npts_final = len(select)
if verbose>1:
print "threshold=",threshold,", removed %i points" % (npts_init-npts_final)
doFilter = abs(bigdata[ii,jj]-median(select))/std(select)>=threshold
else: # filter everything; threshold not set.
doFilter = True
if verbose:
print "doFilter?>>",doFilter
if verbose>1:
print "select>>",select
if doFilter:
newval = median( select )
else:
newval = bigdata[ii,jj]
if verbose>1:
print "newval>>",newval
filt[ii,jj] = newval
    #print filt.shape, [(filtersize[0]-1)/2,niter_x-(filtersize[0]-1)/2,(filtersize[0]-1)/2,niter_y-(filtersize[0]-1)/2]
return filt[0:npix,0:npiy] #, filt[(filtersize[0]-1)/2:niter_x-(filtersize[0]-1)/2,(filtersize[0]-1)/2:niter_y-(filtersize[0]-1)/2]
def stdfilt2d(data, filtersize, threshold=None,verbose=False):
""" For now, we assume FILTERSIZE is odd, and that DATA is square!
filt = stdfilt2d(data, filtersize)
This is about the slowest way to do it, but it was easy to write.
"""
# 2012-08-07 13:42 IJMC: Created from medianfilter
from numpy import zeros, median, abs, std, isfinite
if not hasattr(filtersize, '__iter__'):
filtersize = [filtersize]
if len(filtersize)<1:
        print 'stdfilt2d requires that filtersize be a 1- or 2-element vector'
return -1
elif len(filtersize)==1:
filtersize = [filtersize[0], filtersize[0]]
else:
filtersize = filtersize[0:2]
npix = data.shape[0]
npiy = data.shape[1]
bigsize_x = npix+2*(filtersize[0]-1)
bigsize_y = npiy+2*(filtersize[1]-1)
bigdata = zeros((bigsize_x,bigsize_y),float)
ind = filtersize[0]-1
if ind==0:
bigdata = data
else:
bigdata[ind:(bigsize_x-ind), ind:(bigsize_y-ind)] = data
# FOR NOW, WE ASSUME FILTERSIZE IS ODD!!
# AND THAT DATA IS SQUARE!
niter_x = npix + (filtersize[0]-1)
niter_y = npiy + (filtersize[1]-1)
filt = zeros((niter_x,niter_y), float)
for ii in range(niter_x):
for jj in range(niter_y):
if verbose>1:
print "ii,jj>>",ii,jj
if filtersize[0]==1:
indi = 1
else:
indi = filtersize[0]-1
if filtersize[1]==1:
indj = 1
else:
indj = filtersize[1]-1
select = bigdata[ii:(ii+indi),jj:(jj+indj)].ravel()
#select = select[isfinite(select)]
#residualSelection = abs(select - median(select))
doFilter = True
if verbose:
print "doFilter?>>",doFilter
if verbose>1:
print "select>>",select
if doFilter:
newval = ( select ).std()
else:
newval = bigdata[ii,jj]
if verbose>1:
print "newval>>",newval
filt[ii,jj] = newval
#print filt.shape, [(filtersize[0]-1)/2,niter_x-(filtersize[0]-1)/2,(filtersize[0]-1)/2,niter_y-(filtersize[0]-1)/2]
return filt[(filtersize[0]-1)/2:niter_x-(filtersize[0]-1)/2,(filtersize[0]-1)/2:niter_y-(filtersize[0]-1)/2]
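# Illustrative usage sketch for stdfilt2d (a hedged example added here; it is
# not part of the original module). Per the docstring, data should be square
# and filtersize odd:
#
#   import numpy as np
#   img = np.random.normal(size=(64, 64))
#   local_scatter = stdfilt2d(img, 5)   # 5x5 sliding standard deviation
#   # Elevated values in local_scatter flag noisy or structured patches.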
def wmean(a, w, axis=None, reterr=False):
"""wmean(a, w, axis=None)
Perform a weighted mean along the specified axis.
:INPUTS:
a : sequence or Numpy array
data for which weighted mean is computed
w : sequence or Numpy array
weights of data -- e.g., 1./sigma^2
reterr : bool
If True, return the tuple (mean, err_on_mean), where
err_on_mean is the unbiased estimator of the sample standard
deviation.
:SEE ALSO: :func:`wstd`
"""
# 2008-07-30 12:44 IJC: Created this from ...
# 2012-02-28 20:31 IJMC: Added a bit of documentation
# 2012-03-07 10:58 IJMC: Added reterr option
newdata = array(a, subok=True, copy=True)
newweights = array(w, subok=True, copy=True)
if axis is None:
newdata = newdata.ravel()
newweights = newweights.ravel()
axis = 0
ash = list(newdata.shape)
wsh = list(newweights.shape)
nsh = list(ash)
nsh[axis] = 1
    if ash != wsh:
warn('Data and weight must be arrays of same shape.')
return []
wsum = newweights.sum(axis=axis).reshape(nsh)
    weightedmean = (newdata * newweights).sum(axis=axis).reshape(nsh) / wsum
if reterr:
# Biased estimator:
#e_weightedmean = sqrt((newweights * (a - weightedmean)**2).sum(axis=axis) / wsum)
# Unbiased estimator:
#e_weightedmean = sqrt((wsum / (wsum**2 - (newweights**2).sum(axis=axis))) * (newweights * (a - weightedmean)**2).sum(axis=axis))
# Standard estimator:
e_weightedmean = np.sqrt(1./newweights.sum(axis=axis))
ret = weightedmean, e_weightedmean
else:
ret = weightedmean
return ret
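# Hedged usage sketch for wmean (illustrative values only; not from the
# original module):
#
#   import numpy as np
#   vals = np.array([10.0, 10.4, 9.8])
#   sig = np.array([0.1, 0.4, 0.2])
#   m, e_m = wmean(vals, 1./sig**2, reterr=True)
#   # m is pulled toward the low-uncertainty points; e_m = 1/sqrt(sum(w)).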
def wstd(a, w, axis=None):
"""wstd(a, w, axis=None)
Perform a weighted standard deviation along the specified axis.
If axis=None, then the weighted standard deviation of the entire
array is computed.
    Note that this computes the _sample_ standard deviation;
    Numpy/Scipy computes the _population_ standard deviation, which is
    smaller by a factor sqrt((N-1)/N). This effect is small for large
    datasets.
:SEE ALSO: :func:`wmean`
Taken from http://en.wikipedia.org/wiki/Weighted_standard_deviation
"""
# 2008-07-30 12:44 IJC: Created this from
newdata = array(a, subok=True, copy=True)
newweights = array(w, subok=True, copy=True)
if axis is None:
newdata = newdata.ravel()
newweights = newweights.ravel()
axis = 0
ash = list(newdata.shape)
wsh = list(newweights.shape)
nsh = list(ash)
nsh[axis] = 1
    if ash != wsh:
warn('Data and weight must be arrays of same shape.')
return []
wsum = newweights.sum(axis=axis).reshape(nsh)
omega = 1.0 * wsum / (wsum**2 - (newweights**2).sum(axis=axis).reshape(nsh))
weightedstd = omega * (newweights * (newdata-wmean(newdata, newweights, axis=axis))**2 ).sum(axis=axis).reshape(nsh)
return sqrt(weightedstd)
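# Hedged companion sketch for wstd, using the same illustrative arrays as
# the wmean example above:
#
#   s = wstd(vals, 1./sig**2)   # weighted *sample* standard deviation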
def fmin(func, x0, args=(), kw=dict(), xtol=1e-4, ftol=1e-4, maxiter=None, maxfun=None,
full_output=0, disp=1, retall=0, callback=None, zdelt = 0.00025, nonzdelt = 0.05,
holdfixed=None):
"""Minimize a function using the downhill simplex algorithm -- now with KEYWORDS.
:Parameters:
func : callable func(x,*args)
The objective function to be minimized.
x0 : ndarray
Initial guess.
args : tuple
Extra arguments passed to func, i.e. ``f(x,*args)``.
callback : callable
Called after each iteration, as callback(xk), where xk is the
current parameter vector.
:Returns: (xopt, {fopt, iter, funcalls, warnflag})
xopt : ndarray
Parameter that minimizes function.
fopt : float
Value of function at minimum: ``fopt = func(xopt)``.
iter : int
Number of iterations performed.
funcalls : int
Number of function calls made.
warnflag : int
1 : Maximum number of function evaluations made.
2 : Maximum number of iterations reached.
allvecs : list
Solution at each iteration.
*Other Parameters*:
xtol : float
Relative error in xopt acceptable for convergence.
ftol : number
Relative error in func(xopt) acceptable for convergence.
maxiter : int
Maximum number of iterations to perform.
maxfun : number
Maximum number of function evaluations to make [200*len(x0)]
full_output : bool
Set to True if fval and warnflag outputs are desired.
disp : bool
Set to True to print convergence messages.
retall : bool
Set to True to return list of solutions at each iteration.
      zdelt : number
          Initial step size used for any component of x0 that equals zero
      nonzdelt : number
          Fractional initial step size used for nonzero components of x0
holdfixed : sequence
Indices of x0 to hold fixed (e.g., [1, 2, 4])
:TBD: gprior : tuple or sequence of tuples
Set a gaussian prior on the indicated parameter, such that
chisq += ((x0[p] - val)/unc_val)**2, where the parameters
are defined by the tuple gprior=(param, val, unc_val)
:Notes:
Uses a Nelder-Mead simplex algorithm to find the minimum of
function of one or more variables.
"""
# 2011-04-13 14:26 IJMC: Adding Keyword option
# 2011-05-11 10:48 IJMC: Added the zdelt and nonzdelt options
# 2011-05-30 15:36 IJMC: Added the holdfixed option
def wrap_function(function, args, **kw):
ncalls = [0]
def function_wrapper(x):
ncalls[0] += 1
return function(x, *args, **kw)
return ncalls, function_wrapper
# Set up holdfixed arrays
if holdfixed is not None:
holdfixed = np.array(holdfixed)
#x0[holdfixed] = x0[holdfixed]
holdsome = True
else:
holdsome = False
#holdfixed = np.zeros(params.size, dtype=bool)
#if holdsome:
# print "holdfixed>>", holdfixed
fcalls, func = wrap_function(func, args, **kw)
x0 = np.asfarray(x0).flatten()
xoriginal = x0.copy()
N = len(x0)
rank = len(x0.shape)
if not -1 < rank < 2:
        raise ValueError("Initial guess must be a scalar or rank-1 sequence.")
if maxiter is None:
maxiter = N * 200
if maxfun is None:
maxfun = N * 200
rho = 1; chi = 2; psi = 0.5; sigma = 0.5;
one2np1 = range(1,N+1)
if rank == 0:
sim = np.zeros((N+1,), dtype=x0.dtype)
else:
sim = np.zeros((N+1,N), dtype=x0.dtype)
fsim = np.zeros((N+1,), float)
sim[0] = x0
if retall:
allvecs = [sim[0]]
#print func.__name__
#print x0
fsim[0] = func(x0)
for k in range(0,N):
y = np.array(x0,copy=True)
if y[k] != 0:
y[k] = (1+nonzdelt)*y[k]
else:
y[k] = zdelt
if holdsome and k in holdfixed:
y[k] = xoriginal[k]
sim[k+1] = y
f = func(y)
fsim[k+1] = f
ind = np.argsort(fsim)
fsim = np.take(fsim,ind,0)
# sort so sim[0,:] has the lowest function value
sim = np.take(sim,ind,0)
iterations = 1
while (fcalls[0] < maxfun and iterations < maxiter):
### IJC Edit to understand fmin!
##print 'xtol>> ' + str(max(np.ravel(abs(sim[1:]-sim[0])))) + ' > ' + str(xtol)
##print 'ftol>> ' + str(max(abs(fsim[0]-fsim[1:]))) + ' > ' + str(ftol)
if (max(np.ravel(abs(sim[1:]-sim[0]))) <= xtol \
and max(abs(fsim[0]-fsim[1:])) <= ftol):
break
xbar = np.add.reduce(sim[:-1],0) / N
xr = (1+rho)*xbar - rho*sim[-1]
if holdsome:
xr[holdfixed] = xoriginal[holdfixed]
fxr = func(xr)
doshrink = 0
if fxr < fsim[0]:
xe = (1+rho*chi)*xbar - rho*chi*sim[-1]
if holdsome:
xe[holdfixed] = xoriginal[holdfixed]
fxe = func(xe)
if fxe < fxr:
sim[-1] = xe
fsim[-1] = fxe
else:
sim[-1] = xr
fsim[-1] = fxr
else: # fsim[0] <= fxr
if fxr < fsim[-2]:
sim[-1] = xr
fsim[-1] = fxr
else: # fxr >= fsim[-2]
# Perform contraction
if fxr < fsim[-1]:
xc = (1+psi*rho)*xbar - psi*rho*sim[-1]
if holdsome:
xc[holdfixed] = xoriginal[holdfixed]
fxc = func(xc)
if fxc <= fxr:
sim[-1] = xc
fsim[-1] = fxc
else:
doshrink=1
else:
# Perform an inside contraction
xcc = (1-psi)*xbar + psi*sim[-1]
if holdsome:
xcc[holdfixed] = xoriginal[holdfixed]
fxcc = func(xcc)
if fxcc < fsim[-1]:
sim[-1] = xcc
fsim[-1] = fxcc
else:
doshrink = 1
if doshrink:
for j in one2np1:
sim[j] = sim[0] + sigma*(sim[j] - sim[0])
if holdsome:
sim[j, holdfixed] = xoriginal[holdfixed]
fsim[j] = func(sim[j])
ind = np.argsort(fsim)
sim = np.take(sim,ind,0)
fsim = np.take(fsim,ind,0)
if callback is not None:
callback(sim[0])
iterations += 1
if retall:
allvecs.append(sim[0])
x = sim[0]
fval = min(fsim)
warnflag = 0
if fcalls[0] >= maxfun:
warnflag = 1
if disp:
print "Warning: Maximum number of function evaluations has "\
"been exceeded."
elif iterations >= maxiter:
warnflag = 2
if disp:
print "Warning: Maximum number of iterations has been exceeded"
else:
if disp:
print "Optimization terminated successfully."
print " Current function value: %f" % fval
print " Iterations: %d" % iterations
print " Function evaluations: %d" % fcalls[0]
if full_output:
retlist = x, fval, iterations, fcalls[0], warnflag
if retall:
retlist += (allvecs,)
else:
retlist = x
if retall:
retlist = (x, allvecs)
return retlist
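# Hedged usage sketch for this fmin variant, highlighting the 'kw' and
# 'holdfixed' options it adds over the stock SciPy routine (all names and
# values below are illustrative):
#
#   import numpy as np
#   def chi2(p, x, y, scale=1.):
#       return (((p[0] + p[1]*x - y)/scale)**2).sum()
#   x = np.linspace(0., 1., 50)
#   y = 2. + 3.*x
#   best = fmin(chi2, [1., 1.], args=(x, y), kw=dict(scale=2.),
#               holdfixed=[0], disp=0)
#   # best[0] stays at its initial value (1.0); best[1] is free to adjust.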
def fmin_powell(func, x0, args=(), kw=dict(), xtol=1e-4, ftol=1e-4, maxiter=None,
maxfun=None, full_output=0, disp=1, retall=0, callback=None,
direc=None, holdfixed=None):
"""Minimize a function using modified Powell's method -- now with KEYWORDS.
:Parameters:
func : callable f(x,*args)
Objective function to be minimized.
x0 : ndarray
Initial guess.
args : tuple
          Extra arguments passed to func.
kw : dict
Keyword arguments passed to func.
callback : callable
An optional user-supplied function, called after each
iteration. Called as ``callback(xk)``, where ``xk`` is the
current parameter vector.
direc : ndarray
Initial direction set.
:Returns: (xopt, {fopt, xi, direc, iter, funcalls, warnflag}, {allvecs})
xopt : ndarray
Parameter which minimizes `func`.
fopt : number
Value of function at minimum: ``fopt = func(xopt)``.
direc : ndarray
Current direction set.
iter : int
Number of iterations.
funcalls : int
Number of function calls made.
warnflag : int
Integer warning flag:
1 : Maximum number of function evaluations.
2 : Maximum number of iterations.
allvecs : list
List of solutions at each iteration.
*Other Parameters*:
xtol : float
Line-search error tolerance.
ftol : float
Relative error in ``func(xopt)`` acceptable for convergence.
maxiter : int
Maximum number of iterations to perform.
maxfun : int
Maximum number of function evaluations to make.
full_output : bool
If True, fopt, xi, direc, iter, funcalls, and
warnflag are returned.
disp : bool
If True, print convergence messages.
retall : bool
If True, return a list of the solution at each iteration.
:Notes:
Uses a modification of Powell's method to find the minimum of
a function of N variables.
"""
# 2010-07-01 11:17 IJC: Added keyword option
from scipy import optimize
from numpy import asarray, eye, pi, squeeze
def wrap_function(function, args, **kw):
ncalls = [0]
def function_wrapper(x):
ncalls[0] += 1
return function(x, *args, **kw)
return ncalls, function_wrapper
def _linesearch_powell(func, p, xi, tol=1e-3):
"""Line-search algorithm using fminbound.
        Find the minimum of the function ``func(x0 + alpha*direc)``.
"""
def myfunc(alpha):
return func(p + alpha * xi)
alpha_min, fret, iter, num = optimize.brent(myfunc, full_output=1, tol=tol)
xi = alpha_min*xi
return squeeze(fret), p+xi, xi
# Set up holdfixed arrays
if holdfixed is not None:
holdfixed = np.array(holdfixed)
#x0[holdfixed] = x0[holdfixed]
holdsome = True
else:
holdsome = False
#holdfixed = np.zeros(params.size, dtype=bool)
# we need to use a mutable object here that we can update in the
# wrapper function
fcalls, func = wrap_function(func, args, **kw)
x = asarray(x0).flatten()
xoriginal = x.copy()
if retall:
allvecs = [x]
N = len(x)
rank = len(x.shape)
if not -1 < rank < 2:
        raise ValueError("Initial guess must be a scalar or rank-1 sequence.")
if maxiter is None:
maxiter = N * 1000
if maxfun is None:
maxfun = N * 1000
if direc is None:
direc = eye(N, dtype=float)
else:
direc = asarray(direc, dtype=float)
fval = squeeze(func(x))
x1 = x.copy()
iter = 0;
ilist = range(N)
while True:
fx = fval
bigind = 0
delta = 0.0
for i in ilist:
direc1 = direc[i]
fx2 = fval
if (not holdsome) or (i not in holdfixed):
fval, x, direc1 = _linesearch_powell(func, x, direc1, tol=xtol*100)
if (fx2 - fval) > delta:
delta = fx2 - fval
bigind = i
iter += 1
if callback is not None:
callback(x)
if retall:
allvecs.append(x)
if (2.0*(fx - fval) <= ftol*(abs(fx)+abs(fval))+1e-20): break
if fcalls[0] >= maxfun: break
if iter >= maxiter: break
# Construct the extrapolated point
direc1 = x - x1
x2 = 2*x - x1
if holdsome:
x2[holdfixed] = xoriginal[holdfixed]
x1 = x.copy()
fx2 = squeeze(func(x2))
if (fx > fx2):
t = 2.0*(fx+fx2-2.0*fval)
temp = (fx-fval-delta)
t *= temp*temp
temp = fx-fx2
t -= delta*temp*temp
if t < 0.0:
fval, x, direc1 = _linesearch_powell(func, x, direc1,
tol=xtol*100)
if holdsome:
x[holdfixed] = xoriginal[holdfixed]
direc[bigind] = direc[-1]
direc[-1] = direc1
warnflag = 0
if fcalls[0] >= maxfun:
warnflag = 1
if disp:
print "Warning: Maximum number of function evaluations has "\
"been exceeded."
elif iter >= maxiter:
warnflag = 2
if disp:
print "Warning: Maximum number of iterations has been exceeded"
else:
if disp:
print "Optimization terminated successfully."
print " Current function value: %f" % fval
print " Iterations: %d" % iter
print " Function evaluations: %d" % fcalls[0]
x = squeeze(x)
if full_output:
retlist = x, fval, direc, iter, fcalls[0], warnflag
if retall:
retlist += (allvecs,)
else:
retlist = x
if retall:
retlist = (x, allvecs)
return retlist
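# Hedged usage sketch for fmin_powell, reusing the toy chi2 problem from the
# fmin example above:
#
#   best = fmin_powell(chi2, [1., 1.], args=(x, y), kw=dict(scale=2.), disp=0)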
def gaussian2d(p, x, y):
""" Compute a 2D gaussian distribution at the points x, y.
:INPUTS:
p : sequence
a four- or five-component array, list, or tuple:
        z = [p4 +] p0/(2*pi*p1**2) * exp(-((x-p2)**2+(y-p3)**2) / (2*p1**2))
p[0] -- Area of the gaussian
p[1] -- one-sigma dispersion
p[2] -- x-central offset
p[3] -- y-central offset
p[4] -- optional constant, vertical offset
x : NumPy array
X-coordinate values at which the above relation will be computed.
y : NumPy array
Y-coordinate values at which the above relation will be computed.
:SEE_ALSO:
:func:`gaussian` (1D)
"""
#2010-06-08 20:00 IJC: Created
#2013-04-19 23:49 IJMC: Improved documentation, per EACM's request.
#2015-05-31 16:53 IJMC: no longer copy x & y inputs (slight speed boost)
x = np.array(x, dtype=float, copy=False)
y = np.array(y, dtype=float, copy=False)
p = np.array(p).copy()
if len(p)==4:
p = np.concatenate((p, [0]))
z = p[4] + p[0]/(2*pi*p[1]**2) * np.exp(-((x-p[2])**2 + (y-p[3])**2) / (2*p[1]**2))
return z
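# Hedged usage sketch for gaussian2d on a pixel grid (illustrative only):
#
#   import numpy as np
#   xx, yy = np.meshgrid(np.arange(32.), np.arange(32.))
#   psf = gaussian2d([100., 2., 16., 16.], xx, yy)
#   # psf.sum() is ~100: p[0] is the integrated area of the Gaussian.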
def gaussiannd(mu, cov, x):
""" Compute an N-dimensional gaussian distribution at the position x.
mu is the length-N 1D vector of mean positions
cov is the NxN covariance matrix of the multinormal distribution.
x are the positions at which to compute. If X is 2D (size M x
N), it is taken as M sets of N-point positions.
SEE ALSO: :func:`gaussian`, :func:`gaussian2d`
"""
#2012-05-08 11:36 IJMC: Created
x = np.array(x, dtype=float, copy=False)
mu = np.array(mu, dtype=float, copy=False)
cov = np.array(cov, dtype=float, copy=True)
if x.ndim==1:
nx = x.size
niter = 0
elif x.ndim==2:
niter, nx = x.shape
if cov.size==1:
cov = cov.reshape((1,1))
# Test if cov is square:
# invert
invcov = np.linalg.inv(cov)
    # Compute the density at each input position:
    if niter==0:
        xmu = (x - mu).reshape(nx, 1)
        term1 = ((2*np.pi)**(nx/2.) * np.sqrt(np.linalg.det(cov)))
        term2 = np.exp(-0.5 * np.dot(xmu.transpose(), np.dot(invcov, xmu)))
        ret = term2 / term1
    else:
        ret = np.zeros(niter, dtype=float)  # allocate _before_ the loop fills it
        for ii in range(niter):
            xmu = (x[ii] - mu).reshape(nx, 1)
            term1 = ((2*np.pi)**(nx/2.) * np.sqrt(np.linalg.det(cov)))
            term2 = np.exp(-0.5 * np.dot(xmu.transpose(), np.dot(invcov, xmu)))
            ret[ii] = term2 / term1
    return ret
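# Hedged usage sketch for gaussiannd with a 2D multinormal (values are
# illustrative):
#
#   mu = [0., 0.]
#   cov = [[1., 0.5], [0.5, 2.]]
#   print gaussiannd(mu, cov, [0., 0.])              # density at the peak
#   print gaussiannd(mu, cov, [[0., 0.], [1., 1.]])  # two positions at once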
def gaussian2d_ellip(p, x, y):
""" Compute a 2D elliptical gaussian distribution at the points x, y.
p is a 5-, 6-, or 7-component sequence, defined as:
p[0] -- Amplitude (Area of the function)
p[1] -- x-dispersion
p[2] -- y-dispersion
p[3] -- x-central offset
p[4] -- y-central offset
p[5] -- optional rotation angle (radians)
p[6] -- optional constant, vertical offset
X, Y are gridded data from :func:`numpy.meshgrid`
First define:
x' = (x - p[3]) cos p[5] - (y - p[4]) sin p[5]
y' = (x - p[3]) sin p[5] + (y - p[4]) cos p[5]
Then calculate:
U = (x' / p[1])**2 + (y' / p[2])**2
z = p[6] + p0/(2*pi*p1*p2) * exp(-U / 2)
SEE ALSO: :func:`gaussian2d`, :func:`lorentzian2d` """
#2012-02-11 18:06 IJMC: Created from IDL GAUSS2DFIT
#2014-08-28 09:43 IJMC: Calculate rotated coords using np.dot, use
# np.array instead of np.vstack -- for speed
# boost.
x = np.array(x)
sh = x.shape
x = x.ravel()
y = np.array(y).ravel()
#p = array(p).copy()
if len(p)==5:
p = np.concatenate((p, [0, 0]))
elif len(p)==6:
p = np.concatenate((p, [0]))
cp, sp = np.cos(p[5]), np.sin(p[5]) # This gives a slight speed boost.
rotationMatrix = np.array([[cp, -sp], \
[sp, cp]])
xp,yp = np.dot(rotationMatrix, np.array((x-p[3],y-p[4])) )
return (p[6] + p[0]/(2*pi*p[1]*p[2]) * np.exp(-0.5 * ((xp / p[1])**2 + (yp / p[2])**2)) ).reshape(sh)
def lorentzian2d(p, x, y):
""" Compute a 2D Lorentzian distribution at the points x, y.
p is a 5-, 6-, or 7--component sequence:
z = (x-p3) ** 2 / p1 ** 2 + (y-p4) ** 2 / p2 ** 2 [ + (x-p3) * (y-p4) * p5 ]
lorentz = p0 / (1.0 + z) [ + p6]
p[0] -- Amplitude (Area of the function)
p[1] -- x-dispersion
p[2] -- y-dispersion
p[3] -- x-central offset
p[4] -- y-central offset
      p[5] -- optional ellipticity parameter
p[6] -- optional constant, vertical offset
SEE ALSO: :func:`gaussian2d`
"""
#2012-02-04 11:38 IJMC: Created
x = array(x, dtype=float).copy()
y = array(y, dtype=float).copy()
p = array(p).copy()
if len(p)==5:
p = concatenate((p, [0, 0]))
elif len(p)==6:
p = concatenate((p, [0]))
z = ((x - p[3]) / p[1])**2 + ((y - p[4]) / p[2])**2 + p[5] * (x - p[3]) * (y - p[4])
return p[6] + p[0]/(1. + z)
def egaussian2d(p,x,y,z,w=None):
""" Return the error associated with a 2D gaussian fit, using gaussian2d.
w is an array of weights, typically 1./sigma**2"""
# 2010-06-08 20:02 IJC: Created
from numpy import ones, array
x = array(x, dtype=float).copy()
if w is None:
        w = ones(z.shape, float)   # uniform weights ('w.shape' was a bug: w is None here)
z0 = gaussian2d(p,x,y)
return (((z-z0)*w)**2).sum()
def getblocks(vec):
"""Return start and end indices for consecutive sequences of
integer-valued indices.
:Example:
::
import analysis as an
vec = range(5) +range(10,14) + range(22,39)
starts,ends = an.getblocks(vec)
print zip(starts,ends)
"""
# 2010-08-18 17:01 IJC: Created
# 2010-11-15 23:26 IJC: Added numpy imports
from numpy import sort, diff, nonzero
vec = sort(vec)
starts = [vec[0]]
ends = []
dvec = diff(vec)
start_inds = nonzero(dvec>1)[0]
for ind in start_inds:
starts.append(vec[ind+1])
ends.append(vec[ind])
ends.append(vec[-1])
return starts, ends
def snr(data, axis=None, nsigma=None):
"""
Compute the quantity:
data.mean(axis=axis)/data.std(axis=axis)
for the specified data array/vector along the specified axis.
'nsigma' is used to reject outliers.
Output will be a scalar (axis is None) or numpy array, as
appropriate.
"""
# 2010-09-02 08:10 IJC: Created
# 2011-12-16 14:51 IJMC: Added optional nsigma flag
# 2014-07-27 14:09 IJMC: Briefly documented nsigma flag.
data = array(data)
if nsigma is None:
ret = data.mean(axis=axis)/data.std(axis=axis)
else:
ret = meanr(data, axis=axis, nsigma=nsigma) / \
stdr(data, axis=axis, nsigma=nsigma)
return ret
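# Hedged usage sketch for snr (illustrative):
#
#   import numpy as np
#   d = np.random.normal(loc=50., scale=5., size=1000)
#   print snr(d)            # ~10, i.e. mean/std
#   print snr(d, nsigma=3)  # same, computed with 3-sigma outlier rejection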
def pad(inp, npix_rows, npix_cols=None):
"""Pads input matrix to size specified.
::
out = pad(in, npix)
out = pad(in, npix_rows, npix_cols); # alternate usage
Written by J. Green @ JPL; converted to Python by I. Crossfield"""
#2008-10-18 12:50 IJC: Converted from Matlab function
# 2010-10-29 09:35 IJC: Moved from nsdata.py to analysis.py
from numpy import imag, zeros, complex128
inp = array(inp, copy=True)
if len(inp.shape)==0:
inp = inp.reshape((1,1))
elif len(inp.shape)==1:
inp = inp.reshape((1, len(inp)))
if npix_cols is None:
npix_cols = npix_rows
if (imag(inp)**2).sum()==0:
out = zeros((npix_rows, npix_cols))
else:
out = zeros((npix_rows, npix_cols), complex128)
nrows, ncols = inp.shape
ixc = floor(ncols/2 + 1);
iyc = floor(nrows/2 + 1);
oxc = floor(npix_cols/2 + 1);
oyc = floor(npix_rows/2 + 1);
dx = npix_cols-ncols;
dy = npix_rows-nrows;
if dx<=0:
ix1 = ixc - floor(npix_cols/2);
ix2 = ix1 + npix_cols - 1;
ox1 = 1;
ox2 = npix_cols;
else:
ix1 = 1;
ix2 = ncols;
ox1 = oxc - floor(ncols/2);
ox2 = ox1 + ncols - 1;
if dy<=0:
iy1 = iyc - floor(npix_rows/2);
iy2 = iy1 + npix_rows - 1;
oy1 = 1;
oy2 = npix_rows;
else:
iy1 = 1;
iy2 = nrows;
oy1 = oyc - floor(nrows/2);
oy2 = oy1 + nrows - 1;
out[ oy1-1:oy2, ox1-1:ox2] = inp[ iy1-1:iy2, ix1-1:ix2];
# Uncomment for testing
# print inp
# print ixc, iyc, iy1, iy2, ix1, ix2
# print oxc, oyc, oy1, oy2, ox1, ox2
return out
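# Hedged usage sketch for pad -- centered zero-padding and cropping:
#
#   import numpy as np
#   small = np.ones((3, 3))
#   big = pad(small, 7)    # 7x7 array: ones centered, zeros around the edge
#   back = pad(big, 3)     # cropping to a smaller size works the same way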
def fftfilter1d(vec, bandwidth, retfilter=False):
""" Apply a hard-edged low-pass filter to an input vector.
:INPUTS:
vec -- sequence -- 1D vector, assumed to be evenly sampled
bandwidth -- integer -- size of the filter passband in cycles
per signal duration. f <= bandwidth is
passed through; f > bandwidth is
rejected.
retfilter -- bool -- if True, return the tuple (filtered_vec, filter)
:OUTPUT:
      Lowpass-filtered version of vec
:NOTE:
Assumes the input is real-valued.
"""
# 2011-03-11 14:15 IJC: Created
from numpy import real, fft, floor, ceil
vec = array(vec, copy=True)
# Errorchecking
    if len(vec.shape) != 1:
print "Input array must be 1D -- try using .ravel()"
return -1
npts = vec.size
    filter = concatenate((zeros(int(floor(npts/2.)) - bandwidth),
                          ones(bandwidth * 2 + 1),
                          zeros(int(ceil(npts/2.)) - bandwidth - 1)))
ret = real(fft.ifft(fft.ifftshift( fft.fftshift(fft.fft(vec)) * filter )))
if retfilter==True:
ret = [ret, filter]
return ret
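# Hedged usage sketch for fftfilter1d (illustrative):
#
#   import numpy as np
#   t = np.linspace(0., 1., 256, endpoint=False)
#   sig = np.sin(2*np.pi*3*t) + 0.2*np.sin(2*np.pi*40*t)
#   smooth = fftfilter1d(sig, 5)   # passes f <= 5 cycles per duration
#   # smooth retains the 3-cycle term; the 40-cycle term is rejected.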
def stdres(data, bins=None, oversamp=None, dataindex=None):
"""Compute the standard deviation in the residuals of a data
series after average-binning by specified amounts.
:INPUTS:
data - 1D numpy array
Data to analyze.
bins - sequence
Factors by which to bin down. If None, use 1:sqrt(data.size)
and set dataindex=None.
oversamp - int
Number of times to shift, resample, and bin the data. Large
values take longer, but give a less "noisy" estimate (which
can be a problem at large bin sizes)
dataindex - 1D numpy array
Values across which data are indexed (i.e., 'time'). If not
None, bins apply to dataindex rather than to data and should
be increasing intervals of dataindex (rather than the number
of points to bin down by).
:REQUIREMENTS:
:doc:`tools`, :doc:`numpy`
:EXAMPLE:
::
import numpy as np
import analysis as an
import pylab as py
npts = 1e4
t = np.arange(npts)
data = np.random.normal(size=npts)
binfactors = np.arange(1, npts/2.+1)
bindown_result = an.stdres(data, binfactors, dataindex=t, oversamp=1)
py.figure()
py.subplot(211)
py.plot(t, data, 'k')
py.xlabel('Time')
py.ylabel('Data value')
py.minorticks_on()
py.title('Bin-down test: Gaussian Noise')
py.subplot(212)
py.loglog(binfactors, bindown_result, '-b', linewidth=2)
py.loglog(binfactors, data.std()/np.sqrt(binfactors), '--r')
py.xlabel('Binning factor')
py.ylabel('RMS of binned data')
py.legend(['Binned RMS', '1/sqrt(N)'])
"""
# 2011-06-16 16:50 IJMC: Created
# 2012-03-20 14:33 IJMC: Added oversamp option.
# 2012-03-22 09:21 IJMC: Added dataindex option.
# 2012-04-30 06:50 IJMC: Changed calling syntax to errxy.
from tools import errxy
from numpy import isfinite
ndata = data.size
if bins is None:
bins = arange(1, sqrt(int(ndata)))
dataindex = None
nout = len(bins)
if oversamp is None:
invoversamp = 1
oversamp = 1
else:
invoversamp = 1./oversamp
ret = zeros(nout, dtype=float)
if dataindex is None:
for jj, binfactor in enumerate(bins):
if binfactor > 0:
sample_shifts = arange(0., binfactor, binfactor * invoversamp).astype(int)
for kk in range(oversamp):
ret[jj] += binarray(data[sample_shifts[kk]::], binfactor).std()
else:
ret[jj] = data.std()*oversamp
else:
startval = dataindex.min()
endval = dataindex.max()
for jj, binwidth in enumerate(bins):
if binwidth > 0:
thesebins = arange(startval, endval+binwidth, binwidth) - binwidth/2.
for kk in range(oversamp):
di2,d2,edi2,ed2 = errxy(dataindex, data, thesebins + binwidth*kk/oversamp, xerr=None, yerr=None, xmode=None,ymode='mean')
if isfinite(d2).all():
ret[jj] += d2.std()
else:
ret[jj] += d2[isfinite(d2)].std()
else:
ret[jj] = data.std()*oversamp
ret /= oversamp
return ret
def allanvariance(data, dt=1):
"""Compute the Allan variance on a set of regularly-sampled data (1D).
If the time between samples is dt and there are N total
samples, the returned variance spectrum will have frequency
indices from 1/dt to (N-1)/dt."""
# 2008-07-30 10:20 IJC: Created
# 2011-04-08 11:48 IJC: Moved to analysis.py
newdata = array(data, subok=True, copy=True)
dsh = newdata.shape
newdata = newdata.ravel()
nsh = newdata.shape
alvar = zeros(nsh[0]-1, float)
for lag in range(1, nsh[0]):
alvar[lag-1] = mean( (newdata[0:-lag] - newdata[lag:])**2 )
return (alvar*0.5)
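# Hedged usage sketch for allanvariance on white noise:
#
#   import numpy as np
#   noise = np.random.normal(size=2000)
#   av = allanvariance(noise)
#   # For uncorrelated noise, av is ~var(noise) (i.e., ~1 here) at all lags.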
def trueanomaly(ecc, eanom=None, manom=None):
"""Calculate (Keplerian, orbital) true anomaly.
One optional input must be given.
:INPUT:
ecc -- scalar. orbital eccentricity.
:OPTIONAL_INPUTS:
eanom -- scalar or Numpy array. Eccentric anomaly. See
:func:`eccentricanomaly`
manom -- scalar or sequence. Mean anomaly, equal to
2*pi*(t - t0)/period
"""
# 2011-04-22 14:35 IJC: Created
if manom is not None:
eanom = eccentricanomaly(ecc, manom=manom)
if eanom is not None:
ret = 2. * np.arctan( np.sqrt((1+ecc)/(1.-ecc)) * np.tan(eanom/2.) )
else:
ret = None
return ret
def eccentricanomaly(ecc, manom=None, tanom=None, tol=1e-8):
"""Calculate (Keplerian, orbital) eccentric anomaly.
One optional input must be given.
:INPUT:
ecc -- scalar. orbital eccentricity.
:OPTIONAL_INPUTS:
manom -- scalar or sequence. Mean anomaly, equal to
2*pi*(t - t0)/period
tanom -- scalar or Numpy array. True anomaly. See
:func:`trueanomaly`.
"""
# 2011-04-22 14:35 IJC: Created
ret = None
if manom is not None:
if not hasattr(manom, '__iter__'):
mwasscalar = True
manom = [manom]
else:
mwasscalar = False
# Solve Kepler's equation for each element of mean anomaly:
e = np.zeros(len(manom)) # Initialize eccentric anomaly
for ii,element in enumerate(manom):
def kep(e): return element - e + ecc*sin(e)
e[ii] = optimize.newton(kep, 0., tol=tol)
if mwasscalar:
e = e[0]
ret = e
elif tanom is not None:
ret = 2. * np.arctan(np.tan(0.5 * tanom) / \
np.sqrt((1. + ecc) / (1. - ecc)))
else:
ret = None
return ret
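# Hedged sketch tying together mean, eccentric, and true anomaly:
#
#   import numpy as np
#   manom = 2*np.pi*np.linspace(0., 1., 100)      # one full orbit
#   eanom = eccentricanomaly(0.3, manom=manom)
#   tanom = trueanomaly(0.3, eanom=eanom)
#   # Sanity check of Kepler's equation: manom ~ eanom - 0.3*np.sin(eanom)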
def gaussian(p, x):
""" Compute a gaussian distribution at the points x.
p is a three- or four-component array, list, or tuple:
y = [p3 +] p0/(p1*sqrt(2pi)) * exp(-(x-p2)**2 / (2*p1**2))
p[0] -- Area of the gaussian
p[1] -- one-sigma dispersion
p[2] -- central offset (mean location)
p[3] -- optional constant, vertical offset
NOTE: FWHM = 2*sqrt(2*ln(2)) * p1 ~ 2.3548*p1
SEE ALSO: :func:`egaussian`"""
#2008-09-11 15:11 IJC: Created for LINEPROFILE
# 2011-05-18 11:46 IJC: Moved to analysis.
# 2013-04-11 12:03 IJMC: Tried to speed things up slightly via copy=False
# 2013-05-06 21:42 IJMC: Tried to speed things up a little more.
if not isinstance(x, np.ndarray):
x = array(x, dtype=float, copy=False)
if len(p)==3:
p = array(p, copy=True)
p = concatenate((p, [0]))
#elif len(p)==4:
# p = array(p, copy=False)
return p[3] + p[0]/(p[1]*sqrt(2*pi)) * exp(-(x-p[2])**2 / (2*p[1]**2))
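# Hedged usage sketch for the 1D gaussian (illustrative values):
#
#   import numpy as np
#   xg = np.linspace(-5., 5., 201)
#   yg = gaussian([1., 0.8, 0., 0.1], xg)  # area 1, sigma 0.8, offset 0.1
#   # np.trapz(yg - 0.1, xg) is ~1.0, and FWHM ~ 2.3548 * 0.8.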
def doubleGaussian(p, x):
""" Compute the sum of two gaussian distributions at the points x.
p is a six- or seven-component sequence:
y = [p6 +] p0/(p1*sqrt(2pi)) * exp(-(x-p2)**2 / (2*p1**2)) +
p3/(p4*sqrt(2pi)) * exp(-(x-p5)**2 / (2*p4**2))
p[0] -- Area of gaussian A
p[1] -- one-sigma dispersion of gaussian A
p[2] -- central offset (mean location) of gaussian A
p[3] -- Area of gaussian B
p[4] -- one-sigma dispersion of gaussian B
p[5] -- central offset (mean location) of gaussian B
p[6] -- optional constant, vertical offset
NOTE: FWHM = 2*sqrt(2*ln(2)) * p1 ~ 2.3548*p1
SEE ALSO: :func:`gaussian`
"""
# 2013-05-06 20:29 IJMC: Created
x = array(x, dtype=float, copy=False)
return gaussian(p[0:3], x) + gaussian(p[3:], x)
def doubleGaussianCen(p, x, mu1, mu2):
""" Compute the sum of two gaussian distributions at the points x.
The distributions have central moments mu1 and mu2.
Useful for fitting to partially blended spectral data.
    p is a four- or five-component sequence:
      y = [p4 +] p0/(p1*sqrt(2pi)) * exp(-(x-mu1)**2 / (2*p1**2)) +
          p2/(p3*sqrt(2pi)) * exp(-(x-mu2)**2 / (2*p3**2))
p[0] -- Area of gaussian A
p[1] -- one-sigma dispersion of gaussian A
p[2] -- Area of gaussian B
p[3] -- one-sigma dispersion of gaussian B
p[4] -- optional constant, vertical offset
mu1 -- central offset (mean location) of gaussian A
mu2 -- central offset (mean location) of gaussian B
NOTE: FWHM = 2*sqrt(2*ln(2)) * p1 ~ 2.3548*p1
SEE ALSO: :func:`doubleGaussian`, :func:`gaussian`
"""
# 2013-05-06 20:29 IJMC: Created
x = array(x, dtype=float, copy=False)
param1 = [p[0], p[1], mu1, 0]
if len(p)==4:
param2 = [p[2], p[3], mu2, 0]
elif len(p)==5:
param2 = [p[2], p[3], mu2, p[4]]
return gaussian(param1, x) + gaussian(param2, x)
def nGaussianCen(p, x, mu):
""" Compute the sum of N gaussian distributions at the points x.
The distributions have central moments defined by the vector mu.
Useful for fitting to partially blended spectral data when you
have good measurements of positions (i.e., from 2D tracing).
    p is a sequence of length 2N (or 2N+1, if a constant offset is
    included as the last element). If N=2:
      y = [p4 +] p0/(p1*sqrt(2pi)) * exp(-(x-mu1)**2 / (2*p1**2)) +
          p2/(p3*sqrt(2pi)) * exp(-(x-mu2)**2 / (2*p3**2))
p[0] -- Area of gaussian 1
p[1] -- one-sigma dispersion of gaussian 1
p[2] -- Area of gaussian 2
p[3] -- one-sigma dispersion of gaussian 2
... etc.
p[-1] -- optional constant, vertical offset
and
mu1 -- central offset (mean location) of gaussian A
mu2 -- central offset (mean location) of gaussian B
NOTE: FWHM = 2*sqrt(2*ln(2)) * p1 ~ 2.3548*p1
SEE ALSO: :func:`doubleGaussian`, :func:`gaussian`
"""
# 2013-05-06 20:29 IJMC: Created
x = array(x, dtype=float, copy=False)
ret = np.zeros(x.size)
ngaussians = int(len(p)/2)
for ii in xrange(ngaussians):
ret += gaussian([p[ii*2], p[ii*2+1], mu[ii], 0], x)
    if len(p)/2. != len(p)/2: # P is odd, so the last value is our
ret += p[-1] # additive constant.
return ret
def egaussian(p, x, y, e=None):
""" Compute the deviation between the values y and the gaussian defined by p, x:
p is a three- or four-component array, list, or tuple.
Returns: y - p3 - p0/(p1*sqrt(2pi)) * exp(-(x-p2)**2 / (2*p1**2))
if an error array, e (typ. one-sigma) is entered, the returned value is divided by e.
SEE ALSO: :func:`gaussian`"""
# 2008-09-11 15:19 IJC: Created
# 2009-09-02 15:20 IJC: Added weighted case
# 2011-05-18 11:46 IJMC: Moved to analysis.
from numpy import ones
if e is None:
e=ones(x.shape)
fixval(e,y.max()*1e10)
z = (y - gaussian(p, x))/e
fixval(z,0)
return z
def generic_mcmc(*arg, **kw):
"""Run a Markov Chain Monte Carlo (Metropolis-Hastings algorithm)
on an arbitrary function.
:INPUTS:
EITHER:
func : function to generate model.
First argument must be "params;" subsequent arguments are
passed in via the "args" keyword
params : 1D sequence
parameters to be fit
stepsize : 1D or 2D array
If 1D: 1-sigma change in parameter per iteration
If 2D: covariance matrix for parameter changes.
z : 1D array
Contains dependent data (to be modeled)
sigma : 1D array
Contains standard deviation (errors) of "z" data
numit : int
Number of iterations to perform
OR:
(allparams, (arg1, arg2, ...), numit)
where allparams is a concatenated list of parameters for each
of several functions, and the arg_i are tuples of (func_i,
stepsize_i, z_i, sigma_i). In this case the keyword 'args'
must also be a tuple of sequences, one for each function to be
MCMC'ed.
:OPTIONAL_INPUTS:
args : 1D sequence
Second, third, etc.... arguments to "func"
nstep : int
Saves every "nth" step of the chain
posdef : None, 'all', or sequences of indices.
Which elements should be restricted to positive definite?
If indices, it should be of the form (e.g.): [0, 1, 4]
holdfixed : None, or sequences of indices.
Which elements should be held fixed in the analysis?
If indices, it should be of the form (e.g.): [0, 1, 4]
jointpars : None, or sequence of 2-tuples.
Only for simultaneous multi-function fitting. For
each pair of values passed, we set the parameters
values so: allparams[pair[1]] = allparams[pair[0]]
:OUTPUTS:
allparams : 2D array
Contains all parameters at each step
bestp : 1D array
        Contains best parameters as determined by lowest Chi^2
numaccept: int
Number of accepted steps
chisq: 1D array
Chi-squared value at each step
:REFERENCES:
Numerical Recipes, 3rd Edition (Section 15.8)
Wikipedia
:NOTES:
If you need an efficient MCMC algorithm, you should be using
http://danfm.ca/emcee/
"""
# 2011-06-07 07:50 IJMC: Created from various other MCMC codes,
# eventually deriving from K. Stevenson's
# sample code.
# 2011-06-14 09:48 IJMC: Allow for covariance matrix pass-in to stepsize
# 2011-06-27 17:39 IJMC: Now link joint parameters for initial chisq.
# 2011-09-16 13:31 IJMC: Fixed bug for nextp when nfits==1
# 2011-11-02 22:08 IJMC: Now cast numit as an int
import numpy as np
# Parse keywords/optional inputs:
defaults = dict(args=(), nstep=1, posdef=None, holdfixed=None, \
jointpars=None, verbose=False)
for key in defaults:
if (not kw.has_key(key)):
kw[key] = defaults[key]
args = kw['args']
nstep = kw['nstep']
posdef = kw['posdef']
holdfixed = kw['holdfixed']
jointpars = kw['jointpars']
verbose = kw['verbose']
# Parse inputs:
if len(arg)==6:
func, params, stepsize, z, sigma, numit = arg
stepsize = np.array(stepsize, copy=True)
weights = 1./sigma**2
nfits = 1
elif len(arg)==3:
params, allargs, numit = arg[0:3]
nfits = len(allargs)
funcs = []
stepsizes = []
zs = []
multiargs = []
multiweights = []
npars = []
for ii, these_args in enumerate(allargs):
funcs.append(these_args[0])
stepsizes.append(np.array(these_args[1], copy=True))
zs.append(these_args[2])
multiweights.append(1./these_args[3]**2)
multiargs.append(args[ii])
npars.append(stepsizes[-1].shape[0])
else:
print "Must pass either 3 or 6 parameters as input."
print "You passed %i." % len(arg)
return -1
#Initial setup
numaccept = 0
numit = int(numit)
nout = numit/nstep
bestp = np.copy(params)
original_params = np.copy(params)
allparams = np.zeros((len(params), nout))
allchi = np.zeros(nout,float)
# Set indicated parameters to be positive definite:
if posdef=='all':
params = np.abs(params)
posdef = np.arange(params.size)
elif posdef is not None:
posdef = np.array(posdef)
params[posdef] = np.abs(params[posdef])
else:
posdef = np.zeros(params.size, dtype=bool)
# Set indicated parameters to be held fixed:
if holdfixed is not None:
holdfixed = np.array(holdfixed)
params[holdfixed] = np.abs(params[holdfixed])
else:
holdfixed = np.zeros(params.size, dtype=bool)
if verbose:
print params[posdef]
# Set joint parameters:
if jointpars is not None:
for jp in jointpars:
params[jp[1]] = params[jp[0]]
#Calc chi-squared for model using current params
if nfits==1:
zmodel = func(params, *args)
currchisq = (((zmodel - z)**2)*weights).ravel().sum()
bestchisq = currchisq
else:
tempchisq = 0
for ii in range(nfits):
i0 = sum(npars[0:ii])
i1 = i0 + npars[ii]
this_zmodel = funcs[ii](params[i0:i1], *multiargs[ii])
thischisq = (((this_zmodel - zs[ii])**2) * multiweights[ii]).ravel().sum()
tempchisq += thischisq
currchisq = tempchisq
bestchisq = currchisq
if verbose:
print currchisq
#Run Metropolis-Hastings Monte Carlo algorithm 'numit' times
for j in range(numit):
#Take step in random direction for adjustable parameters
if nfits==1:
if len(stepsize.shape)==1:
nextp = np.array([np.random.normal(params,stepsize)]).ravel()
else:
nextp = np.random.multivariate_normal(params, stepsize)
else:
nextstep = np.zeros(len(params), dtype=float)
for ii in range(nfits):
i0 = sum(npars[0:ii])
i1 = i0 + npars[ii]
if len(stepsizes[ii].shape)==1:
nextstep[i0:i1] = np.random.normal([0]*npars[ii], stepsizes[ii])
else:
nextstep[i0:i1] = np.random.multivariate_normal([0]*npars[ii], stepsizes[ii])
nextp = params + nextstep
# Constrain the desired parameters:
nextp[posdef] = np.abs(nextp[posdef])
nextp[holdfixed] = original_params[holdfixed]
if jointpars is not None:
for jp in jointpars:
nextp[jp[1]] = nextp[jp[0]]
#print nextp[jp[1]], nextp[jp[0]], jp
#COMPUTE NEXT CHI SQUARED AND ACCEPTANCE VALUES
if nfits==1:
zmodel = func(nextp, *args)
nextchisq = (((zmodel - z)**2)*weights).ravel().sum()
else:
tempchisq = 0
for ii in range(nfits):
i0 = sum(npars[0:ii])
i1 = i0 + npars[ii]
this_zmodel = funcs[ii](nextp[i0:i1], *multiargs[ii])
thischisq = (((this_zmodel - zs[ii])**2) * multiweights[ii]).ravel().sum()
tempchisq += thischisq
nextchisq = tempchisq
if verbose:
print nextchisq
print nextp == original_params
accept = np.exp(0.5 * (currchisq - nextchisq))
if (accept >= 1) or (np.random.uniform(0, 1) <= accept):
#Accept step
numaccept += 1
params = np.copy(nextp)
currchisq = nextchisq
if (currchisq < bestchisq):
#New best fit
bestp = np.copy(params)
bestchisq = currchisq
if (j%nstep)==0:
allparams[:, j/nstep] = params
allchi[j/nstep] = currchisq
return allparams, bestp, numaccept, allchi
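# Hedged single-function usage sketch for generic_mcmc -- a straight-line
# fit to noisy data (all names and values are illustrative):
#
#   import numpy as np
#   def linemodel(p, x):
#       return p[0] + p[1]*x
#   x = np.linspace(0., 1., 40)
#   z = 1. + 2.*x + np.random.normal(0., 0.1, 40)
#   chain, bestp, nacc, chisq = generic_mcmc(
#       linemodel, np.array([0.5, 1.5]), np.array([0.02, 0.02]),
#       z, 0.1*np.ones(40), 10000, args=(x,))
#   # chain has shape (nparam, numit/nstep); nacc/10000. is the acceptance rate.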
def scale_mcmc_stepsize(accept, func, params, stepsize, z, sigma, numit=1000, scales=[0.1, 0.3, 1., 3., 10.], args=(), nstep=1, posdef=None, holdfixed=None, retall=False, jointpars=None):
"""Run :func:`generic_mcmc` and scale the input stepsize to match
the desired input acceptance rate.
:INPUTS:
mostly the same as for :func:`generic_mcmc`, but also with:
accept : float between 0 and 1
desired acceptance rate; typically between 0.15 - 0.5.
scales : sequence of floats
test scaling parameters; measure for these, then interpolate to get 'accept'
retall : bool
if True, return tuple (scalefactor, acceptances, scales).
Otherwise return only the scaler 'scalefactor.'
:REQUIREMENTS:
:doc:`pylab` (for :func:`pylab.interp`)
"""
# 2011-06-13 16:06 IJMC: Created
from pylab import interp
stepsize = array(stepsize, copy=True)
nfactors = len(scales)
mcmc_accept = []
for factor in scales:
out = generic_mcmc(func, params, stepsize/factor, z, sigma, numit, args=args, nstep=nstep, posdef=posdef, holdfixed=holdfixed, jointpars=jointpars)
mcmc_accept.append(1.0 * out[2]/numit)
final_factor = interp(accept, mcmc_accept, scales)
if retall:
ret = (final_factor, mcmc_accept, scales)
else:
ret = final_factor
return ret
def unityslope(slope, ttt):
return 1. + slope*(ttt - ttt.mean())
def travisplanet(p):
"""Generate a line of text for Travis Barman's planet table.
INPUT: a planet object from :func:`getobj`.
"""
# 2012-02-14 14:52 IJMC: Created
vals1 = (p.name.replace(' ','').replace('b', ''), p.msini, p.umsini, p.r, p.r-p.ur, p.r+p.ur)
vals2 = (p.per, p.a, p.mstar, p.umstar, p.rstar, p.urstar, p.teff, p.uteff, p.fe, p.ufe)
nvals = len(vals1)+len(vals2)
fstr = '%s' + ' '*(10-len(vals1)) + ' %1.2f'*5 + ' %1.6f %1.4f' + ' %1.2f'*4 + ' %i'*2 + ' %1.2f'*2
return fstr % (vals1 + vals2 )
def stdfilt(vec, wid=3):
"""Compute the standard deviation in a sliding window.
:INPUTS:
vec : 1D sequence
data to filter
wid : int, odd
width of filter; ideally odd (not even).
"""
# 2012-04-05 13:58 IJMC: Created
filt = 0*vec
if wid<1:
wid = 1
wid = int(wid)
n = len(vec)
for ii in range(n):
i0 = np.max([0, ii -wid/2])
i1 = np.min([n-1, ii + wid/2])
filt[ii] = np.std(vec[i0:i1+1])
#print ii, i0, i1
return filt
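# Hedged usage sketch for stdfilt (illustrative):
#
#   import numpy as np
#   v = np.concatenate((np.zeros(50), np.random.normal(0., 1., 50)))
#   sf = stdfilt(v, wid=5)
#   # sf is ~0 in the quiet first half and ~1 in the noisy second half.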
def wmeanfilt(vec, wid=3, w=None):
"""Compute the (weighted) mean in a sliding window.
:INPUTS:
vec : 1D sequence
data to filter
wid : int, odd
width of filter; ideally odd (not even).
"""
# 2012-04-28 06:09 IJMC: Created
filt = 0*vec
if wid<1:
wid = 1
wid = int(wid)
n = len(vec)
for ii in range(n):
i0 = np.max([0, ii -wid/2])
i1 = np.min([n-1, ii + wid/2])
filt[ii] = wmean(vec[i0:i1+1], w[i0:i1+1])
#print ii, i0, i1
return filt
def planettext(planets, filename, delimiter=',', append=True):
"""Write planet object info into a delimited line of text.
:INPUTS:
planets : planet object or list thereof
filename : str
delimiter : str
"""
# 2012-01-24 16:42 IJMC: Created
if not hasattr(planets, '__iter__'):
planets = [planets]
column_headers = 'Planet Name', 'KIC', 'wfc3', 'stis', 'OIR', 'HST', 'SST', \
'M_p/M_J', 'R_p/R_J', 'P/d', 'a/AU', 'e', 'I', 'a/R*', 'b', 'k', 'T_14', 'd/pc', \
'ST', 'T_*', 'M_*', 'RA', 'Dec', 'Vmag', 'Jmag', 'Hmag', 'Kmag', 'R_s', \
'T_eff', 'g_p', '~H (gas-dom.)/km', 'Delta-D', 'Fp/Fs', 'NIR comp.?', \
'"goodness"', 'kepler_vetting', 'ECL_metric', 'TRA_metric', 'MAX_metric'
field_names = 'name', None, None, None, None, None, None, 'msini', 'r', 'per', 'a', \
'ecc', 'i', 'ar', 'b', None, 't14', 'distance', 'sptype', 'teff', 'mstar', \
'ra_string', 'dec_string', 'v', 'j', 'h', 'ks', 'rstar'
if append:
f = open(filename, 'a')
else:
f = open(filename, 'w')
for header in column_headers:
f.write('%s%s' % (header, delimiter))
f.write('\n')
for p in planets:
for field in field_names:
if field is None:
f.write(delimiter)
elif hasattr(getattr(p, field), 'upper'): # STRING
f.write('%s%s' % (getattr(p, field), delimiter))
else: # FLOAT
f.write('%1.18f%s' % (getattr(p, field), delimiter))
f.write('\n')
return
def prayerbead(*arg, **kw):
"""Generic function to perform Prayer-Bead (residual permutation) analysis.
:INPUTS:
(fitparams, modelfunction, arg1, arg2, ... , data, weights)
OR:
(allparams, (args1, args2, ..), npars=(npar1, npar2, ...))
where allparams is an array concatenation of each functions
input parameters.
:OPTIONAL INPUTS:
jointpars -- list of 2-tuples.
For use with multi-function calling (w/npars
keyword). Setting jointpars=[(0,10), (0,20)] will
always set params[10]=params[0] and
params[20]=params[0].
parinfo -- None, or list of dicts
'parinfo' to pass to the kapteyn.py kpmfit routine.
gaussprior -- list of 2-tuples, same length as "allparams."
The i^th tuple (x_i, s_i) imposes a Gaussian prior
on the i^th parameter p_i by adding ((p_i -
x_i)/s_i)^2 to the total chi-squared.
axis -- int or None
If input is 2D, which axis to permute over.
(NOT YET IMPLEMENTED!)
step -- int > 0
Stepsize for permutation steps. 1 by default.
(NOT YET IMPLEMENTED!)
verbose -- bool
Print various status lines to console.
maxiter -- int
Maximum number of iterations for _each_ fitting step.
maxfun -- int
Maximum number of function evaluations for _each_ fitting step.
threads -- int
Number of threads to use (via multiprocessing.Pool)
:EXAMPLE:
::
TBW
:REQUIREMENTS:
:doc:`kapteyn`, :doc:`phasecurves`, :doc:`numpy`
"""
# 2012-04-30 07:29 IJMC: Created
# 2012-05-03 16:35 IJMC: Now can impose gaussian priors
# 2012-09-17 14:08 IJMC: Fixed bug when shifting weights (thanks
# to P. Cubillos)
# 2014-05-01 20:52 IJMC: Now allow multiprocessing via 'threads' keyword!
#from kapteyn import kmpfit
import phasecurves as pc
from multiprocessing import Pool
if kw.has_key('axis'):
axis = kw['axis']
else:
axis = None
if kw.has_key('parinfo'):
parinfo = kw.pop('parinfo')
else:
parinfo = None
if kw.has_key('verbose'):
verbose = kw.pop('verbose')
else:
verbose = None
if kw.has_key('step'):
step = kw.pop('step')
else:
step = None
if kw.has_key('maxiter'):
maxiter = kw.pop('maxiter')
else:
maxiter = 3000
if kw.has_key('maxfun'):
maxfun = kw.pop('maxfun')
else:
maxfun = 6000
if kw.has_key('xtol'):
xtol = kw.pop('xtol')
else:
xtol = 1e-12
if kw.has_key('ftol'):
ftol = kw.pop('ftol')
else:
ftol = 1e-12
#pdb.set_trace()
if kw.has_key('threads'):
pool = Pool(processes=kw['threads'])
else:
pool = None
guessparams = arg[0]
modelfunction = arg[1]
nparam = len(guessparams)
if isinstance(arg[-1], dict):
        # Surreptitiously setting keyword arguments:
kw2 = arg[-1]
kw.update(kw2)
arg = arg[0:-1]
else:
pass
narg = len(arg)
helperargs = arg[2:narg-2]
data = np.array(arg[-2], copy=False)
weights = arg[-1]
if data.ndim > 1:
print "I haven't implemented 2D multi-dimensional data handling yet!"
else:
ndata = data.size
if kw.has_key('npars'):
print "I haven't yet dealt with this for prayerbead analyses!"
npars = kw['npars']
ret = []
# Excise "npars" kw for recursive calling:
lower_kw = kw.copy()
junk = lower_kw.pop('npars')
# Keep fixed pairs of joint parameters:
if kw.has_key('jointpars'):
jointpars = kw['jointpars']
for jointpar in jointpars:
                guessparams[jointpar[1]] = guessparams[jointpar[0]]
for ii in range(len(npars)):
i0 = sum(npars[0:ii])
i1 = i0 + npars[ii]
these_params = arg[0][i0:i1]
            ret.append(prayerbead(these_params, *arg[1][ii], **lower_kw))  # recurse per function ('resfunc' was undefined)
return ret
fitter_args = (modelfunction,) + helperargs + (data, weights, kw)
fmin_fit = fmin(pc.errfunc, guessparams, args=fitter_args, full_output=True, disp=False, maxiter=maxiter, maxfun=maxfun)
bestparams = np.array(fmin_fit[0], copy=True)
    bestmodel = modelfunction(*((bestparams,) + helperargs))
residuals = data - bestmodel
allfits = np.zeros((ndata, nparam), dtype=float)
allfits[0] = bestparams
if verbose: print "Finished prayer bead step ",
#pdb.set_trace()
if pool is None:
allfits[1:] = np.array(map(pb_helperfunction, [[ii, bestmodel, bestparams, residuals, weights, modelfunction, helperargs, maxiter, maxfun, xtol, ftol, ndata, kw, verbose] for ii in xrange(1, ndata)]))
else:
allfits[1:] = np.array(pool.map(pb_helperfunction, [[ii, bestmodel, bestparams, residuals, weights, modelfunction, helperargs, maxiter, maxfun, xtol, ftol, ndata, kw, verbose] for ii in xrange(1, ndata)]))
return allfits
def pb_helperfunction(inputs):
"""Helper function for :func:`prayerbead`. Not for general use."""
# 2014-05-01 20:35 IJMC: Created
import phasecurves as pc
index, bestmodel, bestparams, residuals, weights, modelfunction, helperargs, maxiter, maxfun, xtol, ftol, ndata, kw, verbose = inputs
shifteddata = bestmodel + np.concatenate((residuals[index::], residuals[0:index]))
shiftedweights = np.concatenate((weights[index::], weights[0:index]))
shifted_args = (modelfunction,) + helperargs + (shifteddata, shiftedweights, kw)
fmin_fit = fmin(pc.errfunc, bestparams, args=shifted_args, full_output=True, disp=False, maxiter=maxiter, maxfun=maxfun, xtol=xtol, ftol=ftol)
if verbose: print ("%i of %i." % (index+1, ndata)),
return fmin_fit[0]
def morlet(scale, k, k0=6.0, retper=False, retcoi=False, retcdelta=False, retpsi0=False): # From Wavelet.pro; still incomplete!
n = len(k)
expnt = -0.5 * (scale * k - k0)**2 * (k > 0.)
dt = 2 * np.pi / (n*k[1])
norm = np.sqrt(2*np.pi*scale/dt) * (np.pi**-0.25) # total energy=N
    morlet = norm * np.exp( np.maximum(expnt, -100.) ) # clip exponent, per Wavelet.pro
morlet = morlet * (expnt > -100) # avoid underflow errors
morlet = morlet * (k > 0) # Heaviside step function (Morlet is complex)
fourier_factor = (4 * np.pi) / (k0 + np.sqrt(2. + k0**2)) # Scale --> Fourier
period = scale * fourier_factor
coi = fourier_factor / np.sqrt(2) # Cone-of-influence
dofmin = 2
Cdelta = -1
if k0==6: Cdelta = 0.776
psi0 = np.pi**-0.25
ret = (morlet,)
if retper:
ret = ret + (period,)
if retcoi:
ret = ret + (coi,)
if retcdelta:
        ret = ret + (Cdelta,)
if retpsi0:
ret = ret + (psi0,)
if len(ret)==1:
ret = ret[0]
return ret
def test_eccentric_anomaly(ecc, manom, tol=1e-8):
""" Test various methods of computing the eccentric anomaly.
ecc = scalar; manom = 1D NumPy array
Just run, e.g.:
::
ecc = 0.15
p_orb = 3.3
mean_anom = 2*pi*linspace(0, p_orb, 10000)/p_orb
an.test_eccentric_anomaly(ecc, mean_anom, tol=1e-10)
"""
# 2012-10-15 21:46 IJMC: Created, for my own curiosity.
from time import time
e0 = np.zeros(manom.size)
e1 = np.zeros(manom.size)
e2 = np.zeros(manom.size)
e3 = np.zeros(manom.size)
tic = time()
for ii,element in enumerate(manom):
def kep(e): return element - e + ecc*sin(e)
e0[ii] = optimize.brentq(kep, element-1, element+1, xtol=tol, disp=False)
toc0 = time() - tic
tic = time()
for ii,element in enumerate(manom):
def kep(e): return element - e + ecc*sin(e)
e1[ii] = optimize.newton(kep, ecc, tol=tol)
toc1 = time() - tic
tic = time()
guessfactor = np.pi * (ecc+0.01) / 0.81 # guess=pi for ecc=0.8
for ii,element in enumerate(manom): # Explicit Newton's method
err = tol*10
val = guessfactor
while np.abs(err) > tol:
err = (element + ecc*np.sin(val) - val) / (1. - ecc*np.cos(val))
val += err
e2[ii] = val
toc2 = time() - tic
tic = time()
for ii,element in enumerate(manom): # simple iteration:
err = tol*10
oldval = 0.
while np.abs(err) > tol:
val = element + ecc * np.sin(oldval)
err = val - oldval
oldval = val
e3[ii] = val
toc3 = time() - tic
print "SciPy BrentQ: [%1.6f, %1.6f, ....] -- %1.4f s" % (e0[0], e0[1], toc0)
print "SciPy Newton: [%1.6f, %1.6f, ....] -- %1.4f s" % (e1[0], e1[1], toc1)
print "Explicit Newton: [%1.6f, %1.6f, ....] -- %1.4f s" % (e2[0], e2[1], toc2)
print "Simple iteration: [%1.6f, %1.6f, ....] -- %1.4f s" % (e3[0], e3[1], toc3)
return
def fmin_helper(params, func=None, **kw_dict):
return fmin(func, params, **kw_dict)
def fmin_helper2(all_args):
"""Allows me to wrap :func:`fmin` within pool.map() for multithreading.
:EXAMPLE:
::
from multiprocessing import Pool
import analysis as an
import phasecurves as pc
pool = Pool(processes=nthreads)
fits = pool.map(an.fmin_helper2, [[pc.errfunc, pos0[jj], mcargs, test_kws] for jj in xrange(0, nwalkers, int(nwalkers/12.))])
# The above line is equivalent to, but roughly ~(nthreads)
# times faster, than the standard way to do it:
        fits2 = [an.fmin(pc.errfunc, pos0[jj], mcargs, **test_kws) for jj in xrange(0, nwalkers, int(nwalkers/12.))]
:NOTES:
This must be a separate, stand-alone function in order to be
'pickleable', which is required by pool.map().
"""
# 2014-08-10 10:01 IJMC: Added documentation.
if isinstance(all_args[-1], dict):
ret = fmin(all_args[0], *all_args[1:-1], **all_args[-1])
else:
ret = fmin(all_args[0], *all_args[1:-1])
return ret
def gfit(func, x0, fprime, args=(), kwargs=dict(), maxiter=2000, ftol=0.001, factor=1., disp=False, bounds=None):
"""Perform gradient-based minimization of a user-specified function.
:INPUTS:
func : function
Function that takes as input the parameters x0, optional
additional arguments args, and optional keywords kwargs, and
returns the metric to be minimized as a scalar. For chi-squared
minimization, a generalized option is :phasecurves:`errfunc`.
x0 : sequence
List or 1D NumPy array of initial-guess parameters, to be
adjusted to minimize func(x0, *args, **kwargs).
fprime : function
Function that takes as input the parameters x0, optional
additional arguments args, and optional keywords kwargs, and
returns the partial derivatives of the metric to be minimized
with regard to each element of x0.
args : list
Optional arguments to func and fprime (see above)
kwargs : dict
Optional keywords to func and fprime (see above)
maxiter : int
Maximum number of iterations to run.
ftol : scalar
Desired tolerance on the metric to be minimized. Iteration
will continue until either iter>maxiter OR
(metric_i - metric_(i+1)) < ftol.
factor : scalar
Factor to scale gradient before applying each new
iteration. Small values will lead to slower convergences;
large values will lead to wild behavior. The code attempts to
(crudely) tune the value of 'factor' depending on how the
minimization process progresses.
disp : bool
If True, print some text to screen on each iteration.
bounds : None, or list
(min, max) pairs for each element in x0, defining the
bounds on that parameter. Use None or +/-inf for one of
min or max when there is no bound in that direction.
:RETURNS:
(params, metric, n_iter)
:NOTES:
The program attempts to be slightly clever: if the metric
decreases by <ftol on one iteration, the code iterates one more
time. If the termination criterion is once again met then
minimization ends; if not, minimization continues as before.
For quicker, smarter routines that do much the same thing, you
may want to check out the functions in the scipy.optimize package.
"""
# 2013-08-09 10:37 IJMC: Created
# 2013-08-11 16:06 IJMC: Added a missing boolean flag
if bounds is not None:
bounds = np.array(bounds)
def applyBounds(params):
if bounds is not None:
params = np.vstack((params, bounds[:,0])).max(0)
params = np.vstack((params, bounds[:,1])).min(0)
return params
bestparams = applyBounds(x0)
nx = bestparams.size
metric = func(x0, *args, **kwargs)
dmetric = 9e9
keepFitting = True
lastIterSaidToStop = False
iter = 0
recalcGrad = True
if disp:
fmtstr = '%7i %1.'+str(np.abs(np.log(ftol)).astype(int)+2)+'f %1.5e %1.3e'
print ' ITER METRIC FACTOR DMETRIC'
while iter<maxiter and keepFitting:
iter += 1
if recalcGrad: grad = fprime(bestparams, *args, **kwargs)
newparam = applyBounds(bestparams - factor * grad)
newmetric = func(newparam, *args, **kwargs)
if newmetric < metric:
bestparams = newparam.copy()
dmetric = newmetric - metric
metric = newmetric
if recalcGrad is True: factor *= 1.5 # we updated twice in a row!
recalcGrad = True
if np.abs(dmetric) < ftol:
if disp: print "Met termination criterion"
if lastIterSaidToStop:
keepFitting = False
else:
lastIterSaidToStop = True
else:
factor /= 2
recalcGrad = False
lastIterSaidToStop = False
if disp: print fmtstr % (iter, metric, factor, dmetric)
return bestparams, metric, iter
def lnprob(x, ivar):
return -0.5 * np.sum(ivar * x ** 2)
def returnSections(time, dtmax=0.1):
"""Return 2-tuples that are the indices of separate sections, as
indicated by breaks in a continuous and always-increasing time
series.
:INPUTS:
time : 1D NumPy array
The time index of interest. Should be always increasing, such
that numpy.diff(time) is always positive.
dtmax : float
Any break in 'time' equal to or larger than this indicates a
new segment.
:EXAMPLE:
::
import transit
# Simulate a time series with 30-minute sampling:
t1 = np.arange(0, 3.7, 0.5/24)
t2 = np.arange(5, 70, 0.5/24)
t3 = np.arange(70.2, 85, 0.5/24)
days = np.concatenate((t1, t2, t3))
ret = transit.returnSections(days, dtmax=0.1)
# If each segment was correctly identified, these print 'True':
print (t1==days[ret[0][0]:ret[0][1]+1]).all()
print (t2==days[ret[1][0]:ret[1][1]+1]).all()
print (t3==days[ret[2][0]:ret[2][1]+1]).all()
"""
# 2014-08-11 16:52 IJMC: Created
dt = np.diff(time)
inds = np.concatenate(([-1], (dt>=dtmax).nonzero()[0], [time.size]))
ret = [[inds[ii]+1, inds[ii+1]] for ii in xrange(inds.size-1)]
ret[-1][-1] -= 1
return ret
def total_least_squares(data1, data2, data1err=None, data2err=None,
print_results=False, ignore_nans=True, intercept=True,
return_error=False, inf=1e10):
"""
Use Singular Value Decomposition to determine the Total Least Squares linear fit to the data.
(e.g. http://en.wikipedia.org/wiki/Total_least_squares)
data1 - x array
data2 - y array
if intercept:
returns m,b in the equation y = m x + b
else:
returns m
    print_results tells you some information about what fraction of the variance is accounted for
ignore_nans will remove NAN values from BOTH arrays before computing
Parameters
----------
data1,data2 : np.ndarray
Vectors of the same length indicating the 'x' and 'y' vectors to fit
data1err,data2err : np.ndarray or None
Vectors of the same length as data1,data2 holding the 1-sigma error values
Notes
-----
From https://code.google.com/p/agpy/
"""
# 2014-08-26 07:44 IJMC: Copied from https://code.google.com/p/agpy/
    import numpy  # the code below uses the bare "numpy" name throughout
    if ignore_nans:
badvals = numpy.isnan(data1) + numpy.isnan(data2)
if data1err is not None:
badvals += numpy.isnan(data1err)
if data2err is not None:
badvals += numpy.isnan(data2err)
        goodvals = ~badvals  # boolean NOT; (True - badvals) fails on modern numpy
if goodvals.sum() < 2:
if intercept:
return 0,0
else:
return 0
if badvals.sum():
data1 = data1[goodvals]
data2 = data2[goodvals]
if intercept:
dm1 = data1.mean()
dm2 = data2.mean()
else:
dm1,dm2 = 0,0
arr = numpy.array([data1-dm1,data2-dm2]).T
U,S,V = numpy.linalg.svd(arr, full_matrices=False)
# v should be sorted.
# this solution should be equivalent to v[1,0] / -v[1,1]
# but I'm using this: http://stackoverflow.com/questions/5879986/pseudo-inverse-of-sparse-matrix-in-python
M = V[-1,0]/-V[-1,-1]
varfrac = S[0]/S.sum()*100
if varfrac < 50:
raise ValueError("ERROR: SVD/TLS Linear Fit accounts for less than half the variance; this is impossible by definition.")
# this is performed after so that TLS gives a "guess"
if data1err is not None or data2err is not None:
try:
from scipy.odr import RealData,Model,ODR
except ImportError:
raise ImportError("Could not import scipy; cannot run Total Least Squares")
def linmodel(B,x):
if intercept:
return B[0]*x + B[1]
else:
return B[0]*x
if data1err is not None:
data1err = data1err[goodvals]
data1err[data1err<=0] = inf
if data2err is not None:
data2err = data2err[goodvals]
data2err[data2err<=0] = inf
        if any([data1.shape != other.shape for other in (data2, data1err, data2err) if other is not None]):
raise ValueError("Data shapes do not match")
linear = Model(linmodel)
data = RealData(data1,data2,sx=data1err,sy=data2err)
B = data2.mean() - M*data1.mean()
beta0 = [M,B] if intercept else [M]
myodr = ODR(data,linear,beta0=beta0)
output = myodr.run()
if print_results:
output.pprint()
if return_error:
return numpy.concatenate([output.beta,output.sd_beta])
else:
return output.beta
if intercept:
B = data2.mean() - M*data1.mean()
if print_results:
print "TLS Best fit y = %g x + %g" % (M,B)
print "The fit accounts for %0.3g%% of the variance." % (varfrac)
print "Chi^2 = %g, N = %i" % (((data2-(data1*M+B))**2).sum(),data1.shape[0]-2)
return M,B
else:
if print_results:
print "TLS Best fit y = %g x" % (M)
print "The fit accounts for %0.3g%% of the variance." % (varfrac)
print "Chi^2 = %g, N = %i" % (((data2-(data1*M))**2).sum(),data1.shape[0]-1)
return M
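# Hedged usage sketch for total_least_squares (illustrative):
#
#   import numpy as np
#   xx = np.linspace(0., 10., 100)
#   yy = 3.*xx + 1. + np.random.normal(0., 0.5, 100)
#   m, b = total_least_squares(xx, yy)  # slope/intercept allowing x AND y scatter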
def confmap(map, frac, **kw):
"""Return the confidence level of a 2D histogram or array that
encloses the specified fraction of the total sum.
:INPUTS:
map : 1D or 2D numpy array
Probability map (from hist2d or kde)
frac : float, 0 <= frac <= 1
desired fraction of enclosed energy of map
:OPTIONS:
ordinate : None or 1D array
If 1D map, interpolates onto the desired value. This could
cause problems when you aren't just setting upper/lower
limits....
:SEE_ALSO:
:func:`dumbconf` for 1D distributions
"""
# 2010-07-26 12:54 IJC: Created
# 2011-11-05 14:29 IJMC: Fixed so it actually does what it's supposed to!
# 2014-09-05 21:11 IJMC: Moved from kdestats to analysis.py. Added
# errorcheck on 'frac'.
from scipy.optimize import bisect
    if hasattr(frac,'__iter__'):
        return [confmap(map,thisfrac, **kw) for thisfrac in frac]
    if frac<0 or frac>1:
        raise ValueError("Input 'frac' to confmap() must be 0 <= frac <= 1.")
    def diffsum(level, map, ndesired):
        return ((1.0*map[map >= level].sum()/map.sum() - ndesired))
#nx, ny = map.shape
#ntot = map.size
#n = int(ntot*frac)
#guess = map.max()
#dx = 10.*float((guess-map.min())/ntot)
#thisn = map[map<=guess].sum()
ret = bisect(diffsum, map.min()-1, map.max()+1, args=(map, frac))
    if kw.get('ordinate') is not None:
sortind = np.argsort(map)
ret = np.interp(ret, map[sortind], kw['ordinate'][sortind])
return ret
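# e.g. (illustrative): for map = np.array([1., 2., 4., 2., 1.]) and frac = 0.8,
# confmap returns a level in (1, 2]; the cells with values >= that level
# (2 + 4 + 2) enclose 80% of the map's total sum (10).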
def rv_semiamplitude(m1=None, m2=None, P=None, a=None, K=None, e=0., approx=False):
"""Estimate RV Semiamplitude (or other quantities derived from it).
:INPUTS:
m1 : scalar
Primary mass (in SOLAR MASSES)
m2 : scalar
Secondary mass (in JUPITER MASSES)
P : scalar
Orbital period (in DAYS). Not needed if 'a' is input.
a : scalar
Semimajor axis (in AU). Not needed if 'P' is input.
K : scalar
RV Semiamplitude (in m/s)
e : scalar
Orbital eccentricity. Defaults to zero.
approx : bool
If True, implicitly assume m2<<m1 (in physical units). Note that
this is required if you want to pass in vector inputs.
:EXAMPLES:
::
import analysis as an
import numpy as np
# Calculate K given Earth's known parameters:
print an.rv_semiamplitude(1,an.mearth/an.mjup, P=365.)
# Infer a planet mass from a measured K = 1.96 +/- 0.77 m/s:
      k1s = np.random.normal(loc=1.96, scale=0.77, size=100000)
      m2_best = an.rv_semiamplitude(m1=0.85, P=0.9596, K=1.96)
      m2_mc = an.rv_semiamplitude(m1=0.85, P=0.9596, K=k1s, approx=True)
      print '%1.3f +/- %1.3f M_Jup' % (m2_best, np.std(m2_mc))
"""
rvconst = 28.4329 # m/s; default scaling from Lovis & Fischer paper
eccterm = np.sqrt(1. - e**2)
ret = None
if a is None:
if m2 is None:
lastTermLowM2 = (m1**-2 * (365.24/P))**(1./3.)
else:
lastTerm = ((m1 + m2*mjup/msun)**-2 * (365.24/P))**(1./3.)
elif P is None:
if m2 is None:
lastTermLowM2 = 1./np.sqrt(m1 * a)
else:
lastTerm = np.sqrt((1./(m1 + m2*mjup/msun) * (1./a)))
if K is None:
ret = rvconst / eccterm * m2 * lastTerm
elif m2 is None:
approxroot = K * eccterm / rvconst / lastTermLowM2
if approx:
ret = approxroot
else:
if P is None:
coef1 = (rvconst/eccterm)**2
coef2 = -K**2 * a * (mjup/msun)
coef3 = -K**2 * a * m1
coefs = [coef1, coef2, coef3]
elif a is None:
coef1 = (rvconst/eccterm)**3
coef2 = -K**3 * (P/365.24) * (mjup/msun)
coef3 = 0.
coef4 = -K**3 * (P/365.24) * m1
coefs = [coef1, coef2, coef3, coef4]
roots = np.roots(coefs)
            goodrootind = (np.abs(np.imag(roots)) <= 1e-10) * (np.real(roots) >= 0)
            validroots = roots[goodrootind]
if validroots.size==0:
print "No exact, analytic RV semiamplitude found! Assuming m2<<m1."
ret = approxroot
            elif validroots.size>1:
                print "Multiple possible RV semiamplitudes found. Choosing the root closest to the m2<<m1 approximation."
                ret = validroots[np.argmin(np.abs(validroots - approxroot))]
else:
ret = np.real(validroots)
return ret
|
ndokos/pbench
|
refs/heads/master
|
lib/pbench/test/functional/agent/cli/commands/results/test_clear_results.py
|
3
|
import pytest
def test_pbench_clear_results_help():
command = ["pbench-clear-results", "--help"]
out, err, exitcode = pytest.helpers.capture(command)
assert b"Usage: pbench-clear-results [OPTIONS]" in out
assert exitcode == 0
def test_clear_results(monkeypatch, agent_config, pbench_run, pbench_cfg):
monkeypatch.setenv("_PBENCH_AGENT_CONFIG", str(pbench_cfg))
tool_default = pbench_run / "tools-v1-default" / "testhost.example.com"
tool_default.mkdir(parents=True)
mpstat = tool_default / "mpstat"
mpstat.touch()
tmp_dir = pbench_run / "tmp" / "leave-me"
tmp_dir.mkdir(parents=True)
tmp_dir = tmp_dir / "alone"
tmp_dir.touch()
junk = pbench_run / "foo"
junk.touch()
# test-63
command = ["pbench-clear-results"]
out, err, exitcode = pytest.helpers.capture(command)
assert exitcode == 0
assert tool_default.exists() is True
assert tmp_dir.exists() is True
assert junk.exists() is False
|
mglukhikh/intellij-community
|
refs/heads/master
|
python/testData/copyPaste/TopLevelIfStatementWithMultilineCondition.after.py
|
35
|
if (True or (True or
False)):
x = 1
y = 2
|
appsoma/kafka
|
refs/heads/trunk
|
system_test/utils/system_test_utils.py
|
88
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#!/usr/bin/env python
# ===================================
# system_test_utils.py
# ===================================
import copy
import difflib
import hashlib
import inspect
import json
import logging
import os
import re
import signal
import socket
import subprocess
import sys
import time
logger = logging.getLogger("namedLogger")
aLogger = logging.getLogger("anonymousLogger")
thisClassName = '(system_test_utils)'
d = {'name_of_class': thisClassName}
def get_current_unix_timestamp():
ts = time.time()
return "{0:.6f}".format(ts)
def get_local_hostname():
return socket.gethostname()
def sys_call(cmdStr):
output = ""
#logger.info("executing command [" + cmdStr + "]", extra=d)
p = subprocess.Popen(cmdStr, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
for line in p.stdout.readlines():
output += line
return output
def remote_async_sys_call(host, cmd):
cmdStr = "ssh " + host + " \"" + cmd + "\""
logger.info("executing command [" + cmdStr + "]", extra=d)
async_sys_call(cmdStr)
def remote_sys_call(host, cmd):
cmdStr = "ssh " + host + " \"" + cmd + "\""
logger.info("executing command [" + cmdStr + "]", extra=d)
sys_call(cmdStr)
def get_dir_paths_with_prefix(fullPath, dirNamePrefix):
dirsList = []
for dirName in os.listdir(fullPath):
        if not os.path.isfile(os.path.join(fullPath, dirName)) and dirName.startswith(dirNamePrefix):
            dirsList.append(os.path.abspath(os.path.join(fullPath, dirName)))
return dirsList
def get_testcase_prop_json_pathname(testcasePathName):
testcaseDirName = os.path.basename(testcasePathName)
return testcasePathName + "/" + testcaseDirName + "_properties.json"
def get_json_list_data(infile):
json_file_str = open(infile, "r").read()
json_data = json.loads(json_file_str)
data_list = []
for key,settings in json_data.items():
if type(settings) == list:
for setting in settings:
if type(setting) == dict:
kv_dict = {}
for k,v in setting.items():
kv_dict[k] = v
data_list.append(kv_dict)
return data_list
def get_dict_from_list_of_dicts(listOfDicts, lookupKey, lookupVal):
# {'kafka_home': '/mnt/u001/kafka_0.8_sanity', 'entity_id': '0', 'role': 'zookeeper', 'hostname': 'localhost'}
# {'kafka_home': '/mnt/u001/kafka_0.8_sanity', 'entity_id': '1', 'role': 'broker', 'hostname': 'localhost'}
#
    # Usage:
    #
    # 1. get_dict_from_list_of_dicts(self.clusterConfigsList, "entity_id", "0")
    #    returns the dict(s) matching the lookup key/value:
    #    {'kafka_home': '/mnt/u001/kafka_0.8_sanity', 'entity_id': '0', 'role': 'zookeeper', 'hostname': 'localhost'}
    #
    # 2. get_dict_from_list_of_dicts(self.clusterConfigsList, None, None)
    #    returns all dicts:
    #    {'kafka_home': '/mnt/u001/kafka_0.8_sanity', 'entity_id': '0', 'role': 'zookeeper', 'hostname': 'localhost'}
    #    {'kafka_home': '/mnt/u001/kafka_0.8_sanity', 'entity_id': '1', 'role': 'broker', 'hostname': 'localhost'}
    retList = []
    if ( lookupVal is None or lookupKey is None ):
        # no lookup key/value given: return every dict
        for entry in listOfDicts:
            retList.append( entry )
    else:
        for entry in listOfDicts:
            for k,v in entry.items():
                if ( k == lookupKey and v == lookupVal ):  # match with lookupKey and lookupVal
                    retList.append( entry )
    return retList
def get_data_from_list_of_dicts(listOfDicts, lookupKey, lookupVal, fieldToRetrieve):
# Sample List of Dicts:
# {'kafka_home': '/mnt/u001/kafka_0.8_sanity', 'entity_id': '0', 'role': 'zookeeper', 'hostname': 'localhost'}
# {'kafka_home': '/mnt/u001/kafka_0.8_sanity', 'entity_id': '1', 'role': 'broker', 'hostname': 'localhost'}
#
# Usage:
# 1. get_data_from_list_of_dicts(self.clusterConfigsList, "entity_id", "0", "role")
# => returns ['zookeeper']
# 2. get_data_from_list_of_dicts(self.clusterConfigsList, None, None, "role")
# => returns ['zookeeper', 'broker']
retList = []
if ( lookupVal is None or lookupKey is None ):
for dict in listOfDicts:
for k,v in dict.items():
if ( k == fieldToRetrieve ): # match with fieldToRetrieve ONLY
try:
retList.append( dict[fieldToRetrieve] )
                    except KeyError:
logger.debug("field not found: " + fieldToRetrieve, extra=d)
else:
for dict in listOfDicts:
for k,v in dict.items():
if ( k == lookupKey and v == lookupVal ): # match with lookupKey and lookupVal
try:
retList.append( dict[fieldToRetrieve] )
                    except KeyError:
logger.debug("field not found: " + fieldToRetrieve, extra=d)
return retList
def get_data_by_lookup_keyval(listOfDict, lookupKey, lookupVal, fieldToRetrieve):
returnValue = ""
returnValuesList = get_data_from_list_of_dicts(listOfDict, lookupKey, lookupVal, fieldToRetrieve)
if len(returnValuesList) > 0:
returnValue = returnValuesList[0]
return returnValue
def get_json_dict_data(infile):
json_file_str = open(infile, "r").read()
json_data = json.loads(json_file_str)
data_dict = {}
for key,val in json_data.items():
if ( type(val) != list ):
data_dict[key] = val
return data_dict
def get_remote_child_processes(hostname, pid):
pidStack = []
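    # Build a remote shell loop that echoes the given pid and then walks down
    # the process tree one child per level via `ps -o pid,ppid`.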
cmdList = ['''ssh ''' + hostname,
''''pid=''' + pid + '''; prev_pid=""; echo $pid;''',
'''while [[ "x$pid" != "x" ]];''',
'''do prev_pid=$pid;''',
''' for child in $(ps -o pid,ppid ax | awk "{ if ( \$2 == $pid ) { print \$1 }}");''',
''' do echo $child; pid=$child;''',
''' done;''',
''' if [ $prev_pid == $pid ]; then''',
''' break;''',
''' fi;''',
'''done' 2> /dev/null''']
cmdStr = " ".join(cmdList)
logger.debug("executing command [" + cmdStr, extra=d)
subproc = subprocess.Popen(cmdStr, shell=True, stdout=subprocess.PIPE)
for line in subproc.stdout.readlines():
procId = line.rstrip('\n')
pidStack.append(procId)
return pidStack
def get_child_processes(pid):
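    # NOTE: this follows a single parent->child chain (at most one child per
    # level); processes that fork several children are not fully enumerated.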
pidStack = []
currentPid = pid
parentPid = ""
pidStack.append(pid)
while ( len(currentPid) > 0 ):
psCommand = subprocess.Popen("ps -o pid --ppid %s --noheaders" % currentPid, shell=True, stdout=subprocess.PIPE)
psOutput = psCommand.stdout.read()
outputLine = psOutput.rstrip('\n')
childPid = outputLine.lstrip()
if ( len(childPid) > 0 ):
pidStack.append(childPid)
currentPid = childPid
else:
break
return pidStack
def sigterm_remote_process(hostname, pidStack):
while ( len(pidStack) > 0 ):
pid = pidStack.pop()
cmdStr = "ssh " + hostname + " 'kill -15 " + pid + "'"
try:
logger.debug("executing command [" + cmdStr + "]", extra=d)
sys_call_return_subproc(cmdStr)
except:
print "WARN - pid:",pid,"not found"
raise
def sigkill_remote_process(hostname, pidStack):
while ( len(pidStack) > 0 ):
pid = pidStack.pop()
cmdStr = "ssh " + hostname + " 'kill -9 " + pid + "'"
try:
logger.debug("executing command [" + cmdStr + "]", extra=d)
sys_call_return_subproc(cmdStr)
except:
print "WARN - pid:",pid,"not found"
raise
def simulate_garbage_collection_pause_in_remote_process(hostname, pidStack, pauseTimeInSeconds):
pausedPidStack = []
# pause the processes
while len(pidStack) > 0:
pid = pidStack.pop()
pausedPidStack.append(pid)
cmdStr = "ssh " + hostname + " 'kill -SIGSTOP " + pid + "'"
try:
logger.debug("executing command [" + cmdStr + "]", extra=d)
sys_call_return_subproc(cmdStr)
except:
print "WARN - pid:",pid,"not found"
raise
time.sleep(int(pauseTimeInSeconds))
# resume execution of the processes
while len(pausedPidStack) > 0:
pid = pausedPidStack.pop()
cmdStr = "ssh " + hostname + " 'kill -SIGCONT " + pid + "'"
try:
logger.debug("executing command [" + cmdStr + "]", extra=d)
sys_call_return_subproc(cmdStr)
except:
print "WARN - pid:",pid,"not found"
raise
def terminate_process(pidStack):
while ( len(pidStack) > 0 ):
pid = pidStack.pop()
try:
os.kill(int(pid), signal.SIGTERM)
except:
print "WARN - pid:",pid,"not found"
raise
def convert_keyval_to_cmd_args(configFilePathname):
cmdArg = ""
inlines = open(configFilePathname, "r").readlines()
for inline in inlines:
line = inline.rstrip()
tokens = line.split('=', 1)
if (len(tokens) == 2):
cmdArg = cmdArg + " --" + tokens[0] + " " + tokens[1]
elif (len(tokens) == 1):
cmdArg = cmdArg + " --" + tokens[0]
else:
print "ERROR: unexpected arguments list", line
return cmdArg
def async_sys_call(cmd_str):
subprocess.Popen(cmd_str, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
def sys_call_return_subproc(cmd_str):
p = subprocess.Popen(cmd_str, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
return p
def remote_host_file_exists(hostname, pathname):
cmdStr = "ssh " + hostname + " 'ls " + pathname + "'"
logger.debug("executing command: [" + cmdStr + "]", extra=d)
subproc = sys_call_return_subproc(cmdStr)
for line in subproc.stdout.readlines():
if "No such file or directory" in line:
return False
return True
def remote_host_directory_exists(hostname, path):
cmdStr = "ssh " + hostname + " 'ls -d " + path + "'"
logger.debug("executing command: [" + cmdStr + "]", extra=d)
subproc = sys_call_return_subproc(cmdStr)
for line in subproc.stdout.readlines():
if "No such file or directory" in line:
return False
return True
def remote_host_processes_stopped(hostname):
cmdStr = "ssh " + hostname + \
" \"ps auxw | grep -v grep | grep -v Bootstrap | grep -i 'java\|run\-\|producer\|consumer\|jmxtool\|kafka' | wc -l\" 2> /dev/null"
logger.info("executing command: [" + cmdStr + "]", extra=d)
subproc = sys_call_return_subproc(cmdStr)
for line in subproc.stdout.readlines():
line = line.rstrip('\n')
logger.info("no. of running processes found : [" + line + "]", extra=d)
if line == '0':
return True
return False
def setup_remote_hosts(systemTestEnv):
# sanity check on remote hosts to make sure:
# - all directories (eg. java_home) specified in cluster_config.json exists in all hosts
# - no conflicting running processes in remote hosts
aLogger.info("=================================================")
aLogger.info("setting up remote hosts ...")
aLogger.info("=================================================")
clusterEntityConfigDictList = systemTestEnv.clusterEntityConfigDictList
localKafkaHome = os.path.abspath(systemTestEnv.SYSTEM_TEST_BASE_DIR + "/..")
# when configuring "default" java_home, use JAVA_HOME environment variable, if exists
# otherwise, use the directory with the java binary
localJavaHome = os.environ.get('JAVA_HOME')
if localJavaHome is not None:
localJavaBin = localJavaHome + '/bin/java'
else:
subproc = sys_call_return_subproc("which java")
for line in subproc.stdout.readlines():
if line.startswith("which: no "):
logger.error("No Java binary found in local host", extra=d)
return False
else:
line = line.rstrip('\n')
localJavaBin = line
matchObj = re.match("(.*)\/bin\/java$", line)
localJavaHome = matchObj.group(1)
listIndex = -1
for clusterEntityConfigDict in clusterEntityConfigDictList:
listIndex += 1
hostname = clusterEntityConfigDict["hostname"]
kafkaHome = clusterEntityConfigDict["kafka_home"]
javaHome = clusterEntityConfigDict["java_home"]
if hostname == "localhost" and javaHome == "default":
clusterEntityConfigDictList[listIndex]["java_home"] = localJavaHome
if hostname == "localhost" and kafkaHome == "default":
clusterEntityConfigDictList[listIndex]["kafka_home"] = localKafkaHome
if hostname == "localhost" and kafkaHome == "system_test/migration_tool_testsuite/0.7":
clusterEntityConfigDictList[listIndex]["kafka_home"] = localKafkaHome + "/system_test/migration_tool_testsuite/0.7"
kafkaHome = clusterEntityConfigDict["kafka_home"]
javaHome = clusterEntityConfigDict["java_home"]
logger.debug("checking java binary [" + localJavaBin + "] in host [" + hostname + "]", extra=d)
if not remote_host_directory_exists(hostname, javaHome):
logger.error("Directory not found: [" + javaHome + "] in host [" + hostname + "]", extra=d)
return False
logger.debug("checking directory [" + kafkaHome + "] in host [" + hostname + "]", extra=d)
if not remote_host_directory_exists(hostname, kafkaHome):
logger.info("Directory not found: [" + kafkaHome + "] in host [" + hostname + "]", extra=d)
if hostname == "localhost":
return False
else:
localKafkaSourcePath = systemTestEnv.SYSTEM_TEST_BASE_DIR + "/.."
logger.debug("copying local copy of [" + localKafkaSourcePath + "] to " + hostname + ":" + kafkaHome, extra=d)
copy_source_to_remote_hosts(hostname, localKafkaSourcePath, kafkaHome)
return True
def copy_source_to_remote_hosts(hostname, sourceDir, destDir):
cmdStr = "rsync -avz --delete-before " + sourceDir + "/ " + hostname + ":" + destDir
logger.info("executing command [" + cmdStr + "]", extra=d)
subproc = sys_call_return_subproc(cmdStr)
for line in subproc.stdout.readlines():
dummyVar = 1
def remove_kafka_home_dir_at_remote_hosts(hostname, kafkaHome):
if remote_host_file_exists(hostname, kafkaHome + "/bin/kafka-run-class.sh"):
cmdStr = "ssh " + hostname + " 'chmod -R 777 " + kafkaHome + "'"
logger.info("executing command [" + cmdStr + "]", extra=d)
sys_call(cmdStr)
cmdStr = "ssh " + hostname + " 'rm -rf " + kafkaHome + "'"
logger.info("executing command [" + cmdStr + "]", extra=d)
#sys_call(cmdStr)
    else:
        cmdStr = "ssh " + hostname + " 'rm -rf " + kafkaHome + "'"
        logger.warn("possible destructive command [" + cmdStr + "]", extra=d)
logger.warn("check config file: system_test/cluster_config.properties", extra=d)
logger.warn("aborting test...", extra=d)
sys.exit(1)
def get_md5_for_file(filePathName, blockSize=8192):
    md5 = hashlib.md5()
    f = open(filePathName, 'rb')
    try:
        while True:
            data = f.read(blockSize)
            if not data:
                break
            md5.update(data)
    finally:
        f.close()
    return md5.digest()
def load_cluster_config(clusterConfigPathName, clusterEntityConfigDictList):
# empty the list
clusterEntityConfigDictList[:] = []
# retrieve each entity's data from cluster config json file
# as "dict" and enter them into a "list"
jsonFileContent = open(clusterConfigPathName, "r").read()
jsonData = json.loads(jsonFileContent)
for key, cfgList in jsonData.items():
if key == "cluster_config":
for cfg in cfgList:
clusterEntityConfigDictList.append(cfg)
def setup_remote_hosts_with_testcase_level_cluster_config(systemTestEnv, testCasePathName):
# =======================================================================
# starting a new testcase, check for local cluster_config.json
# =======================================================================
# 1. if there is a xxxx_testsuite/testcase_xxxx/cluster_config.json
# => load it into systemTestEnv.clusterEntityConfigDictList
# 2. if there is NO testcase_xxxx/cluster_config.json but has a xxxx_testsuite/cluster_config.json
    # => restore systemTestEnv.clusterEntityConfigDictListLastFoundInTestSuite
# 3. if there is NO testcase_xxxx/cluster_config.json NOR xxxx_testsuite/cluster_config.json
# => restore system_test/cluster_config.json
testCaseLevelClusterConfigPathName = testCasePathName + "/cluster_config.json"
if os.path.isfile(testCaseLevelClusterConfigPathName):
# if there is a cluster_config.json in this directory, load it and use it for this testsuite
logger.info("found a new cluster_config : " + testCaseLevelClusterConfigPathName, extra=d)
# empty the current cluster config list
systemTestEnv.clusterEntityConfigDictList[:] = []
# load the cluster config for this testcase level
load_cluster_config(testCaseLevelClusterConfigPathName, systemTestEnv.clusterEntityConfigDictList)
# back up this testcase level cluster config
systemTestEnv.clusterEntityConfigDictListLastFoundInTestCase = copy.deepcopy(systemTestEnv.clusterEntityConfigDictList)
elif len(systemTestEnv.clusterEntityConfigDictListLastFoundInTestSuite) > 0:
# if there is NO testcase_xxxx/cluster_config.json, but has a xxxx_testsuite/cluster_config.json
# => restore the config in xxxx_testsuite/cluster_config.json
# empty the current cluster config list
systemTestEnv.clusterEntityConfigDictList[:] = []
# restore the system_test/cluster_config.json
systemTestEnv.clusterEntityConfigDictList = copy.deepcopy(systemTestEnv.clusterEntityConfigDictListLastFoundInTestSuite)
else:
# if there is NONE, restore the config in system_test/cluster_config.json
# empty the current cluster config list
systemTestEnv.clusterEntityConfigDictList[:] = []
# restore the system_test/cluster_config.json
systemTestEnv.clusterEntityConfigDictList = copy.deepcopy(systemTestEnv.clusterEntityConfigDictListInSystemTestLevel)
# set up remote hosts
if not setup_remote_hosts(systemTestEnv):
logger.error("Remote hosts sanity check failed. Aborting test ...", extra=d)
print
sys.exit(1)
print
def setup_remote_hosts_with_testsuite_level_cluster_config(systemTestEnv, testModulePathName):
# =======================================================================
# starting a new testsuite, check for local cluster_config.json:
# =======================================================================
    # 1. if there is a xxxx_testsuite/cluster_config.json
# => load it into systemTestEnv.clusterEntityConfigDictList
    # 2. if there is NO xxxx_testsuite/cluster_config.json
# => restore system_test/cluster_config.json
testSuiteLevelClusterConfigPathName = testModulePathName + "/cluster_config.json"
if os.path.isfile(testSuiteLevelClusterConfigPathName):
# if there is a cluster_config.json in this directory, load it and use it for this testsuite
logger.info("found a new cluster_config : " + testSuiteLevelClusterConfigPathName, extra=d)
# empty the current cluster config list
systemTestEnv.clusterEntityConfigDictList[:] = []
# load the cluster config for this testsuite level
load_cluster_config(testSuiteLevelClusterConfigPathName, systemTestEnv.clusterEntityConfigDictList)
# back up this testsuite level cluster config
systemTestEnv.clusterEntityConfigDictListLastFoundInTestSuite = copy.deepcopy(systemTestEnv.clusterEntityConfigDictList)
else:
# if there is NONE, restore the config in system_test/cluster_config.json
# empty the last testsuite level cluster config list
systemTestEnv.clusterEntityConfigDictListLastFoundInTestSuite[:] = []
# empty the current cluster config list
systemTestEnv.clusterEntityConfigDictList[:] = []
# restore the system_test/cluster_config.json
systemTestEnv.clusterEntityConfigDictList = copy.deepcopy(systemTestEnv.clusterEntityConfigDictListInSystemTestLevel)
# set up remote hosts
if not setup_remote_hosts(systemTestEnv):
logger.error("Remote hosts sanity check failed. Aborting test ...", extra=d)
print
sys.exit(1)
print
# =================================================
# lists_diff_count
# - find the no. of different items in both lists
# - both lists need not be sorted
# - input lists won't be changed
# =================================================
def lists_diff_count(a, b):
c = list(b)
d = []
for item in a:
try:
c.remove(item)
except:
d.append(item)
if len(d) > 0:
print "#### Mismatch MessageID"
print d
return len(c) + len(d)
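# e.g. lists_diff_count(['8','4','3'], ['8','3','5']) prints ['4'] under the
# mismatch header and returns 2 ('4' only in list a, '5' only in list b).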
# =================================================
# subtract_list
# - subtract items in listToSubtract from mainList
# and return the resulting list
# - both lists need not be sorted
# - input lists won't be changed
# =================================================
def subtract_list(mainList, listToSubtract):
remainingList = list(mainList)
for item in listToSubtract:
try:
remainingList.remove(item)
except:
pass
return remainingList
# =================================================
# diff_lists
# - find the diff of 2 lists and return the
# total no. of mismatch from both lists
# - diff of both lists includes:
# - no. of items mismatch
# - ordering of the items
#
# sample lists:
# a = ['8','4','3','2','1']
# b = ['8','3','4','2','1']
#
# difflib will return the following:
# 8
# + 3
# 4
# - 3
# 2
# 1
#
# diff_lists(a,b) returns 2 and prints the following:
# #### only in seq 2 : + 3
# #### only in seq 1 : - 3
# =================================================
def diff_lists(a, b):
    mismatchCount = 0
    # use 'differ' so the module-level logging extra dict 'd' is not shadowed
    differ = difflib.Differ()
    diff = differ.compare(a,b)
for item in diff:
result = item[0:1].strip()
if len(result) > 0:
mismatchCount += 1
if '-' in result:
logger.debug("#### only in seq 1 : " + item, extra=d)
elif '+' in result:
logger.debug("#### only in seq 2 : " + item, extra=d)
return mismatchCount
|
halostatue/ansible
|
refs/heads/devel
|
lib/ansible/module_utils/cloudstack.py
|
19
|
# -*- coding: utf-8 -*-
#
# (c) 2015, René Moser <mail@renemoser.net>
#
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import time
try:
from cs import CloudStack, CloudStackException, read_config
has_lib_cs = True
except ImportError:
has_lib_cs = False
def cs_argument_spec():
return dict(
api_key = dict(default=None),
api_secret = dict(default=None, no_log=True),
api_url = dict(default=None),
api_http_method = dict(choices=['get', 'post'], default='get'),
api_timeout = dict(type='int', default=10),
api_region = dict(default='cloudstack'),
)
def cs_required_together():
return [['api_key', 'api_secret', 'api_url']]
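# Example usage in a module built on this snippet (illustrative sketch; the
# AnsibleModule wiring shown is the usual pattern, not part of this file):
#
#   module = AnsibleModule(
#       argument_spec=cs_argument_spec(),
#       required_together=cs_required_together(),
#       supports_check_mode=True,
#   )
#   acs = AnsibleCloudStack(module)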
class AnsibleCloudStack(object):
def __init__(self, module):
if not has_lib_cs:
module.fail_json(msg="python library cs required: pip install cs")
self.result = {
'changed': False,
}
# Common returns, will be merged with self.returns
# search_for_key: replace_with_key
self.common_returns = {
'id': 'id',
'name': 'name',
'created': 'created',
'zonename': 'zone',
'state': 'state',
'project': 'project',
'account': 'account',
'domain': 'domain',
'displaytext': 'display_text',
'displayname': 'display_name',
'description': 'description',
}
# Init returns dict for use in subclasses
self.returns = {}
# these values will be casted to int
self.returns_to_int = {}
self.module = module
self._connect()
self.domain = None
self.account = None
self.project = None
self.ip_address = None
self.zone = None
self.vm = None
self.os_type = None
self.hypervisor = None
self.capabilities = None
self.tags = None
def _connect(self):
api_key = self.module.params.get('api_key')
        api_secret = self.module.params.get('api_secret')
api_url = self.module.params.get('api_url')
api_http_method = self.module.params.get('api_http_method')
api_timeout = self.module.params.get('api_timeout')
if api_key and api_secret and api_url:
self.cs = CloudStack(
endpoint=api_url,
key=api_key,
secret=api_secret,
timeout=api_timeout,
method=api_http_method
)
else:
api_region = self.module.params.get('api_region', 'cloudstack')
self.cs = CloudStack(**read_config(api_region))
def get_or_fallback(self, key=None, fallback_key=None):
value = self.module.params.get(key)
if not value:
value = self.module.params.get(fallback_key)
return value
# TODO: for backward compatibility only, remove if not used anymore
def _has_changed(self, want_dict, current_dict, only_keys=None):
return self.has_changed(want_dict=want_dict, current_dict=current_dict, only_keys=only_keys)
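    # e.g. (illustrative): has_changed({'displaytext': 'web'},
    # {'displaytext': 'db', 'id': 'x'}) -> True; keys present only in
    # current_dict are ignored and int/str values are coerced first.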
def has_changed(self, want_dict, current_dict, only_keys=None):
for key, value in want_dict.iteritems():
# Optionally limit by a list of keys
if only_keys and key not in only_keys:
                continue
# Skip None values
if value is None:
                continue
if key in current_dict:
# API returns string for int in some cases, just to make sure
if isinstance(value, int):
current_dict[key] = int(current_dict[key])
elif isinstance(value, str):
current_dict[key] = str(current_dict[key])
                # Only need to detect a single change, not every item
if value != current_dict[key]:
return True
return False
    def _get_by_key(self, key=None, my_dict=None):
        my_dict = {} if my_dict is None else my_dict  # avoid a shared mutable default
        if key:
if key in my_dict:
return my_dict[key]
self.module.fail_json(msg="Something went wrong: %s not found" % key)
return my_dict
def get_project(self, key=None):
if self.project:
return self._get_by_key(key, self.project)
project = self.module.params.get('project')
if not project:
return None
args = {}
args['account'] = self.get_account(key='name')
args['domainid'] = self.get_domain(key='id')
projects = self.cs.listProjects(**args)
if projects:
for p in projects['project']:
if project.lower() in [ p['name'].lower(), p['id'] ]:
self.project = p
return self._get_by_key(key, self.project)
self.module.fail_json(msg="project '%s' not found" % project)
def get_ip_address(self, key=None):
if self.ip_address:
return self._get_by_key(key, self.ip_address)
ip_address = self.module.params.get('ip_address')
if not ip_address:
self.module.fail_json(msg="IP address param 'ip_address' is required")
args = {}
args['ipaddress'] = ip_address
args['account'] = self.get_account(key='name')
args['domainid'] = self.get_domain(key='id')
args['projectid'] = self.get_project(key='id')
ip_addresses = self.cs.listPublicIpAddresses(**args)
if not ip_addresses:
self.module.fail_json(msg="IP address '%s' not found" % args['ipaddress'])
self.ip_address = ip_addresses['publicipaddress'][0]
return self._get_by_key(key, self.ip_address)
def get_vm(self, key=None):
if self.vm:
return self._get_by_key(key, self.vm)
vm = self.module.params.get('vm')
if not vm:
self.module.fail_json(msg="Virtual machine param 'vm' is required")
args = {}
args['account'] = self.get_account(key='name')
args['domainid'] = self.get_domain(key='id')
args['projectid'] = self.get_project(key='id')
args['zoneid'] = self.get_zone(key='id')
vms = self.cs.listVirtualMachines(**args)
if vms:
for v in vms['virtualmachine']:
if vm in [ v['name'], v['displayname'], v['id'] ]:
self.vm = v
return self._get_by_key(key, self.vm)
self.module.fail_json(msg="Virtual machine '%s' not found" % vm)
def get_zone(self, key=None):
if self.zone:
return self._get_by_key(key, self.zone)
zone = self.module.params.get('zone')
zones = self.cs.listZones()
# use the first zone if no zone param given
if not zone:
self.zone = zones['zone'][0]
return self._get_by_key(key, self.zone)
if zones:
for z in zones['zone']:
if zone in [ z['name'], z['id'] ]:
self.zone = z
return self._get_by_key(key, self.zone)
self.module.fail_json(msg="zone '%s' not found" % zone)
def get_os_type(self, key=None):
if self.os_type:
            return self._get_by_key(key, self.os_type)
os_type = self.module.params.get('os_type')
if not os_type:
return None
os_types = self.cs.listOsTypes()
if os_types:
for o in os_types['ostype']:
if os_type in [ o['description'], o['id'] ]:
self.os_type = o
return self._get_by_key(key, self.os_type)
self.module.fail_json(msg="OS type '%s' not found" % os_type)
def get_hypervisor(self):
if self.hypervisor:
return self.hypervisor
hypervisor = self.module.params.get('hypervisor')
hypervisors = self.cs.listHypervisors()
# use the first hypervisor if no hypervisor param given
if not hypervisor:
self.hypervisor = hypervisors['hypervisor'][0]['name']
return self.hypervisor
for h in hypervisors['hypervisor']:
if hypervisor.lower() == h['name'].lower():
self.hypervisor = h['name']
return self.hypervisor
self.module.fail_json(msg="Hypervisor '%s' not found" % hypervisor)
def get_account(self, key=None):
if self.account:
return self._get_by_key(key, self.account)
account = self.module.params.get('account')
if not account:
return None
domain = self.module.params.get('domain')
if not domain:
self.module.fail_json(msg="Account must be specified with Domain")
args = {}
args['name'] = account
args['domainid'] = self.get_domain(key='id')
args['listall'] = True
accounts = self.cs.listAccounts(**args)
if accounts:
self.account = accounts['account'][0]
return self._get_by_key(key, self.account)
self.module.fail_json(msg="Account '%s' not found" % account)
def get_domain(self, key=None):
if self.domain:
return self._get_by_key(key, self.domain)
domain = self.module.params.get('domain')
if not domain:
return None
args = {}
args['listall'] = True
domains = self.cs.listDomains(**args)
if domains:
for d in domains['domain']:
if d['path'].lower() in [ domain.lower(), "root/" + domain.lower(), "root" + domain.lower() ]:
self.domain = d
return self._get_by_key(key, self.domain)
self.module.fail_json(msg="Domain '%s' not found" % domain)
def get_tags(self, resource=None):
if not self.tags:
args = {}
args['projectid'] = self.get_project(key='id')
args['account'] = self.get_account(key='name')
args['domainid'] = self.get_domain(key='id')
args['resourceid'] = resource['id']
response = self.cs.listTags(**args)
self.tags = response.get('tag', [])
existing_tags = []
if self.tags:
for tag in self.tags:
existing_tags.append({'key': tag['key'], 'value': tag['value']})
return existing_tags
def _process_tags(self, resource, resource_type, tags, operation="create"):
if tags:
self.result['changed'] = True
if not self.module.check_mode:
args = {}
args['resourceids'] = resource['id']
args['resourcetype'] = resource_type
args['tags'] = tags
if operation == "create":
self.cs.createTags(**args)
else:
self.cs.deleteTags(**args)
def _tags_that_should_exist_or_be_updated(self, resource, tags):
existing_tags = self.get_tags(resource)
return [tag for tag in tags if tag not in existing_tags]
def _tags_that_should_not_exist(self, resource, tags):
existing_tags = self.get_tags(resource)
return [tag for tag in existing_tags if tag not in tags]
def ensure_tags(self, resource, resource_type=None):
if not resource_type or not resource:
self.module.fail_json(msg="Error: Missing resource or resource_type for tags.")
if 'tags' in resource:
tags = self.module.params.get('tags')
if tags is not None:
self._process_tags(resource, resource_type, self._tags_that_should_exist_or_be_updated(resource, tags))
self._process_tags(resource, resource_type, self._tags_that_should_not_exist(resource, tags), operation="delete")
self.tags = None
resource['tags'] = self.get_tags(resource)
return resource
def get_capabilities(self, key=None):
if self.capabilities:
return self._get_by_key(key, self.capabilities)
capabilities = self.cs.listCapabilities()
self.capabilities = capabilities['capability']
return self._get_by_key(key, self.capabilities)
# TODO: for backward compatibility only, remove if not used anymore
def _poll_job(self, job=None, key=None):
return self.poll_job(job=job, key=key)
def poll_job(self, job=None, key=None):
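        # CloudStack async jobs report jobstatus 0 while pending; poll every
        # two seconds until the job finishes, failing on any errortext.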
if 'jobid' in job:
while True:
res = self.cs.queryAsyncJobResult(jobid=job['jobid'])
if res['jobstatus'] != 0 and 'jobresult' in res:
if 'errortext' in res['jobresult']:
self.module.fail_json(msg="Failed: '%s'" % res['jobresult']['errortext'])
if key and key in res['jobresult']:
job = res['jobresult'][key]
break
time.sleep(2)
return job
def get_result(self, resource):
if resource:
returns = self.common_returns.copy()
returns.update(self.returns)
for search_key, return_key in returns.iteritems():
if search_key in resource:
self.result[return_key] = resource[search_key]
            # The API does not always return int when it should; coerce explicitly.
for search_key, return_key in self.returns_to_int.iteritems():
if search_key in resource:
self.result[return_key] = int(resource[search_key])
# Special handling for tags
if 'tags' in resource:
self.result['tags'] = []
for tag in resource['tags']:
result_tag = {}
result_tag['key'] = tag['key']
result_tag['value'] = tag['value']
self.result['tags'].append(result_tag)
return self.result
|
sodo13/openpli-gls
|
refs/heads/master
|
tests/test_timer.py
|
56
|
import enigma
import sys
import time
import tests
#enigma.reset()
def test_timer(repeat = 0, timer_start = 3600, timer_length = 1000, sim_length = 86400 * 7):
import NavigationInstance
at = time.time()
t = NavigationInstance.instance.RecordTimer
print t
print "old mwt:", t.MaxWaitTime
t.MaxWaitTime = 86400 * 1000
# hack:
NavigationInstance.instance.SleepTimer.MaxWaitTime = 86400 * 1000
t.processed_timers = [ ]
t.timer_list = [ ]
# generate a timer to test
import xml.etree.cElementTree
import RecordTimer
timer = RecordTimer.createTimer(xml.etree.cElementTree.fromstring(
"""
<timer
begin="%d"
end="%d"
serviceref="1:0:1:6DD2:44D:1:C00000:0:0:0:"
repeated="%d"
name="Test Event Name"
description="Test Event Description"
afterevent="nothing"
eit="56422"
disabled="0"
justplay="0">
</timer>""" % (at + timer_start, at + timer_start + timer_length, repeat)
))
t.record(timer)
# run virtual environment
enigma.run(sim_length)
print "done."
timers = t.processed_timers + t.timer_list
print "start: %s" % (time.ctime(at + 10))
assert len(timers) == 1
for t in timers:
print "begin=%d, end=%d, repeated=%d, state=%d" % (t.begin - at, t.end - at, t.repeated, t.state)
print "begin: %s" % (time.ctime(t.begin))
print "end: %s" % (time.ctime(t.end))
# if repeat, check if the calculated repeated time of day matches the initial time of day
if repeat:
t_initial = time.localtime(at + timer_start)
t_repeated = time.localtime(timers[0].begin)
print t_initial
print t_repeated
if t_initial[3:6] != t_repeated[3:6]:
raise tests.TestError("repeated timer time of day does not match")
import FakeNotifications
#sys.modules["Tools.Notifications"] = FakeNotifications
#sys.modules["Tools.NumericalTextInput.NumericalTextInput"] = FakeNotifications
# required stuff for timer (we try to keep this minimal)
enigma.init_nav()
enigma.init_record_config()
enigma.init_parental_control()
from events import log
import calendar
import os
# we are operating in CET/CEST
os.environ['TZ'] = 'CET'
time.tzset()
#log(test_timer, test_name = "test_timer_repeating", base_time = calendar.timegm((2007, 3, 1, 12, 0, 0)), repeat=0x7f, sim_length = 86400 * 7)
log(test_timer, test_name = "test_timer_repeating_dst_skip", base_time = calendar.timegm((2007, 03, 20, 0, 0, 0)), timer_start = 3600, repeat=0x7f, sim_length = 86400 * 7)
#log(test_timer, test_name = "test_timer_repeating_dst_start", base_time = calendar.timegm((2007, 03, 20, 0, 0, 0)), timer_start = 10000, repeat=0x7f, sim_length = 86400 * 7)
|
Jeongseob/xen-coboost-sched
|
refs/heads/master
|
tools/python/xen/xend/XendVnet.py
|
52
|
#============================================================================
# This library is free software; you can redistribute it and/or
# modify it under the terms of version 2.1 of the GNU Lesser General Public
# License as published by the Free Software Foundation.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#============================================================================
# Copyright (C) 2004, 2005 Mike Wray <mike.wray@hp.com>
# Copyright (C) 2005 XenSource Ltd
#============================================================================
"""Handler for vnet operations.
"""
from xen.util import Brctl
from xen.xend import sxp
from xen.xend.XendError import XendError
from xen.xend.XendLogging import log
from xen.xend.xenstore.xstransact import xstransact
def vnet_cmd(cmd):
out = None
try:
try:
out = file("/proc/vnet/policy", "wb")
sxp.show(cmd, out)
except IOError, ex:
raise XendError(str(ex))
finally:
if out: out.close()
class XendVnetInfo:
vifctl_ops = {'up': 'vif.add', 'down': 'vif.del'}
def __init__(self, dbpath, config=None):
if config:
self.id = str(sxp.child_value(config, 'id'))
self.dbid = self.id.replace(':', '-')
self.dbpath = dbpath + '/' + self.dbid
self.config = config
else:
self.dbpath = dbpath
self.importFromDB()
self.bridge = sxp.child_value(self.config, 'bridge')
if not self.bridge:
self.bridge = "vnet%s" % self.id
self.vnetif = sxp.child_value(self.config, 'vnetif')
if not self.vnetif:
self.vnetif = "vnif%s" % self.id
def exportToDB(self, save=False, sync=False):
to_store = {
'id' : self.id,
'dbid' : self.dbid,
'config' : sxp.to_string(self.config)
}
xstransact.Write(self.dbpath, to_store)
def importFromDB(self):
(self.id, self.dbid, c) = xstransact.Gather(self.dbpath,
('id', str),
('dbid', str),
('config', str))
self.config = sxp.from_string(c)
def sxpr(self):
return self.config
def configure(self):
log.info("Configuring vnet %s", self.id)
val = vnet_cmd(['vnet.add'] + sxp.children(self.config))
Brctl.bridge_create(self.bridge)
Brctl.vif_bridge_add({'bridge': self.bridge, 'vif': self.vnetif})
return val
def delete(self):
log.info("Deleting vnet %s", self.id)
Brctl.vif_bridge_rem({'bridge': self.bridge, 'vif': self.vnetif})
Brctl.bridge_del(self.bridge)
val = vnet_cmd(['vnet.del', self.id])
xstransact.Remove(self.dbpath)
return val
def vifctl(self, op, vif, vmac):
try:
fn = self.vifctl_ops[op]
return vnet_cmd([fn, ['vnet', self.id], ['vif', vif], ['vmac', vmac]])
except XendError:
log.warning("vifctl failed: op=%s vif=%s mac=%s", op, vif, vmac)
class XendVnet:
"""Index of all vnets. Singleton.
"""
dbpath = "/vnet"
def __init__(self):
# Table of vnet info indexed by vnet id.
self.vnet = {}
listing = xstransact.List(self.dbpath)
for entry in listing:
try:
info = XendVnetInfo(self.dbpath + '/' + entry)
self.vnet[info.id] = info
info.configure()
            except XendError, ex:
                log.warning("Failed to configure vnet %s: %s", str(entry), str(ex))
except Exception, ex:
log.exception("Vnet error")
xstransact.Remove(self.dbpath + '/' + entry)
def vnet_of_bridge(self, bridge):
"""Get the vnet for a bridge (if any).
@param bridge: bridge name
@return vnet or None
"""
for v in self.vnet.values():
if v.bridge == bridge:
return v
else:
return None
def vnet_ls(self):
"""List all vnet ids.
"""
return self.vnet.keys()
def vnets(self):
"""List all vnets.
"""
return self.vnet.values()
def vnet_get(self, id):
"""Get a vnet.
@param id vnet id
"""
id = str(id)
return self.vnet.get(id)
def vnet_create(self, config):
"""Create a vnet.
@param config: config
"""
info = XendVnetInfo(self.dbpath, config=config)
self.vnet[info.id] = info
info.exportToDB()
info.configure()
def vnet_delete(self, id):
"""Delete a vnet.
@param id: vnet id
"""
info = self.vnet_get(id)
if info:
del self.vnet[id]
info.delete()
def instance():
global inst
try:
inst
    except NameError:
inst = XendVnet()
return inst
|
jessstrap/servotk
|
refs/heads/master
|
tests/wpt/web-platform-tests/cors/resources/checkandremove.py
|
253
|
def main(request, response):
token = request.GET.first("token")
if request.server.stash.remove(token) is not None:
return "1"
else:
return "0"
|
michalliu/OpenWrt-Firefly-Libraries
|
refs/heads/master
|
staging_dir/target-mipsel_1004kc+dsp_uClibc-0.9.33.2/usr/lib/python3.4/test/test_sundry.py
|
84
|
"""Do a minimal test of all the modules that aren't otherwise tested."""
import importlib
import sys
from test import support
import unittest
class TestUntestedModules(unittest.TestCase):
def test_untested_modules_can_be_imported(self):
untested = ('bdb', 'encodings', 'formatter',
'nturl2path', 'tabnanny')
with support.check_warnings(quiet=True):
for name in untested:
try:
support.import_module('test.test_{}'.format(name))
except unittest.SkipTest:
importlib.import_module(name)
else:
self.fail('{} has tests even though test_sundry claims '
'otherwise'.format(name))
import distutils.bcppcompiler
import distutils.ccompiler
import distutils.cygwinccompiler
import distutils.filelist
if sys.platform.startswith('win'):
import distutils.msvccompiler
import distutils.text_file
import distutils.unixccompiler
import distutils.command.bdist_dumb
if sys.platform.startswith('win'):
import distutils.command.bdist_msi
import distutils.command.bdist
import distutils.command.bdist_rpm
import distutils.command.bdist_wininst
import distutils.command.build_clib
import distutils.command.build_ext
import distutils.command.build
import distutils.command.clean
import distutils.command.config
import distutils.command.install_data
import distutils.command.install_egg_info
import distutils.command.install_headers
import distutils.command.install_lib
import distutils.command.register
import distutils.command.sdist
import distutils.command.upload
import html.entities
try:
import tty # Not available on Windows
except ImportError:
if support.verbose:
print("skipping tty")
if __name__ == "__main__":
unittest.main()
|
kubeflow/katib
|
refs/heads/master
|
test/suggestion/v1beta1/test_hyperopt_service.py
|
1
|
# Copyright 2021 The Kubeflow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import grpc
import grpc_testing
from pkg.apis.manager.v1beta1.python import api_pb2
from pkg.suggestion.v1beta1.hyperopt.service import HyperoptService
class TestHyperopt(unittest.TestCase):
def setUp(self):
servicers = {
api_pb2.DESCRIPTOR.services_by_name['Suggestion']: HyperoptService(
)
}
self.test_server = grpc_testing.server_from_dictionary(
servicers, grpc_testing.strict_real_time())
def test_get_suggestion(self):
trials = [
api_pb2.Trial(
name="test-asfjh",
spec=api_pb2.TrialSpec(
objective=api_pb2.ObjectiveSpec(
type=api_pb2.MAXIMIZE,
objective_metric_name="metric-2",
goal=0.9
),
parameter_assignments=api_pb2.TrialSpec.ParameterAssignments(
assignments=[
api_pb2.ParameterAssignment(
name="param-1",
value="2",
),
api_pb2.ParameterAssignment(
name="param-2",
value="cat1",
),
api_pb2.ParameterAssignment(
name="param-3",
value="2",
),
api_pb2.ParameterAssignment(
name="param-4",
value="3.44",
)
]
)
),
status=api_pb2.TrialStatus(
observation=api_pb2.Observation(
metrics=[
api_pb2.Metric(
name="metric=1",
value="435"
),
api_pb2.Metric(
name="metric=2",
value="5643"
),
]
)
)
),
api_pb2.Trial(
name="test-234hs",
spec=api_pb2.TrialSpec(
objective=api_pb2.ObjectiveSpec(
type=api_pb2.MAXIMIZE,
objective_metric_name="metric-2",
goal=0.9
),
parameter_assignments=api_pb2.TrialSpec.ParameterAssignments(
assignments=[
api_pb2.ParameterAssignment(
name="param-1",
value="3",
),
api_pb2.ParameterAssignment(
name="param-2",
value="cat2",
),
api_pb2.ParameterAssignment(
name="param-3",
value="6",
),
api_pb2.ParameterAssignment(
name="param-4",
value="4.44",
)
]
)
),
status=api_pb2.TrialStatus(
observation=api_pb2.Observation(
metrics=[
api_pb2.Metric(
name="metric=1",
value="123"
),
api_pb2.Metric(
name="metric=2",
value="3028"
),
]
)
)
)
]
experiment = api_pb2.Experiment(
name="test",
spec=api_pb2.ExperimentSpec(
algorithm=api_pb2.AlgorithmSpec(
algorithm_name="tpe",
algorithm_settings=[
api_pb2.AlgorithmSetting(
name="random_state",
value="10"
),
api_pb2.AlgorithmSetting(
name="gamma",
value="0.25"
),
api_pb2.AlgorithmSetting(
name="prior_weight",
value="1.0"
),
api_pb2.AlgorithmSetting(
name="n_EI_candidates",
value="24"
),
],
),
objective=api_pb2.ObjectiveSpec(
type=api_pb2.MAXIMIZE,
goal=0.9
),
parameter_specs=api_pb2.ExperimentSpec.ParameterSpecs(
parameters=[
api_pb2.ParameterSpec(
name="param-1",
parameter_type=api_pb2.INT,
feasible_space=api_pb2.FeasibleSpace(
max="5", min="1", list=[]),
),
api_pb2.ParameterSpec(
name="param-2",
parameter_type=api_pb2.CATEGORICAL,
feasible_space=api_pb2.FeasibleSpace(
max=None, min=None, list=["cat1", "cat2", "cat3"])
),
api_pb2.ParameterSpec(
name="param-3",
parameter_type=api_pb2.DISCRETE,
feasible_space=api_pb2.FeasibleSpace(
max=None, min=None, list=["3", "2", "6"])
),
api_pb2.ParameterSpec(
name="param-4",
parameter_type=api_pb2.DOUBLE,
feasible_space=api_pb2.FeasibleSpace(
max="5", min="1", list=[])
)
]
)
)
)
request = api_pb2.GetSuggestionsRequest(
experiment=experiment,
trials=trials,
request_number=2,
)
get_suggestion = self.test_server.invoke_unary_unary(
method_descriptor=(api_pb2.DESCRIPTOR
.services_by_name['Suggestion']
.methods_by_name['GetSuggestions']),
invocation_metadata={},
request=request, timeout=1)
response, metadata, code, details = get_suggestion.termination()
print(response.parameter_assignments)
self.assertEqual(code, grpc.StatusCode.OK)
self.assertEqual(2, len(response.parameter_assignments))
def test_validate_algorithm_settings(self):
experiment_spec = [None]
def call_validate():
experiment = api_pb2.Experiment(name="test", spec=experiment_spec[0])
request = api_pb2.ValidateAlgorithmSettingsRequest(experiment=experiment)
validate_algorithm_settings = self.test_server.invoke_unary_unary(
method_descriptor=(api_pb2.DESCRIPTOR
.services_by_name['Suggestion']
.methods_by_name['ValidateAlgorithmSettings']),
invocation_metadata={},
request=request, timeout=1)
return validate_algorithm_settings.termination()
# valid cases
algorithm_spec = api_pb2.AlgorithmSpec(
algorithm_name="tpe",
algorithm_settings=[
api_pb2.AlgorithmSetting(
name="random_state",
value="10"
),
api_pb2.AlgorithmSetting(
name="gamma",
value="0.25"
),
api_pb2.AlgorithmSetting(
name="prior_weight",
value="1.0"
),
api_pb2.AlgorithmSetting(
name="n_EI_candidates",
value="24"
),
],
)
experiment_spec[0] = api_pb2.ExperimentSpec(algorithm=algorithm_spec)
self.assertEqual(call_validate()[2], grpc.StatusCode.OK)
# invalid cases
experiment_spec[0] = api_pb2.ExperimentSpec(
algorithm=api_pb2.AlgorithmSpec(algorithm_name="unknown"))
_, _, code, details = call_validate()
self.assertEqual(code, grpc.StatusCode.INVALID_ARGUMENT)
self.assertEqual(details, 'unknown algorithm name unknown')
experiment_spec[0] = api_pb2.ExperimentSpec(
algorithm=api_pb2.AlgorithmSpec(
algorithm_name="random",
algorithm_settings=[
api_pb2.AlgorithmSetting(name="unknown_conf", value="1111")]
))
_, _, code, details = call_validate()
self.assertEqual(code, grpc.StatusCode.INVALID_ARGUMENT)
self.assertEqual(details, 'unknown setting unknown_conf for algorithm random')
experiment_spec[0] = api_pb2.ExperimentSpec(
algorithm=api_pb2.AlgorithmSpec(
algorithm_name="tpe",
algorithm_settings=[
api_pb2.AlgorithmSetting(name="gamma", value="1.5")]
))
_, _, code, details = call_validate()
self.assertEqual(code, grpc.StatusCode.INVALID_ARGUMENT)
self.assertEqual(details, 'gamma should be in the range of (0, 1)')
experiment_spec[0] = api_pb2.ExperimentSpec(
algorithm=api_pb2.AlgorithmSpec(
algorithm_name="tpe",
algorithm_settings=[
api_pb2.AlgorithmSetting(name="n_EI_candidates", value="0")]
))
_, _, code, details = call_validate()
self.assertEqual(code, grpc.StatusCode.INVALID_ARGUMENT)
self.assertEqual(details, 'n_EI_candidates should be great than zero')
experiment_spec[0] = api_pb2.ExperimentSpec(
algorithm=api_pb2.AlgorithmSpec(
algorithm_name="tpe",
algorithm_settings=[
api_pb2.AlgorithmSetting(name="random_state", value="-1")]
))
_, _, code, details = call_validate()
self.assertEqual(code, grpc.StatusCode.INVALID_ARGUMENT)
self.assertEqual(details, 'random_state should be great or equal than zero')
experiment_spec[0] = api_pb2.ExperimentSpec(
algorithm=api_pb2.AlgorithmSpec(
algorithm_name="tpe",
algorithm_settings=[
api_pb2.AlgorithmSetting(name="prior_weight", value="aaa")]
))
_, _, code, details = call_validate()
self.assertEqual(code, grpc.StatusCode.INVALID_ARGUMENT)
self.assertTrue(details.startswith('failed to validate prior_weight(aaa)'))
if __name__ == '__main__':
unittest.main()
|
doismellburning/edx-platform
|
refs/heads/master
|
lms/tests.py
|
54
|
"""Tests for the lms module itself."""
import mimetypes
from mock import patch
from django.test import TestCase
from django.core.urlresolvers import reverse
from edxmako import add_lookup, LOOKUP
from lms import startup
from xmodule.modulestore.tests.factories import CourseFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
class LmsModuleTests(TestCase):
"""
Tests for lms module itself.
"""
def test_new_mimetypes(self):
extensions = ['eot', 'otf', 'ttf', 'woff']
for extension in extensions:
mimetype, _ = mimetypes.guess_type('test.' + extension)
self.assertIsNotNone(mimetype)
class TemplateLookupTests(TestCase):
"""
Tests for TemplateLookup.
"""
def test_add_lookup_to_main(self):
"""Test that any template directories added are not cleared when microsites are enabled."""
add_lookup('main', 'external_module', __name__)
directories = LOOKUP['main'].directories
self.assertEqual(len([dir for dir in directories if 'external_module' in dir]), 1)
# This should not clear the directories list
startup.enable_microsites()
directories = LOOKUP['main'].directories
self.assertEqual(len([dir for dir in directories if 'external_module' in dir]), 1)
@patch.dict('django.conf.settings.FEATURES', {'ENABLE_FEEDBACK_SUBMISSION': True})
class HelpModalTests(ModuleStoreTestCase):
"""Tests for the help modal"""
def setUp(self):
super(HelpModalTests, self).setUp()
self.course = CourseFactory.create()
def test_simple_test(self):
"""
Simple test to make sure that you don't get a 500 error when the modal
is enabled.
"""
url = reverse('info', args=[self.course.id.to_deprecated_string()])
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
|
adw0rd/lettuce
|
refs/heads/master
|
tests/integration/lib/Django-1.2.5/django/views/generic/date_based.py
|
65
|
import datetime
import time
from django.template import loader, RequestContext
from django.core.exceptions import ObjectDoesNotExist
from django.core.xheaders import populate_xheaders
from django.db.models.fields import DateTimeField
from django.http import Http404, HttpResponse
def archive_index(request, queryset, date_field, num_latest=15,
template_name=None, template_loader=loader,
extra_context=None, allow_empty=True, context_processors=None,
mimetype=None, allow_future=False, template_object_name='latest'):
"""
Generic top-level archive of date-based objects.
Templates: ``<app_label>/<model_name>_archive.html``
Context:
date_list
List of years
latest
Latest N (defaults to 15) objects by date
"""
if extra_context is None: extra_context = {}
model = queryset.model
if not allow_future:
queryset = queryset.filter(**{'%s__lte' % date_field: datetime.datetime.now()})
date_list = queryset.dates(date_field, 'year')[::-1]
if not date_list and not allow_empty:
raise Http404("No %s available" % model._meta.verbose_name)
if date_list and num_latest:
latest = queryset.order_by('-'+date_field)[:num_latest]
else:
latest = None
if not template_name:
template_name = "%s/%s_archive.html" % (model._meta.app_label, model._meta.object_name.lower())
t = template_loader.get_template(template_name)
c = RequestContext(request, {
'date_list' : date_list,
template_object_name : latest,
}, context_processors)
for key, value in extra_context.items():
if callable(value):
c[key] = value()
else:
c[key] = value
return HttpResponse(t.render(c), mimetype=mimetype)
def archive_year(request, year, queryset, date_field, template_name=None,
template_loader=loader, extra_context=None, allow_empty=False,
context_processors=None, template_object_name='object', mimetype=None,
make_object_list=False, allow_future=False):
"""
Generic yearly archive view.
Templates: ``<app_label>/<model_name>_archive_year.html``
Context:
date_list
List of months in this year with objects
year
This year
object_list
List of objects published in the given month
(Only available if make_object_list argument is True)
"""
if extra_context is None: extra_context = {}
model = queryset.model
now = datetime.datetime.now()
lookup_kwargs = {'%s__year' % date_field: year}
# Only bother to check current date if the year isn't in the past and future objects aren't requested.
if int(year) >= now.year and not allow_future:
lookup_kwargs['%s__lte' % date_field] = now
date_list = queryset.filter(**lookup_kwargs).dates(date_field, 'month')
if not date_list and not allow_empty:
raise Http404
if make_object_list:
object_list = queryset.filter(**lookup_kwargs)
else:
object_list = []
if not template_name:
template_name = "%s/%s_archive_year.html" % (model._meta.app_label, model._meta.object_name.lower())
t = template_loader.get_template(template_name)
c = RequestContext(request, {
'date_list': date_list,
'year': year,
'%s_list' % template_object_name: object_list,
}, context_processors)
for key, value in extra_context.items():
if callable(value):
c[key] = value()
else:
c[key] = value
return HttpResponse(t.render(c), mimetype=mimetype)
def archive_month(request, year, month, queryset, date_field,
month_format='%b', template_name=None, template_loader=loader,
extra_context=None, allow_empty=False, context_processors=None,
template_object_name='object', mimetype=None, allow_future=False):
"""
Generic monthly archive view.
Templates: ``<app_label>/<model_name>_archive_month.html``
Context:
date_list:
List of days in this month with objects
month:
(date) this month
next_month:
(date) the first day of the next month, or None if the next month is in the future
previous_month:
(date) the first day of the previous month
object_list:
list of objects published in the given month
"""
if extra_context is None: extra_context = {}
try:
tt = time.strptime("%s-%s" % (year, month), '%s-%s' % ('%Y', month_format))
date = datetime.date(*tt[:3])
except ValueError:
raise Http404
model = queryset.model
now = datetime.datetime.now()
# Calculate first and last day of month, for use in a date-range lookup.
first_day = date.replace(day=1)
if first_day.month == 12:
last_day = first_day.replace(year=first_day.year + 1, month=1)
else:
last_day = first_day.replace(month=first_day.month + 1)
lookup_kwargs = {
'%s__gte' % date_field: first_day,
'%s__lt' % date_field: last_day,
}
    # Only bother to check current date if the month isn't in the past and future objects aren't requested.
if last_day >= now.date() and not allow_future:
lookup_kwargs['%s__lte' % date_field] = now
object_list = queryset.filter(**lookup_kwargs)
date_list = object_list.dates(date_field, 'day')
if not object_list and not allow_empty:
raise Http404
# Calculate the next month, if applicable.
if allow_future:
next_month = last_day
elif last_day <= datetime.date.today():
next_month = last_day
else:
next_month = None
# Calculate the previous month
if first_day.month == 1:
previous_month = first_day.replace(year=first_day.year-1,month=12)
else:
previous_month = first_day.replace(month=first_day.month-1)
if not template_name:
template_name = "%s/%s_archive_month.html" % (model._meta.app_label, model._meta.object_name.lower())
t = template_loader.get_template(template_name)
c = RequestContext(request, {
'date_list': date_list,
'%s_list' % template_object_name: object_list,
'month': date,
'next_month': next_month,
'previous_month': previous_month,
}, context_processors)
for key, value in extra_context.items():
if callable(value):
c[key] = value()
else:
c[key] = value
return HttpResponse(t.render(c), mimetype=mimetype)
def archive_week(request, year, week, queryset, date_field,
template_name=None, template_loader=loader,
extra_context=None, allow_empty=True, context_processors=None,
template_object_name='object', mimetype=None, allow_future=False):
"""
Generic weekly archive view.
Templates: ``<app_label>/<model_name>_archive_week.html``
Context:
week:
(date) this week
object_list:
list of objects published in the given week
"""
if extra_context is None: extra_context = {}
try:
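        # '%U' counts weeks with Sunday as the first day; pairing the week
        # number with weekday 0 ('%w', Sunday) resolves to the first day of
        # the requested week.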
tt = time.strptime(year+'-0-'+week, '%Y-%w-%U')
date = datetime.date(*tt[:3])
except ValueError:
raise Http404
model = queryset.model
now = datetime.datetime.now()
# Calculate first and last day of week, for use in a date-range lookup.
first_day = date
last_day = date + datetime.timedelta(days=7)
lookup_kwargs = {
'%s__gte' % date_field: first_day,
'%s__lt' % date_field: last_day,
}
# Only bother to check current date if the week isn't in the past and future objects aren't requested.
if last_day >= now.date() and not allow_future:
lookup_kwargs['%s__lte' % date_field] = now
object_list = queryset.filter(**lookup_kwargs)
if not object_list and not allow_empty:
raise Http404
if not template_name:
template_name = "%s/%s_archive_week.html" % (model._meta.app_label, model._meta.object_name.lower())
t = template_loader.get_template(template_name)
c = RequestContext(request, {
'%s_list' % template_object_name: object_list,
'week': date,
    }, context_processors)
for key, value in extra_context.items():
if callable(value):
c[key] = value()
else:
c[key] = value
return HttpResponse(t.render(c), mimetype=mimetype)
def archive_day(request, year, month, day, queryset, date_field,
month_format='%b', day_format='%d', template_name=None,
template_loader=loader, extra_context=None, allow_empty=False,
context_processors=None, template_object_name='object',
mimetype=None, allow_future=False):
"""
Generic daily archive view.
Templates: ``<app_label>/<model_name>_archive_day.html``
Context:
object_list:
list of objects published that day
day:
(datetime) the day
previous_day
(datetime) the previous day
next_day
(datetime) the next day, or None if the current day is today
"""
if extra_context is None: extra_context = {}
try:
tt = time.strptime('%s-%s-%s' % (year, month, day),
'%s-%s-%s' % ('%Y', month_format, day_format))
date = datetime.date(*tt[:3])
except ValueError:
raise Http404
model = queryset.model
now = datetime.datetime.now()
if isinstance(model._meta.get_field(date_field), DateTimeField):
lookup_kwargs = {'%s__range' % date_field: (datetime.datetime.combine(date, datetime.time.min), datetime.datetime.combine(date, datetime.time.max))}
else:
lookup_kwargs = {date_field: date}
# Only bother to check current date if the date isn't in the past and future objects aren't requested.
if date >= now.date() and not allow_future:
lookup_kwargs['%s__lte' % date_field] = now
object_list = queryset.filter(**lookup_kwargs)
if not allow_empty and not object_list:
raise Http404
# Calculate the next day, if applicable.
if allow_future:
next_day = date + datetime.timedelta(days=1)
elif date < datetime.date.today():
next_day = date + datetime.timedelta(days=1)
else:
next_day = None
if not template_name:
template_name = "%s/%s_archive_day.html" % (model._meta.app_label, model._meta.object_name.lower())
t = template_loader.get_template(template_name)
c = RequestContext(request, {
'%s_list' % template_object_name: object_list,
'day': date,
'previous_day': date - datetime.timedelta(days=1),
'next_day': next_day,
}, context_processors)
for key, value in extra_context.items():
if callable(value):
c[key] = value()
else:
c[key] = value
return HttpResponse(t.render(c), mimetype=mimetype)
def archive_today(request, **kwargs):
"""
Generic daily archive view for today. Same as archive_day view.
"""
today = datetime.date.today()
kwargs.update({
'year': str(today.year),
'month': today.strftime('%b').lower(),
'day': str(today.day),
})
return archive_day(request, **kwargs)
def object_detail(request, year, month, day, queryset, date_field,
month_format='%b', day_format='%d', object_id=None, slug=None,
slug_field='slug', template_name=None, template_name_field=None,
template_loader=loader, extra_context=None, context_processors=None,
template_object_name='object', mimetype=None, allow_future=False):
"""
Generic detail view from year/month/day/slug or year/month/day/id structure.
Templates: ``<app_label>/<model_name>_detail.html``
Context:
object:
the object to be detailed
"""
if extra_context is None: extra_context = {}
try:
tt = time.strptime('%s-%s-%s' % (year, month, day),
'%s-%s-%s' % ('%Y', month_format, day_format))
date = datetime.date(*tt[:3])
except ValueError:
raise Http404
model = queryset.model
now = datetime.datetime.now()
if isinstance(model._meta.get_field(date_field), DateTimeField):
lookup_kwargs = {'%s__range' % date_field: (datetime.datetime.combine(date, datetime.time.min), datetime.datetime.combine(date, datetime.time.max))}
else:
lookup_kwargs = {date_field: date}
# Only bother to check current date if the date isn't in the past and future objects aren't requested.
if date >= now.date() and not allow_future:
lookup_kwargs['%s__lte' % date_field] = now
if object_id:
lookup_kwargs['%s__exact' % model._meta.pk.name] = object_id
elif slug and slug_field:
lookup_kwargs['%s__exact' % slug_field] = slug
else:
raise AttributeError("Generic detail view must be called with either an object_id or a slug/slugfield")
try:
obj = queryset.get(**lookup_kwargs)
except ObjectDoesNotExist:
raise Http404("No %s found for" % model._meta.verbose_name)
if not template_name:
template_name = "%s/%s_detail.html" % (model._meta.app_label, model._meta.object_name.lower())
if template_name_field:
template_name_list = [getattr(obj, template_name_field), template_name]
t = template_loader.select_template(template_name_list)
else:
t = template_loader.get_template(template_name)
c = RequestContext(request, {
template_object_name: obj,
}, context_processors)
for key, value in extra_context.items():
if callable(value):
c[key] = value()
else:
c[key] = value
response = HttpResponse(t.render(c), mimetype=mimetype)
populate_xheaders(request, response, model, getattr(obj, obj._meta.pk.name))
return response
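# --- Illustration (not part of this module) -----------------------------------
# A minimal URLconf sketch showing how these generic views were typically
# wired up in this era of Django (the Entry model and 'pub_date' field are
# assumptions used only for demonstration):
#
#     from django.conf.urls.defaults import patterns, url
#     from myapp.models import Entry
#
#     info_dict = {'queryset': Entry.objects.all(), 'date_field': 'pub_date'}
#
#     urlpatterns = patterns('django.views.generic.date_based',
#         url(r'^$', 'archive_index', info_dict),
#         url(r'^(?P<year>\d{4})/$', 'archive_year', info_dict),
#         url(r'^(?P<year>\d{4})/(?P<month>\w{3})/(?P<day>\d{1,2})/(?P<slug>[-\w]+)/$',
#             'object_detail', dict(info_dict, slug_field='slug')),
#     )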
|
houchj/selenium
|
refs/heads/master
|
py/selenium/webdriver/safari/__init__.py
|
2454
|
# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
|
zhinaonet/sqlmap-z
|
refs/heads/master
|
lib/core/target.py
|
1
|
#!/usr/bin/env python
"""
Copyright (c) 2006-2017 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""
import codecs
import functools
import os
import re
import subprocess
import sys
import tempfile
import time
import urlparse
from lib.core.common import Backend
from lib.core.common import getSafeExString
from lib.core.common import getUnicode
from lib.core.common import hashDBRetrieve
from lib.core.common import intersect
from lib.core.common import isNumPosStrValue
from lib.core.common import normalizeUnicode
from lib.core.common import openFile
from lib.core.common import paramToDict
from lib.core.common import randomStr
from lib.core.common import readInput
from lib.core.common import resetCookieJar
from lib.core.common import urldecode
from lib.core.data import conf
from lib.core.data import kb
from lib.core.data import logger
from lib.core.data import mergedOptions
from lib.core.data import paths
from lib.core.datatype import InjectionDict
from lib.core.dicts import DBMS_DICT
from lib.core.dump import dumper
from lib.core.enums import HASHDB_KEYS
from lib.core.enums import HTTP_HEADER
from lib.core.enums import HTTPMETHOD
from lib.core.enums import MKSTEMP_PREFIX
from lib.core.enums import PLACE
from lib.core.enums import POST_HINT
from lib.core.exception import SqlmapFilePathException
from lib.core.exception import SqlmapGenericException
from lib.core.exception import SqlmapMissingPrivileges
from lib.core.exception import SqlmapSystemException
from lib.core.exception import SqlmapUserQuitException
from lib.core.option import _setDBMS
from lib.core.option import _setKnowledgeBaseAttributes
from lib.core.option import _setAuthCred
from lib.core.settings import ASTERISK_MARKER
from lib.core.settings import CSRF_TOKEN_PARAMETER_INFIXES
from lib.core.settings import DEFAULT_GET_POST_DELIMITER
from lib.core.settings import HOST_ALIASES
from lib.core.settings import ARRAY_LIKE_RECOGNITION_REGEX
from lib.core.settings import JSON_RECOGNITION_REGEX
from lib.core.settings import JSON_LIKE_RECOGNITION_REGEX
from lib.core.settings import MULTIPART_RECOGNITION_REGEX
from lib.core.settings import PROBLEMATIC_CUSTOM_INJECTION_PATTERNS
from lib.core.settings import REFERER_ALIASES
from lib.core.settings import RESTORE_MERGED_OPTIONS
from lib.core.settings import RESULTS_FILE_FORMAT
from lib.core.settings import SUPPORTED_DBMS
from lib.core.settings import UNENCODED_ORIGINAL_VALUE
from lib.core.settings import UNICODE_ENCODING
from lib.core.settings import UNKNOWN_DBMS_VERSION
from lib.core.settings import URI_INJECTABLE_REGEX
from lib.core.settings import USER_AGENT_ALIASES
from lib.core.settings import XML_RECOGNITION_REGEX
from lib.utils.hashdb import HashDB
from thirdparty.odict.odict import OrderedDict
def _setRequestParams():
"""
Check and set the parameters and perform checks on 'data' option for
HTTP method POST.
"""
if conf.direct:
conf.parameters[None] = "direct connection"
return
testableParameters = False
# Perform checks on GET parameters
if conf.parameters.get(PLACE.GET):
parameters = conf.parameters[PLACE.GET]
paramDict = paramToDict(PLACE.GET, parameters)
if paramDict:
conf.paramDict[PLACE.GET] = paramDict
testableParameters = True
# Perform checks on POST parameters
if conf.method == HTTPMETHOD.POST and conf.data is None:
logger.warn("detected empty POST body")
conf.data = ""
if conf.data is not None:
conf.method = HTTPMETHOD.POST if not conf.method or conf.method == HTTPMETHOD.GET else conf.method
hintNames = []
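        # Helper for the re.sub() calls below: leave the match untouched unless
        # its named 'name' group is among the tested parameters (or no filter
        # is set), expand any \g<...> backreferences against the match, and
        # record (prefix, parameter name) pairs for post hint resolution.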
def process(match, repl):
retVal = match.group(0)
if not (conf.testParameter and match.group("name") not in conf.testParameter):
retVal = repl
while True:
_ = re.search(r"\\g<([^>]+)>", retVal)
if _:
retVal = retVal.replace(_.group(0), match.group(int(_.group(1)) if _.group(1).isdigit() else _.group(1)))
else:
break
if kb.customInjectionMark in retVal:
hintNames.append((retVal.split(kb.customInjectionMark)[0], match.group("name")))
return retVal
if kb.processUserMarks is None and kb.customInjectionMark in conf.data:
message = "custom injection marker ('%s') found in option " % kb.customInjectionMark
message += "'--data'. Do you want to process it? [Y/n/q] "
choice = readInput(message, default='Y').upper()
if choice == 'Q':
raise SqlmapUserQuitException
else:
kb.processUserMarks = choice == 'Y'
if kb.processUserMarks:
kb.testOnlyCustom = True
if re.search(JSON_RECOGNITION_REGEX, conf.data):
message = "JSON data found in %s data. " % conf.method
message += "Do you want to process it? [Y/n/q] "
choice = readInput(message, default='Y').upper()
if choice == 'Q':
raise SqlmapUserQuitException
elif choice == 'Y':
if not (kb.processUserMarks and kb.customInjectionMark in conf.data):
conf.data = getattr(conf.data, UNENCODED_ORIGINAL_VALUE, conf.data)
conf.data = conf.data.replace(kb.customInjectionMark, ASTERISK_MARKER)
conf.data = re.sub(r'("(?P<name>[^"]+)"\s*:\s*"[^"]+)"', functools.partial(process, repl=r'\g<1>%s"' % kb.customInjectionMark), conf.data)
conf.data = re.sub(r'("(?P<name>[^"]+)"\s*:\s*)(-?\d[\d\.]*\b)', functools.partial(process, repl=r'\g<0>%s' % kb.customInjectionMark), conf.data)
match = re.search(r'(?P<name>[^"]+)"\s*:\s*\[([^\]]+)\]', conf.data)
if match and not (conf.testParameter and match.group("name") not in conf.testParameter):
_ = match.group(2)
_ = re.sub(r'("[^"]+)"', '\g<1>%s"' % kb.customInjectionMark, _)
_ = re.sub(r'(\A|,|\s+)(-?\d[\d\.]*\b)', '\g<0>%s' % kb.customInjectionMark, _)
conf.data = conf.data.replace(match.group(0), match.group(0).replace(match.group(2), _))
kb.postHint = POST_HINT.JSON
elif re.search(JSON_LIKE_RECOGNITION_REGEX, conf.data):
message = "JSON-like data found in %s data. " % conf.method
message += "Do you want to process it? [Y/n/q] "
choice = readInput(message, default='Y').upper()
if choice == 'Q':
raise SqlmapUserQuitException
elif choice == 'Y':
if not (kb.processUserMarks and kb.customInjectionMark in conf.data):
conf.data = getattr(conf.data, UNENCODED_ORIGINAL_VALUE, conf.data)
conf.data = conf.data.replace(kb.customInjectionMark, ASTERISK_MARKER)
conf.data = re.sub(r"('(?P<name>[^']+)'\s*:\s*'[^']+)'", functools.partial(process, repl=r"\g<1>%s'" % kb.customInjectionMark), conf.data)
conf.data = re.sub(r"('(?P<name>[^']+)'\s*:\s*)(-?\d[\d\.]*\b)", functools.partial(process, repl=r"\g<0>%s" % kb.customInjectionMark), conf.data)
kb.postHint = POST_HINT.JSON_LIKE
elif re.search(ARRAY_LIKE_RECOGNITION_REGEX, conf.data):
message = "Array-like data found in %s data. " % conf.method
message += "Do you want to process it? [Y/n/q] "
choice = readInput(message, default='Y').upper()
if choice == 'Q':
raise SqlmapUserQuitException
elif choice == 'Y':
if not (kb.processUserMarks and kb.customInjectionMark in conf.data):
conf.data = conf.data.replace(kb.customInjectionMark, ASTERISK_MARKER)
conf.data = re.sub(r"(=[^%s]+)" % DEFAULT_GET_POST_DELIMITER, r"\g<1>%s" % kb.customInjectionMark, conf.data)
kb.postHint = POST_HINT.ARRAY_LIKE
elif re.search(XML_RECOGNITION_REGEX, conf.data):
message = "SOAP/XML data found in %s data. " % conf.method
message += "Do you want to process it? [Y/n/q] "
choice = readInput(message, default='Y').upper()
if choice == 'Q':
raise SqlmapUserQuitException
elif choice == 'Y':
if not (kb.processUserMarks and kb.customInjectionMark in conf.data):
conf.data = getattr(conf.data, UNENCODED_ORIGINAL_VALUE, conf.data)
conf.data = conf.data.replace(kb.customInjectionMark, ASTERISK_MARKER)
conf.data = re.sub(r"(<(?P<name>[^>]+)( [^<]*)?>)([^<]+)(</\2)", functools.partial(process, repl=r"\g<1>\g<4>%s\g<5>" % kb.customInjectionMark), conf.data)
kb.postHint = POST_HINT.SOAP if "soap" in conf.data.lower() else POST_HINT.XML
elif re.search(MULTIPART_RECOGNITION_REGEX, conf.data):
message = "Multipart-like data found in %s data. " % conf.method
message += "Do you want to process it? [Y/n/q] "
choice = readInput(message, default='Y').upper()
if choice == 'Q':
raise SqlmapUserQuitException
elif choice == 'Y':
if not (kb.processUserMarks and kb.customInjectionMark in conf.data):
conf.data = getattr(conf.data, UNENCODED_ORIGINAL_VALUE, conf.data)
conf.data = conf.data.replace(kb.customInjectionMark, ASTERISK_MARKER)
conf.data = re.sub(r"(?si)((Content-Disposition[^\n]+?name\s*=\s*[\"']?(?P<name>[^\"'\r\n]+)[\"']?).+?)(((\r)?\n)+--)", functools.partial(process, repl=r"\g<1>%s\g<4>" % kb.customInjectionMark), conf.data)
kb.postHint = POST_HINT.MULTIPART
if not kb.postHint:
if kb.customInjectionMark in conf.data: # later processed
pass
else:
place = PLACE.POST
conf.parameters[place] = conf.data
paramDict = paramToDict(place, conf.data)
if paramDict:
conf.paramDict[place] = paramDict
testableParameters = True
else:
            if kb.customInjectionMark not in conf.data:  # in case no usable parameter values have been found
conf.parameters[PLACE.POST] = conf.data
kb.processUserMarks = True if (kb.postHint and kb.customInjectionMark in conf.data) else kb.processUserMarks
    if re.search(URI_INJECTABLE_REGEX, conf.url, re.I) and not any(place in conf.parameters for place in (PLACE.GET, PLACE.POST)) and not kb.postHint and kb.customInjectionMark not in (conf.data or "") and conf.url.startswith("http"):
warnMsg = "you've provided target URL without any GET "
warnMsg += "parameters (e.g. 'http://www.site.com/article.php?id=1') "
warnMsg += "and without providing any POST parameters "
warnMsg += "through option '--data'"
logger.warn(warnMsg)
message = "do you want to try URI injections "
message += "in the target URL itself? [Y/n/q] "
choice = readInput(message, default='Y').upper()
if choice == 'Q':
raise SqlmapUserQuitException
elif choice == 'Y':
conf.url = "%s%s" % (conf.url, kb.customInjectionMark)
kb.processUserMarks = True
for place, value in ((PLACE.URI, conf.url), (PLACE.CUSTOM_POST, conf.data), (PLACE.CUSTOM_HEADER, str(conf.httpHeaders))):
_ = re.sub(PROBLEMATIC_CUSTOM_INJECTION_PATTERNS, "", value or "") if place == PLACE.CUSTOM_HEADER else value or ""
if kb.customInjectionMark in _:
if kb.processUserMarks is None:
lut = {PLACE.URI: '-u', PLACE.CUSTOM_POST: '--data', PLACE.CUSTOM_HEADER: '--headers/--user-agent/--referer/--cookie'}
message = "custom injection marker ('%s') found in option " % kb.customInjectionMark
message += "'%s'. Do you want to process it? [Y/n/q] " % lut[place]
choice = readInput(message, default='Y').upper()
if choice == 'Q':
raise SqlmapUserQuitException
else:
kb.processUserMarks = choice == 'Y'
if kb.processUserMarks:
kb.testOnlyCustom = True
if "=%s" % kb.customInjectionMark in _:
warnMsg = "it seems that you've provided empty parameter value(s) "
warnMsg += "for testing. Please, always use only valid parameter values "
warnMsg += "so sqlmap could be able to run properly"
logger.warn(warnMsg)
if not kb.processUserMarks:
if place == PLACE.URI:
query = urlparse.urlsplit(value).query
if query:
parameters = conf.parameters[PLACE.GET] = query
paramDict = paramToDict(PLACE.GET, parameters)
if paramDict:
conf.url = conf.url.split('?')[0]
conf.paramDict[PLACE.GET] = paramDict
testableParameters = True
elif place == PLACE.CUSTOM_POST:
conf.parameters[PLACE.POST] = conf.data
paramDict = paramToDict(PLACE.POST, conf.data)
if paramDict:
conf.paramDict[PLACE.POST] = paramDict
testableParameters = True
else:
conf.parameters[place] = value
conf.paramDict[place] = OrderedDict()
if place == PLACE.CUSTOM_HEADER:
for index in xrange(len(conf.httpHeaders)):
header, value = conf.httpHeaders[index]
if kb.customInjectionMark in re.sub(PROBLEMATIC_CUSTOM_INJECTION_PATTERNS, "", value):
parts = value.split(kb.customInjectionMark)
for i in xrange(len(parts) - 1):
conf.paramDict[place]["%s #%d%s" % (header, i + 1, kb.customInjectionMark)] = "%s,%s" % (header, "".join("%s%s" % (parts[j], kb.customInjectionMark if i == j else "") for j in xrange(len(parts))))
conf.httpHeaders[index] = (header, value.replace(kb.customInjectionMark, ""))
else:
parts = value.split(kb.customInjectionMark)
for i in xrange(len(parts) - 1):
name = None
if kb.postHint:
for ending, _ in hintNames:
if parts[i].endswith(ending):
name = "%s %s" % (kb.postHint, _)
break
if name is None:
name = "%s#%s%s" % (("%s " % kb.postHint) if kb.postHint else "", i + 1, kb.customInjectionMark)
conf.paramDict[place][name] = "".join("%s%s" % (parts[j], kb.customInjectionMark if i == j else "") for j in xrange(len(parts)))
if place == PLACE.URI and PLACE.GET in conf.paramDict:
del conf.paramDict[PLACE.GET]
elif place == PLACE.CUSTOM_POST and PLACE.POST in conf.paramDict:
del conf.paramDict[PLACE.POST]
testableParameters = True
if kb.processUserMarks:
for item in ("url", "data", "agent", "referer", "cookie"):
if conf.get(item):
conf[item] = conf[item].replace(kb.customInjectionMark, "")
# Perform checks on Cookie parameters
if conf.cookie:
conf.parameters[PLACE.COOKIE] = conf.cookie
paramDict = paramToDict(PLACE.COOKIE, conf.cookie)
if paramDict:
conf.paramDict[PLACE.COOKIE] = paramDict
testableParameters = True
# Perform checks on header values
if conf.httpHeaders:
for httpHeader, headerValue in list(conf.httpHeaders):
# Url encoding of the header values should be avoided
# Reference: http://stackoverflow.com/questions/5085904/is-ok-to-urlencode-the-value-in-headerlocation-value
if httpHeader.title() == HTTP_HEADER.USER_AGENT:
conf.parameters[PLACE.USER_AGENT] = urldecode(headerValue)
condition = any((not conf.testParameter, intersect(conf.testParameter, USER_AGENT_ALIASES, True)))
if condition:
conf.paramDict[PLACE.USER_AGENT] = {PLACE.USER_AGENT: headerValue}
testableParameters = True
elif httpHeader.title() == HTTP_HEADER.REFERER:
conf.parameters[PLACE.REFERER] = urldecode(headerValue)
condition = any((not conf.testParameter, intersect(conf.testParameter, REFERER_ALIASES, True)))
if condition:
conf.paramDict[PLACE.REFERER] = {PLACE.REFERER: headerValue}
testableParameters = True
elif httpHeader.title() == HTTP_HEADER.HOST:
conf.parameters[PLACE.HOST] = urldecode(headerValue)
condition = any((not conf.testParameter, intersect(conf.testParameter, HOST_ALIASES, True)))
if condition:
conf.paramDict[PLACE.HOST] = {PLACE.HOST: headerValue}
testableParameters = True
else:
condition = intersect(conf.testParameter, [httpHeader], True)
if condition:
conf.parameters[PLACE.CUSTOM_HEADER] = str(conf.httpHeaders)
conf.paramDict[PLACE.CUSTOM_HEADER] = {httpHeader: "%s,%s%s" % (httpHeader, headerValue, kb.customInjectionMark)}
conf.httpHeaders = [(header, value.replace(kb.customInjectionMark, "")) for header, value in conf.httpHeaders]
testableParameters = True
if not conf.parameters:
errMsg = "you did not provide any GET, POST and Cookie "
errMsg += "parameter, neither an User-Agent, Referer or Host header value"
raise SqlmapGenericException(errMsg)
elif not testableParameters:
errMsg = "all testable parameters you provided are not present "
errMsg += "within the given request data"
raise SqlmapGenericException(errMsg)
if conf.csrfToken:
if not any(conf.csrfToken in _ for _ in (conf.paramDict.get(PLACE.GET, {}), conf.paramDict.get(PLACE.POST, {}))) and not re.search(r"\b%s\b" % re.escape(conf.csrfToken), conf.data or "") and not conf.csrfToken in set(_[0].lower() for _ in conf.httpHeaders) and not conf.csrfToken in conf.paramDict.get(PLACE.COOKIE, {}):
errMsg = "anti-CSRF token parameter '%s' not " % conf.csrfToken
errMsg += "found in provided GET, POST, Cookie or header values"
raise SqlmapGenericException(errMsg)
else:
for place in (PLACE.GET, PLACE.POST, PLACE.COOKIE):
for parameter in conf.paramDict.get(place, {}):
if any(parameter.lower().count(_) for _ in CSRF_TOKEN_PARAMETER_INFIXES):
message = "%s parameter '%s' appears to hold anti-CSRF token. " % (place, parameter)
message += "Do you want sqlmap to automatically update it in further requests? [y/N] "
if readInput(message, default='N', boolean=True):
conf.csrfToken = getUnicode(parameter)
break
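# --- Illustration (not part of sqlmap) ----------------------------------------
# A standalone sketch of the marker-splitting idea used in _setRequestParams():
# for a value containing N custom injection markers, one testable variant is
# built per marker, keeping that single marker and dropping the others. The
# '*' marker and the sample value are assumptions for demonstration only.
def _splitMarkersDemo(value, mark='*'):
    parts = value.split(mark)
    return ["".join("%s%s" % (parts[j], mark if i == j else "")
                    for j in xrange(len(parts)))
            for i in xrange(len(parts) - 1)]
# _splitMarkersDemo("id=1*&cat=2*") -> ["id=1*&cat=2", "id=1&cat=2*"]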
def _setHashDB():
"""
Check and set the HashDB SQLite file for query resume functionality.
"""
if not conf.hashDBFile:
conf.hashDBFile = conf.sessionFile or os.path.join(conf.outputPath, "session.sqlite")
if os.path.exists(conf.hashDBFile):
if conf.flushSession:
try:
os.remove(conf.hashDBFile)
logger.info("flushing session file")
except OSError, msg:
errMsg = "unable to flush the session file (%s)" % msg
raise SqlmapFilePathException(errMsg)
conf.hashDB = HashDB(conf.hashDBFile)
def _resumeHashDBValues():
"""
Resume stored data values from HashDB
"""
kb.absFilePaths = hashDBRetrieve(HASHDB_KEYS.KB_ABS_FILE_PATHS, True) or kb.absFilePaths
kb.brute.tables = hashDBRetrieve(HASHDB_KEYS.KB_BRUTE_TABLES, True) or kb.brute.tables
kb.brute.columns = hashDBRetrieve(HASHDB_KEYS.KB_BRUTE_COLUMNS, True) or kb.brute.columns
kb.chars = hashDBRetrieve(HASHDB_KEYS.KB_CHARS, True) or kb.chars
kb.dynamicMarkings = hashDBRetrieve(HASHDB_KEYS.KB_DYNAMIC_MARKINGS, True) or kb.dynamicMarkings
kb.xpCmdshellAvailable = hashDBRetrieve(HASHDB_KEYS.KB_XP_CMDSHELL_AVAILABLE) or kb.xpCmdshellAvailable
kb.errorChunkLength = hashDBRetrieve(HASHDB_KEYS.KB_ERROR_CHUNK_LENGTH)
if isNumPosStrValue(kb.errorChunkLength):
kb.errorChunkLength = int(kb.errorChunkLength)
else:
kb.errorChunkLength = None
conf.tmpPath = conf.tmpPath or hashDBRetrieve(HASHDB_KEYS.CONF_TMP_PATH)
for injection in hashDBRetrieve(HASHDB_KEYS.KB_INJECTIONS, True) or []:
if isinstance(injection, InjectionDict) and injection.place in conf.paramDict and \
injection.parameter in conf.paramDict[injection.place]:
if not conf.tech or intersect(conf.tech, injection.data.keys()):
if intersect(conf.tech, injection.data.keys()):
injection.data = dict(_ for _ in injection.data.items() if _[0] in conf.tech)
if injection not in kb.injections:
kb.injections.append(injection)
_resumeDBMS()
_resumeOS()
def _resumeDBMS():
"""
Resume stored DBMS information from HashDB
"""
value = hashDBRetrieve(HASHDB_KEYS.DBMS)
if not value:
return
dbms = value.lower()
dbmsVersion = [UNKNOWN_DBMS_VERSION]
_ = "(%s)" % ("|".join([alias for alias in SUPPORTED_DBMS]))
_ = re.search(r"\A%s (.*)" % _, dbms, re.I)
if _:
dbms = _.group(1).lower()
dbmsVersion = [_.group(2)]
if conf.dbms:
check = True
for aliases, _, _, _ in DBMS_DICT.values():
if conf.dbms.lower() in aliases and dbms not in aliases:
check = False
break
if not check:
message = "you provided '%s' as a back-end DBMS, " % conf.dbms
message += "but from a past scan information on the target URL "
message += "sqlmap assumes the back-end DBMS is '%s'. " % dbms
message += "Do you really want to force the back-end "
message += "DBMS value? [y/N] "
if not readInput(message, default='N', boolean=True):
conf.dbms = None
Backend.setDbms(dbms)
Backend.setVersionList(dbmsVersion)
else:
infoMsg = "resuming back-end DBMS '%s' " % dbms
logger.info(infoMsg)
Backend.setDbms(dbms)
Backend.setVersionList(dbmsVersion)
def _resumeOS():
"""
Resume stored OS information from HashDB
"""
value = hashDBRetrieve(HASHDB_KEYS.OS)
if not value:
return
os = value
if os and os != 'None':
infoMsg = "resuming back-end DBMS operating system '%s' " % os
logger.info(infoMsg)
if conf.os and conf.os.lower() != os.lower():
message = "you provided '%s' as back-end DBMS operating " % conf.os
message += "system, but from a past scan information on the "
message += "target URL sqlmap assumes the back-end DBMS "
message += "operating system is %s. " % os
message += "Do you really want to force the back-end DBMS "
message += "OS value? [y/N] "
if not readInput(message, default='N', boolean=True):
conf.os = os
else:
conf.os = os
Backend.setOs(conf.os)
def _setResultsFile():
"""
Create results file for storing results of running in a
multiple target mode.
"""
if not conf.multipleTargets:
return
if not conf.resultsFP:
conf.resultsFilename = os.path.join(paths.SQLMAP_OUTPUT_PATH, time.strftime(RESULTS_FILE_FORMAT).lower())
try:
conf.resultsFP = openFile(conf.resultsFilename, "w+", UNICODE_ENCODING, buffering=0)
except (OSError, IOError), ex:
try:
warnMsg = "unable to create results file '%s' ('%s'). " % (conf.resultsFilename, getUnicode(ex))
handle, conf.resultsFilename = tempfile.mkstemp(prefix=MKSTEMP_PREFIX.RESULTS, suffix=".csv")
os.close(handle)
conf.resultsFP = openFile(conf.resultsFilename, "w+", UNICODE_ENCODING, buffering=0)
warnMsg += "Using temporary file '%s' instead" % conf.resultsFilename
logger.warn(warnMsg)
except IOError, _:
errMsg = "unable to write to the temporary directory ('%s'). " % _
errMsg += "Please make sure that your disk is not full and "
errMsg += "that you have sufficient write permissions to "
errMsg += "create temporary files and/or directories"
raise SqlmapSystemException(errMsg)
conf.resultsFP.writelines("Target URL,Place,Parameter,Technique(s),Note(s)%s" % os.linesep)
logger.info("using '%s' as the CSV results file in multiple targets mode" % conf.resultsFilename)
def _createFilesDir():
"""
Create the file directory.
"""
if not conf.rFile:
return
conf.filePath = paths.SQLMAP_FILES_PATH % conf.hostname
if not os.path.isdir(conf.filePath):
try:
os.makedirs(conf.filePath, 0755)
except OSError, ex:
tempDir = tempfile.mkdtemp(prefix="sqlmapfiles")
warnMsg = "unable to create files directory "
warnMsg += "'%s' (%s). " % (conf.filePath, getUnicode(ex))
warnMsg += "Using temporary directory '%s' instead" % tempDir
logger.warn(warnMsg)
conf.filePath = tempDir
def _createDumpDir():
"""
Create the dump directory.
"""
if not conf.dumpTable and not conf.dumpAll and not conf.search:
return
conf.dumpPath = paths.SQLMAP_DUMP_PATH % conf.hostname
if not os.path.isdir(conf.dumpPath):
try:
os.makedirs(conf.dumpPath, 0755)
except OSError, ex:
tempDir = tempfile.mkdtemp(prefix="sqlmapdump")
warnMsg = "unable to create dump directory "
warnMsg += "'%s' (%s). " % (conf.dumpPath, getUnicode(ex))
warnMsg += "Using temporary directory '%s' instead" % tempDir
logger.warn(warnMsg)
conf.dumpPath = tempDir
def _configureDumper():
conf.dumper = dumper
conf.dumper.setOutputFile()
def _createTargetDirs():
"""
Create the output directory.
"""
try:
if not os.path.isdir(paths.SQLMAP_OUTPUT_PATH):
os.makedirs(paths.SQLMAP_OUTPUT_PATH, 0755)
_ = os.path.join(paths.SQLMAP_OUTPUT_PATH, randomStr())
open(_, "w+b").close()
os.remove(_)
if conf.outputDir:
warnMsg = "using '%s' as the output directory" % paths.SQLMAP_OUTPUT_PATH
logger.warn(warnMsg)
except (OSError, IOError), ex:
try:
tempDir = tempfile.mkdtemp(prefix="sqlmapoutput")
except Exception, _:
errMsg = "unable to write to the temporary directory ('%s'). " % _
errMsg += "Please make sure that your disk is not full and "
errMsg += "that you have sufficient write permissions to "
errMsg += "create temporary files and/or directories"
raise SqlmapSystemException(errMsg)
warnMsg = "unable to %s output directory " % ("create" if not os.path.isdir(paths.SQLMAP_OUTPUT_PATH) else "write to the")
warnMsg += "'%s' (%s). " % (paths.SQLMAP_OUTPUT_PATH, getUnicode(ex))
warnMsg += "Using temporary directory '%s' instead" % getUnicode(tempDir)
logger.warn(warnMsg)
paths.SQLMAP_OUTPUT_PATH = tempDir
conf.outputPath = os.path.join(getUnicode(paths.SQLMAP_OUTPUT_PATH), normalizeUnicode(getUnicode(conf.hostname)))
if not os.path.isdir(conf.outputPath):
try:
os.makedirs(conf.outputPath, 0755)
except (OSError, IOError), ex:
try:
tempDir = tempfile.mkdtemp(prefix="sqlmapoutput")
except Exception, _:
errMsg = "unable to write to the temporary directory ('%s'). " % _
errMsg += "Please make sure that your disk is not full and "
errMsg += "that you have sufficient write permissions to "
errMsg += "create temporary files and/or directories"
raise SqlmapSystemException(errMsg)
warnMsg = "unable to create output directory "
warnMsg += "'%s' (%s). " % (conf.outputPath, getUnicode(ex))
warnMsg += "Using temporary directory '%s' instead" % getUnicode(tempDir)
logger.warn(warnMsg)
conf.outputPath = tempDir
try:
with codecs.open(os.path.join(conf.outputPath, "target.txt"), "w+", UNICODE_ENCODING) as f:
f.write(kb.originalUrls.get(conf.url) or conf.url or conf.hostname)
f.write(" (%s)" % (HTTPMETHOD.POST if conf.data else HTTPMETHOD.GET))
f.write(" # %s" % getUnicode(subprocess.list2cmdline(sys.argv), encoding=sys.stdin.encoding))
if conf.data:
f.write("\n\n%s" % getUnicode(conf.data))
except IOError, ex:
if "denied" in getUnicode(ex):
errMsg = "you don't have enough permissions "
else:
errMsg = "something went wrong while trying "
errMsg += "to write to the output directory '%s' (%s)" % (paths.SQLMAP_OUTPUT_PATH, getSafeExString(ex))
raise SqlmapMissingPrivileges(errMsg)
_createDumpDir()
_createFilesDir()
_configureDumper()
def _restoreMergedOptions():
"""
Restore merged options (command line, configuration file and default values)
that could be possibly changed during the testing of previous target.
"""
for option in RESTORE_MERGED_OPTIONS:
conf[option] = mergedOptions[option]
def initTargetEnv():
"""
Initialize target environment.
"""
if conf.multipleTargets:
if conf.hashDB:
conf.hashDB.close()
if conf.cj:
resetCookieJar(conf.cj)
conf.paramDict = {}
conf.parameters = {}
conf.hashDBFile = None
_setKnowledgeBaseAttributes(False)
_restoreMergedOptions()
_setDBMS()
if conf.data:
class _(unicode):
pass
kb.postUrlEncode = True
for key, value in conf.httpHeaders:
if key.upper() == HTTP_HEADER.CONTENT_TYPE.upper():
kb.postUrlEncode = "urlencoded" in value
break
if kb.postUrlEncode:
original = conf.data
conf.data = _(urldecode(conf.data))
setattr(conf.data, UNENCODED_ORIGINAL_VALUE, original)
kb.postSpaceToPlus = '+' in original
def setupTargetEnv():
_createTargetDirs()
_setRequestParams()
_setHashDB()
_resumeHashDBValues()
_setResultsFile()
_setAuthCred()
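# --- Illustration (not part of sqlmap's control flow) --------------------------
# A sketch of the intended per-target call order (sqlmap's engine drives this;
# it is shown here only to clarify how the helpers above compose):
#
#     initTargetEnv()     # reset per-target conf/kb state (multi-target mode)
#     setupTargetEnv()    # target dirs -> request params -> hashdb resume
#                         # -> results file -> auth credentials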
|
BPI-SINOVOIP/BPI-Mainline-kernel
|
refs/heads/master
|
toolchains/gcc-linaro-7.3.1-2018.05-x86_64_arm-linux-gnueabi/share/gdb/python/gdb/FrameDecorator.py
|
10
|
# Copyright (C) 2013-2018 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import gdb
# This small code snippet deals with problem of strings in Python 2.x
# and Python 3.x. Python 2.x has str and unicode classes which are
# sub-classes of basestring. In Python 3.x all strings are encoded
# and basestring has been removed.
try:
basestring
except NameError:
basestring = str
class FrameDecorator(object):
"""Basic implementation of a Frame Decorator"""
""" This base frame decorator decorates a frame or another frame
decorator, and provides convenience methods. If this object is
wrapping a frame decorator, defer to that wrapped object's method
if it has one. This allows for frame decorators that have
sub-classed FrameDecorator object, but also wrap other frame
decorators on the same frame to correctly execute.
E.g
If the result of frame filters running means we have one gdb.Frame
wrapped by multiple frame decorators, all sub-classed from
FrameDecorator, the resulting hierarchy will be:
Decorator1
-- (wraps) Decorator2
-- (wraps) FrameDecorator
-- (wraps) gdb.Frame
In this case we have two frame decorators, both of which are
sub-classed from FrameDecorator. If Decorator1 just overrides the
'function' method, then all of the other methods are carried out
by the super-class FrameDecorator. But Decorator2 may have
overriden other methods, so FrameDecorator will look at the
'base' parameter and defer to that class's methods. And so on,
down the chain."""
# 'base' can refer to a gdb.Frame or another frame decorator. In
# the latter case, the child class will have called the super
# method and _base will be an object conforming to the Frame Filter
# class.
def __init__(self, base):
self._base = base
@staticmethod
def _is_limited_frame(frame):
"""Internal utility to determine if the frame is special or
limited."""
sal = frame.find_sal()
if (not sal.symtab or not sal.symtab.filename
or frame.type() == gdb.DUMMY_FRAME
or frame.type() == gdb.SIGTRAMP_FRAME):
return True
return False
def elided(self):
"""Return any elided frames that this class might be
wrapping, or None."""
if hasattr(self._base, "elided"):
return self._base.elided()
return None
def function(self):
""" Return the name of the frame's function or an address of
the function of the frame. First determine if this is a
        special frame.  If not, try to determine the function name from
        GDB's internal frame API.  Finally, if a name cannot be
determined return the address. If this function returns an
address, GDB will attempt to determine the function name from
its internal minimal symbols store (for example, for inferiors
without debug-info)."""
# Both gdb.Frame, and FrameDecorator have a method called
# "function", so determine which object this is.
if not isinstance(self._base, gdb.Frame):
if hasattr(self._base, "function"):
# If it is not a gdb.Frame, and there is already a
# "function" method, use that.
return self._base.function()
frame = self.inferior_frame()
if frame.type() == gdb.DUMMY_FRAME:
return "<function called from gdb>"
elif frame.type() == gdb.SIGTRAMP_FRAME:
return "<signal handler called>"
func = frame.function()
# If we cannot determine the function name, return the
# address. If GDB detects an integer value from this function
# it will attempt to find the function name from minimal
# symbols via its own internal functions.
        if func is None:
pc = frame.pc()
return pc
return str(func)
def address(self):
""" Return the address of the frame's pc"""
if hasattr(self._base, "address"):
return self._base.address()
frame = self.inferior_frame()
return frame.pc()
def filename(self):
""" Return the filename associated with this frame, detecting
and returning the appropriate library name is this is a shared
library."""
if hasattr(self._base, "filename"):
return self._base.filename()
frame = self.inferior_frame()
sal = frame.find_sal()
if not sal.symtab or not sal.symtab.filename:
pc = frame.pc()
return gdb.solib_name(pc)
else:
return sal.symtab.filename
def frame_args(self):
""" Return an iterable of frame arguments for this frame, if
any. The iterable object contains objects conforming with the
Symbol/Value interface. If there are no frame arguments, or
if this frame is deemed to be a special case, return None."""
if hasattr(self._base, "frame_args"):
return self._base.frame_args()
frame = self.inferior_frame()
if self._is_limited_frame(frame):
return None
args = FrameVars(frame)
return args.fetch_frame_args()
def frame_locals(self):
""" Return an iterable of local variables for this frame, if
any. The iterable object contains objects conforming with the
Symbol/Value interface. If there are no frame locals, or if
this frame is deemed to be a special case, return None."""
if hasattr(self._base, "frame_locals"):
return self._base.frame_locals()
frame = self.inferior_frame()
if self._is_limited_frame(frame):
return None
args = FrameVars(frame)
return args.fetch_frame_locals()
def line(self):
""" Return line number information associated with the frame's
pc. If symbol table/line information does not exist, or if
this frame is deemed to be a special case, return None"""
if hasattr(self._base, "line"):
return self._base.line()
frame = self.inferior_frame()
if self._is_limited_frame(frame):
return None
sal = frame.find_sal()
        if sal:
return sal.line
else:
return None
def inferior_frame(self):
""" Return the gdb.Frame underpinning this frame decorator."""
# If 'base' is a frame decorator, we want to call its inferior
# frame method. If '_base' is a gdb.Frame, just return that.
if hasattr(self._base, "inferior_frame"):
return self._base.inferior_frame()
return self._base
class SymValueWrapper(object):
"""A container class conforming to the Symbol/Value interface
which holds frame locals or frame arguments."""
def __init__(self, symbol, value):
self.sym = symbol
self.val = value
def value(self):
""" Return the value associated with this symbol, or None"""
return self.val
def symbol(self):
""" Return the symbol, or Python text, associated with this
symbol, or None"""
return self.sym
class FrameVars(object):
"""Utility class to fetch and store frame local variables, or
frame arguments."""
def __init__(self, frame):
self.frame = frame
self.symbol_class = {
gdb.SYMBOL_LOC_STATIC: True,
gdb.SYMBOL_LOC_REGISTER: True,
gdb.SYMBOL_LOC_ARG: True,
gdb.SYMBOL_LOC_REF_ARG: True,
gdb.SYMBOL_LOC_LOCAL: True,
gdb.SYMBOL_LOC_REGPARM_ADDR: True,
gdb.SYMBOL_LOC_COMPUTED: True
}
def fetch_b(self, sym):
""" Local utility method to determine if according to Symbol
type whether it should be included in the iterator. Not all
symbols are fetched, and only symbols that return
True from this method should be fetched."""
# SYM may be a string instead of a symbol in the case of
# synthetic local arguments or locals. If that is the case,
# always fetch.
if isinstance(sym, basestring):
return True
sym_type = sym.addr_class
return self.symbol_class.get(sym_type, False)
def fetch_frame_locals(self):
"""Public utility method to fetch frame local variables for
the stored frame. Frame arguments are not fetched. If there
are no frame local variables, return an empty list."""
lvars = []
try:
block = self.frame.block()
except RuntimeError:
block = None
        while block is not None:
            if block.is_global or block.is_static:
                break
            for sym in block:
                if sym.is_argument:
                    continue
                if self.fetch_b(sym):
                    lvars.append(SymValueWrapper(sym, None))
            block = block.superblock
return lvars
def fetch_frame_args(self):
"""Public utility method to fetch frame arguments for the
stored frame. Frame arguments are the only type fetched. If
there are no frame argument variables, return an empty list."""
args = []
try:
block = self.frame.block()
except RuntimeError:
block = None
        while block is not None:
            if block.function is not None:
                break
            block = block.superblock
        if block is not None:
            for sym in block:
                if not sym.is_argument:
                    continue
                args.append(SymValueWrapper(sym, None))
return args
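# --- Illustration (not part of this module) ------------------------------------
# A minimal sketch of the wrapping scheme described in the FrameDecorator
# docstring: a decorator that overrides only function(), deferring every other
# method to the wrapped decorator or gdb.Frame. The class name is an assumption
# for demonstration only.
class _UppercaseFunctionDecorator(FrameDecorator):
    def __init__(self, base):
        super(_UppercaseFunctionDecorator, self).__init__(base)

    def function(self):
        name = super(_UppercaseFunctionDecorator, self).function()
        # function() may return an address (an integer) when the name is
        # unknown; only transform real string names.
        return name.upper() if isinstance(name, basestring) else name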
|
surligas/gnuradio
|
refs/heads/master
|
gr-analog/python/analog/qa_pwr_squelch.py
|
47
|
#!/usr/bin/env python
#
# Copyright 2012,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gr_unittest, analog, blocks
class test_pwr_squelch(gr_unittest.TestCase):
def setUp(self):
self.tb = gr.top_block()
def tearDown(self):
self.tb = None
def test_pwr_squelch_001(self):
# Test set/gets
alpha = 0.0001
thr1 = 10
thr2 = 20
ramp = 1
ramp2 = 2
gate = True
gate2 = False
op = analog.pwr_squelch_cc(thr1, alpha, ramp, gate)
op.set_threshold(thr2)
t = op.threshold()
self.assertEqual(thr2, t)
op.set_ramp(ramp2)
r = op.ramp()
self.assertEqual(ramp2, r)
op.set_gate(gate2)
g = op.gate()
self.assertEqual(gate2, g)
def test_pwr_squelch_002(self):
# Test runtime, gate=True
alpha = 0.0001
thr = -25
src_data = map(lambda x: float(x)/10.0, range(1, 40))
src = blocks.vector_source_c(src_data)
op = analog.pwr_squelch_cc(thr, alpha)
dst = blocks.vector_sink_c()
self.tb.connect(src, op)
self.tb.connect(op, dst)
self.tb.run()
expected_result = src_data
expected_result[0:20] = 20*[0,]
result_data = dst.data()
self.assertComplexTuplesAlmostEqual(expected_result, result_data, 4)
def test_pwr_squelch_003(self):
# Test set/gets
alpha = 0.0001
thr1 = 10
thr2 = 20
ramp = 1
ramp2 = 2
gate = True
gate2 = False
op = analog.pwr_squelch_ff(thr1, alpha, ramp, gate)
op.set_threshold(thr2)
t = op.threshold()
self.assertEqual(thr2, t)
op.set_ramp(ramp2)
r = op.ramp()
self.assertEqual(ramp2, r)
op.set_gate(gate2)
g = op.gate()
self.assertEqual(gate2, g)
def test_pwr_squelch_004(self):
alpha = 0.0001
thr = -25
src_data = map(lambda x: float(x)/10.0, range(1, 40))
src = blocks.vector_source_f(src_data)
op = analog.pwr_squelch_ff(thr, alpha)
dst = blocks.vector_sink_f()
self.tb.connect(src, op)
self.tb.connect(op, dst)
self.tb.run()
expected_result = src_data
expected_result[0:20] = 20*[0,]
result_data = dst.data()
self.assertFloatTuplesAlmostEqual(expected_result, result_data, 4)
if __name__ == '__main__':
gr_unittest.run(test_pwr_squelch, "test_pwr_squelch.xml")
|
grpc/grpc
|
refs/heads/master
|
tools/run_tests/xds_k8s_test_driver/framework/rpc/grpc_testing.py
|
4
|
# Copyright 2020 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This contains helpers for gRPC services defined in
https://github.com/grpc/grpc/blob/master/src/proto/grpc/testing/test.proto
"""
import logging
from typing import Iterable, Optional, Tuple
import grpc
import framework.rpc
from src.proto.grpc.testing import messages_pb2
from src.proto.grpc.testing import test_pb2_grpc
# Type aliases
_LoadBalancerStatsRequest = messages_pb2.LoadBalancerStatsRequest
LoadBalancerStatsResponse = messages_pb2.LoadBalancerStatsResponse
_LoadBalancerAccumulatedStatsRequest = messages_pb2.LoadBalancerAccumulatedStatsRequest
LoadBalancerAccumulatedStatsResponse = messages_pb2.LoadBalancerAccumulatedStatsResponse
class LoadBalancerStatsServiceClient(framework.rpc.grpc.GrpcClientHelper):
stub: test_pb2_grpc.LoadBalancerStatsServiceStub
STATS_PARTIAL_RESULTS_TIMEOUT_SEC = 1200
STATS_ACCUMULATED_RESULTS_TIMEOUT_SEC = 600
def __init__(self, channel: grpc.Channel):
super().__init__(channel, test_pb2_grpc.LoadBalancerStatsServiceStub)
def get_client_stats(
self,
*,
num_rpcs: int,
timeout_sec: Optional[int] = STATS_PARTIAL_RESULTS_TIMEOUT_SEC,
) -> LoadBalancerStatsResponse:
if timeout_sec is None:
timeout_sec = self.STATS_PARTIAL_RESULTS_TIMEOUT_SEC
return self.call_unary_with_deadline(rpc='GetClientStats',
req=_LoadBalancerStatsRequest(
num_rpcs=num_rpcs,
timeout_sec=timeout_sec),
deadline_sec=timeout_sec,
log_level=logging.INFO)
def get_client_accumulated_stats(
self,
*,
timeout_sec: Optional[int] = None
) -> LoadBalancerAccumulatedStatsResponse:
if timeout_sec is None:
timeout_sec = self.STATS_ACCUMULATED_RESULTS_TIMEOUT_SEC
return self.call_unary_with_deadline(
rpc='GetClientAccumulatedStats',
req=_LoadBalancerAccumulatedStatsRequest(),
deadline_sec=timeout_sec,
log_level=logging.INFO)
class XdsUpdateClientConfigureServiceClient(framework.rpc.grpc.GrpcClientHelper):
stub: test_pb2_grpc.XdsUpdateClientConfigureServiceStub
CONFIGURE_TIMEOUT_SEC: int = 5
def __init__(self, channel: grpc.Channel):
super().__init__(channel,
test_pb2_grpc.XdsUpdateClientConfigureServiceStub)
def configure(
self,
*,
rpc_types: Iterable[str],
metadata: Optional[Iterable[Tuple[str, str, str]]] = None,
app_timeout: Optional[int] = None,
timeout_sec: int = CONFIGURE_TIMEOUT_SEC,
) -> None:
request = messages_pb2.ClientConfigureRequest()
for rpc_type in rpc_types:
request.types.append(
messages_pb2.ClientConfigureRequest.RpcType.Value(rpc_type))
if metadata:
for entry in metadata:
request.metadata.append(
messages_pb2.ClientConfigureRequest.Metadata(
type=messages_pb2.ClientConfigureRequest.RpcType.Value(
entry[0]),
key=entry[1],
value=entry[2],
))
if app_timeout:
request.timeout_sec = app_timeout
# Configure's response is empty
self.call_unary_with_deadline(rpc='Configure',
req=request,
deadline_sec=timeout_sec,
log_level=logging.INFO)
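# A minimal usage sketch, assuming a test client exposes its stats service on
# localhost:10000 (the address and the RPC count below are assumptions for
# demonstration; neither is defined by this module):
if __name__ == '__main__':
    channel = grpc.insecure_channel('localhost:10000')
    lb_stats = LoadBalancerStatsServiceClient(channel)
    response = lb_stats.get_client_stats(num_rpcs=100, timeout_sec=60)
    print(response)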
|
saguziel/incubator-airflow
|
refs/heads/master
|
airflow/utils/state.py
|
55
|
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import unicode_literals
from builtins import object
class State(object):
"""
Static class with task instance states constants and color method to
avoid hardcoding.
"""
# scheduler
NONE = None
REMOVED = "removed"
SCHEDULED = "scheduled"
# set by the executor (t.b.d.)
# LAUNCHED = "launched"
# set by a task
QUEUED = "queued"
RUNNING = "running"
SUCCESS = "success"
SHUTDOWN = "shutdown" # External request to shut down
FAILED = "failed"
UP_FOR_RETRY = "up_for_retry"
UPSTREAM_FAILED = "upstream_failed"
SKIPPED = "skipped"
task_states = (
SUCCESS,
RUNNING,
FAILED,
UPSTREAM_FAILED,
UP_FOR_RETRY,
QUEUED,
)
dag_states = (
SUCCESS,
RUNNING,
FAILED,
)
state_color = {
QUEUED: 'gray',
RUNNING: 'lime',
SUCCESS: 'green',
SHUTDOWN: 'blue',
FAILED: 'red',
UP_FOR_RETRY: 'gold',
UPSTREAM_FAILED: 'orange',
SKIPPED: 'pink',
REMOVED: 'lightgrey',
SCHEDULED: 'white',
}
@classmethod
def color(cls, state):
if state in cls.state_color:
return cls.state_color[state]
else:
return 'white'
@classmethod
def color_fg(cls, state):
color = cls.color(state)
if color in ['green', 'red']:
return 'white'
else:
return 'black'
@classmethod
def finished(cls):
"""
A list of states indicating that a task started and completed a
run attempt. Note that the attempt could have resulted in failure or
have been interrupted; in any case, it is no longer running.
"""
return [
cls.SUCCESS,
cls.SHUTDOWN,
cls.FAILED,
cls.SKIPPED,
]
@classmethod
def unfinished(cls):
"""
A list of states indicating that a task either has not completed
a run or has not even started.
"""
return [
cls.NONE,
cls.SCHEDULED,
cls.QUEUED,
cls.RUNNING,
cls.UP_FOR_RETRY
]
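# A minimal usage sketch (illustrative only; not part of the module's API)
# exercising the helpers above:
if __name__ == '__main__':
    print(State.color(State.RUNNING))                # 'lime'
    print(State.color('no_such_state'))              # falls back to 'white'
    print(State.UP_FOR_RETRY in State.unfinished())  # True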
|
charukiewicz/beer-manager
|
refs/heads/master
|
venv/lib/python3.4/site-packages/pip/_vendor/html5lib/treewalkers/_base.py
|
310
|
from __future__ import absolute_import, division, unicode_literals
from pip._vendor.six import text_type, string_types
import gettext
_ = gettext.gettext
from xml.dom import Node
DOCUMENT = Node.DOCUMENT_NODE
DOCTYPE = Node.DOCUMENT_TYPE_NODE
TEXT = Node.TEXT_NODE
ELEMENT = Node.ELEMENT_NODE
COMMENT = Node.COMMENT_NODE
ENTITY = Node.ENTITY_NODE
UNKNOWN = "<#UNKNOWN#>"
from ..constants import voidElements, spaceCharacters
spaceCharacters = "".join(spaceCharacters)
def to_text(s, blank_if_none=True):
"""Wrapper around six.text_type to convert None to empty string"""
if s is None:
if blank_if_none:
return ""
else:
return None
elif isinstance(s, text_type):
return s
else:
return text_type(s)
def is_text_or_none(string):
"""Wrapper around isinstance(string_types) or is None"""
return string is None or isinstance(string, string_types)
class TreeWalker(object):
def __init__(self, tree):
self.tree = tree
def __iter__(self):
raise NotImplementedError
def error(self, msg):
return {"type": "SerializeError", "data": msg}
def emptyTag(self, namespace, name, attrs, hasChildren=False):
assert namespace is None or isinstance(namespace, string_types), type(namespace)
assert isinstance(name, string_types), type(name)
assert all((namespace is None or isinstance(namespace, string_types)) and
isinstance(name, string_types) and
isinstance(value, string_types)
for (namespace, name), value in attrs.items())
yield {"type": "EmptyTag", "name": to_text(name, False),
"namespace": to_text(namespace),
"data": attrs}
if hasChildren:
yield self.error(_("Void element has children"))
def startTag(self, namespace, name, attrs):
assert namespace is None or isinstance(namespace, string_types), type(namespace)
assert isinstance(name, string_types), type(name)
assert all((namespace is None or isinstance(namespace, string_types)) and
isinstance(name, string_types) and
isinstance(value, string_types)
for (namespace, name), value in attrs.items())
return {"type": "StartTag",
"name": text_type(name),
"namespace": to_text(namespace),
"data": dict(((to_text(namespace, False), to_text(name)),
to_text(value, False))
for (namespace, name), value in attrs.items())}
def endTag(self, namespace, name):
assert namespace is None or isinstance(namespace, string_types), type(namespace)
        assert isinstance(name, string_types), type(name)
return {"type": "EndTag",
"name": to_text(name, False),
"namespace": to_text(namespace),
"data": {}}
def text(self, data):
assert isinstance(data, string_types), type(data)
data = to_text(data)
middle = data.lstrip(spaceCharacters)
left = data[:len(data) - len(middle)]
if left:
yield {"type": "SpaceCharacters", "data": left}
data = middle
middle = data.rstrip(spaceCharacters)
right = data[len(middle):]
if middle:
yield {"type": "Characters", "data": middle}
if right:
yield {"type": "SpaceCharacters", "data": right}
def comment(self, data):
assert isinstance(data, string_types), type(data)
return {"type": "Comment", "data": text_type(data)}
def doctype(self, name, publicId=None, systemId=None, correct=True):
assert is_text_or_none(name), type(name)
assert is_text_or_none(publicId), type(publicId)
assert is_text_or_none(systemId), type(systemId)
return {"type": "Doctype",
"name": to_text(name),
"publicId": to_text(publicId),
"systemId": to_text(systemId),
"correct": to_text(correct)}
def entity(self, name):
assert isinstance(name, string_types), type(name)
return {"type": "Entity", "name": text_type(name)}
def unknown(self, nodeType):
return self.error(_("Unknown node type: ") + nodeType)
class NonRecursiveTreeWalker(TreeWalker):
def getNodeDetails(self, node):
raise NotImplementedError
def getFirstChild(self, node):
raise NotImplementedError
def getNextSibling(self, node):
raise NotImplementedError
def getParentNode(self, node):
raise NotImplementedError
def __iter__(self):
currentNode = self.tree
while currentNode is not None:
details = self.getNodeDetails(currentNode)
type, details = details[0], details[1:]
hasChildren = False
if type == DOCTYPE:
yield self.doctype(*details)
elif type == TEXT:
for token in self.text(*details):
yield token
elif type == ELEMENT:
namespace, name, attributes, hasChildren = details
if name in voidElements:
for token in self.emptyTag(namespace, name, attributes,
hasChildren):
yield token
hasChildren = False
else:
yield self.startTag(namespace, name, attributes)
elif type == COMMENT:
yield self.comment(details[0])
elif type == ENTITY:
yield self.entity(details[0])
elif type == DOCUMENT:
hasChildren = True
else:
yield self.unknown(details[0])
if hasChildren:
firstChild = self.getFirstChild(currentNode)
else:
firstChild = None
if firstChild is not None:
currentNode = firstChild
else:
while currentNode is not None:
details = self.getNodeDetails(currentNode)
type, details = details[0], details[1:]
if type == ELEMENT:
namespace, name, attributes, hasChildren = details
if name not in voidElements:
yield self.endTag(namespace, name)
if self.tree is currentNode:
currentNode = None
break
nextSibling = self.getNextSibling(currentNode)
if nextSibling is not None:
currentNode = nextSibling
break
else:
currentNode = self.getParentNode(currentNode)
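# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the vendored module): consuming the token
# stream produced by a concrete walker. getTreeWalker("dom") and
# HTMLSerializer are real html5lib APIs; the `document` argument is assumed
# to be an already-parsed DOM document.
def _example_serialize(document):  # documentation only
    from pip._vendor.html5lib import treewalkers, serializer
    walker = treewalkers.getTreeWalker("dom")
    # walker(document) yields dicts such as {"type": "StartTag", ...},
    # exactly the token shapes built by the methods above.
    return serializer.HTMLSerializer().render(walker(document))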
|
pkoutsias/SickRage
|
refs/heads/master
|
lib/lockfile/sqlitelockfile.py
|
45
|
from __future__ import absolute_import, division
import time
import os
try:
unicode
except NameError:
unicode = str
from . import LockBase, NotLocked, NotMyLock, LockTimeout, AlreadyLocked
class SQLiteLockFile(LockBase):
"Demonstrate SQL-based locking."
testdb = None
def __init__(self, path, threaded=True, timeout=None):
"""
>>> lock = SQLiteLockFile('somefile')
>>> lock = SQLiteLockFile('somefile', threaded=False)
"""
LockBase.__init__(self, path, threaded, timeout)
self.lock_file = unicode(self.lock_file)
self.unique_name = unicode(self.unique_name)
if SQLiteLockFile.testdb is None:
import tempfile
_fd, testdb = tempfile.mkstemp()
os.close(_fd)
os.unlink(testdb)
del _fd, tempfile
SQLiteLockFile.testdb = testdb
import sqlite3
self.connection = sqlite3.connect(SQLiteLockFile.testdb)
c = self.connection.cursor()
try:
c.execute("create table locks"
"("
" lock_file varchar(32),"
" unique_name varchar(32)"
")")
except sqlite3.OperationalError:
pass
else:
self.connection.commit()
import atexit
atexit.register(os.unlink, SQLiteLockFile.testdb)
def acquire(self, timeout=None):
timeout = timeout if timeout is not None else self.timeout
end_time = time.time()
if timeout is not None and timeout > 0:
end_time += timeout
if timeout is None:
wait = 0.1
elif timeout <= 0:
wait = 0
else:
wait = timeout / 10
cursor = self.connection.cursor()
while True:
if not self.is_locked():
# Not locked. Try to lock it.
cursor.execute("insert into locks"
" (lock_file, unique_name)"
" values"
" (?, ?)",
(self.lock_file, self.unique_name))
self.connection.commit()
# Check to see if we are the only lock holder.
cursor.execute("select * from locks"
" where unique_name = ?",
(self.unique_name,))
rows = cursor.fetchall()
if len(rows) > 1:
# Nope. Someone else got there. Remove our lock.
cursor.execute("delete from locks"
" where unique_name = ?",
(self.unique_name,))
self.connection.commit()
else:
# Yup. We're done, so go home.
return
else:
# Check to see if we are the only lock holder.
cursor.execute("select * from locks"
" where unique_name = ?",
(self.unique_name,))
rows = cursor.fetchall()
if len(rows) == 1:
# We're the locker, so go home.
return
# Maybe we should wait a bit longer.
if timeout is not None and time.time() > end_time:
if timeout > 0:
# No more waiting.
raise LockTimeout("Timeout waiting to acquire"
" lock for %s" %
self.path)
else:
                    # Someone else has the lock and we are impatient.
raise AlreadyLocked("%s is already locked" % self.path)
# Well, okay. We'll give it a bit longer.
time.sleep(wait)
def release(self):
if not self.is_locked():
raise NotLocked("%s is not locked" % self.path)
if not self.i_am_locking():
raise NotMyLock("%s is locked, but not by me (by %s)" %
(self.unique_name, self._who_is_locking()))
cursor = self.connection.cursor()
cursor.execute("delete from locks"
" where unique_name = ?",
(self.unique_name,))
self.connection.commit()
def _who_is_locking(self):
cursor = self.connection.cursor()
cursor.execute("select unique_name from locks"
" where lock_file = ?",
(self.lock_file,))
return cursor.fetchone()[0]
def is_locked(self):
cursor = self.connection.cursor()
cursor.execute("select * from locks"
" where lock_file = ?",
(self.lock_file,))
rows = cursor.fetchall()
return not not rows
def i_am_locking(self):
cursor = self.connection.cursor()
cursor.execute("select * from locks"
" where lock_file = ?"
" and unique_name = ?",
(self.lock_file, self.unique_name))
return not not cursor.fetchall()
def break_lock(self):
cursor = self.connection.cursor()
cursor.execute("delete from locks"
" where lock_file = ?",
(self.lock_file,))
self.connection.commit()
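# Illustrative usage sketch (not part of the library): acquire with a
# timeout, then release; 'somefile' is an arbitrary example path.
def _example_usage():  # documentation only
    lock = SQLiteLockFile('somefile', threaded=False)
    try:
        lock.acquire(timeout=5)  # raises LockTimeout after ~5 seconds
    except LockTimeout:
        return  # someone else held the lock for too long
    try:
        pass  # ... do work while holding the lock ...
    finally:
        lock.release()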
|
donniexyz/calligra
|
refs/heads/master
|
3rdparty/google-breakpad/src/tools/gyp/test/generator-output/actions/subdir2/make-file.py
|
973
|
#!/usr/bin/env python
# Copyright (c) 2009 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
contents = "Hello from make-file.py\n"
open(sys.argv[1], 'wb').write(contents)
|
swt30/beets
|
refs/heads/master
|
test/test_mediafile.py
|
2
|
# -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Automatically-generated blanket testing for the MediaFile metadata
layer.
"""
from __future__ import division, absolute_import, print_function
import os
import shutil
import tempfile
import datetime
import time
from test import _common
from test._common import unittest
from beets.mediafile import MediaFile, MediaField, Image, \
MP3DescStorageStyle, StorageStyle, MP4StorageStyle, \
ASFStorageStyle, ImageType, CoverArtField
from beets.library import Item
from beets.plugins import BeetsPlugin
class ArtTestMixin(object):
"""Test reads and writes of the ``art`` property.
"""
@property
def png_data(self):
if not self._png_data:
with open(os.path.join(_common.RSRC, 'image-2x3.png'), 'rb') as f:
self._png_data = f.read()
return self._png_data
_png_data = None
@property
def jpg_data(self):
if not self._jpg_data:
with open(os.path.join(_common.RSRC, 'image-2x3.jpg'), 'rb') as f:
self._jpg_data = f.read()
return self._jpg_data
_jpg_data = None
@property
def tiff_data(self):
if not self._jpg_data:
with open(os.path.join(_common.RSRC, 'image-2x3.tiff'), 'rb') as f:
self._jpg_data = f.read()
return self._jpg_data
_jpg_data = None
def test_set_png_art(self):
mediafile = self._mediafile_fixture('empty')
mediafile.art = self.png_data
mediafile.save()
mediafile = MediaFile(mediafile.path)
self.assertEqual(mediafile.art, self.png_data)
def test_set_jpg_art(self):
mediafile = self._mediafile_fixture('empty')
mediafile.art = self.jpg_data
mediafile.save()
mediafile = MediaFile(mediafile.path)
self.assertEqual(mediafile.art, self.jpg_data)
def test_delete_art(self):
mediafile = self._mediafile_fixture('empty')
mediafile.art = self.jpg_data
mediafile.save()
mediafile = MediaFile(mediafile.path)
self.assertIsNotNone(mediafile.art)
del mediafile.art
mediafile.save()
mediafile = MediaFile(mediafile.path)
self.assertIsNone(mediafile.art)
class ImageStructureTestMixin(ArtTestMixin):
"""Test reading and writing multiple image tags.
The tests use the `image` media file fixture. The tags of these files
    include two images, one in PNG format and the other in JPEG format. If
    the tag format supports it, they also include additional metadata.
"""
def test_read_image_structures(self):
mediafile = self._mediafile_fixture('image')
self.assertEqual(len(mediafile.images), 2)
image = next(i for i in mediafile.images
if i.mime_type == 'image/png')
self.assertEqual(image.data, self.png_data)
self.assertExtendedImageAttributes(image, desc=u'album cover',
type=ImageType.front)
image = next(i for i in mediafile.images
if i.mime_type == 'image/jpeg')
self.assertEqual(image.data, self.jpg_data)
self.assertExtendedImageAttributes(image, desc=u'the artist',
type=ImageType.artist)
def test_set_image_structure(self):
mediafile = self._mediafile_fixture('empty')
image = Image(data=self.png_data, desc=u'album cover',
type=ImageType.front)
mediafile.images = [image]
mediafile.save()
mediafile = MediaFile(mediafile.path)
self.assertEqual(len(mediafile.images), 1)
image = mediafile.images[0]
self.assertEqual(image.data, self.png_data)
self.assertEqual(image.mime_type, 'image/png')
self.assertExtendedImageAttributes(image, desc=u'album cover',
type=ImageType.front)
def test_add_image_structure(self):
mediafile = self._mediafile_fixture('image')
self.assertEqual(len(mediafile.images), 2)
image = Image(data=self.png_data, desc=u'the composer',
type=ImageType.composer)
mediafile.images += [image]
mediafile.save()
mediafile = MediaFile(mediafile.path)
self.assertEqual(len(mediafile.images), 3)
images = (i for i in mediafile.images if i.desc == u'the composer')
image = next(images, None)
self.assertExtendedImageAttributes(
image, desc=u'the composer', type=ImageType.composer
)
def test_delete_image_structures(self):
mediafile = self._mediafile_fixture('image')
self.assertEqual(len(mediafile.images), 2)
del mediafile.images
mediafile.save()
mediafile = MediaFile(mediafile.path)
self.assertEqual(len(mediafile.images), 0)
def test_guess_cover(self):
mediafile = self._mediafile_fixture('image')
self.assertEqual(len(mediafile.images), 2)
cover = CoverArtField.guess_cover_image(mediafile.images)
self.assertEqual(cover.desc, u'album cover')
self.assertEqual(mediafile.art, cover.data)
def assertExtendedImageAttributes(self, image, **kwargs):
"""Ignore extended image attributes in the base tests.
"""
pass
class ExtendedImageStructureTestMixin(ImageStructureTestMixin):
"""Checks for additional attributes in the image structure."""
def assertExtendedImageAttributes(self, image, desc=None, type=None):
self.assertEqual(image.desc, desc)
self.assertEqual(image.type, type)
def test_add_tiff_image(self):
mediafile = self._mediafile_fixture('image')
self.assertEqual(len(mediafile.images), 2)
image = Image(data=self.tiff_data, desc=u'the composer',
type=ImageType.composer)
mediafile.images += [image]
mediafile.save()
mediafile = MediaFile(mediafile.path)
self.assertEqual(len(mediafile.images), 3)
# WMA does not preserve the order, so we have to work around this
        image = next(i for i in mediafile.images
                     if i.mime_type == 'image/tiff')
self.assertExtendedImageAttributes(
image, desc=u'the composer', type=ImageType.composer)
class LazySaveTestMixin(object):
"""Mediafile should only write changes when tags have changed
"""
@unittest.skip(u'not yet implemented')
def test_unmodified(self):
mediafile = self._mediafile_fixture('full')
mtime = self._set_past_mtime(mediafile.path)
self.assertEqual(os.stat(mediafile.path).st_mtime, mtime)
mediafile.save()
self.assertEqual(os.stat(mediafile.path).st_mtime, mtime)
@unittest.skip(u'not yet implemented')
def test_same_tag_value(self):
mediafile = self._mediafile_fixture('full')
mtime = self._set_past_mtime(mediafile.path)
self.assertEqual(os.stat(mediafile.path).st_mtime, mtime)
mediafile.title = mediafile.title
mediafile.save()
self.assertEqual(os.stat(mediafile.path).st_mtime, mtime)
def test_update_same_tag_value(self):
mediafile = self._mediafile_fixture('full')
mtime = self._set_past_mtime(mediafile.path)
self.assertEqual(os.stat(mediafile.path).st_mtime, mtime)
mediafile.update({'title': mediafile.title})
mediafile.save()
self.assertEqual(os.stat(mediafile.path).st_mtime, mtime)
@unittest.skip(u'not yet implemented')
def test_tag_value_change(self):
mediafile = self._mediafile_fixture('full')
mtime = self._set_past_mtime(mediafile.path)
self.assertEqual(os.stat(mediafile.path).st_mtime, mtime)
mediafile.title = mediafile.title
mediafile.album = u'another'
mediafile.save()
self.assertNotEqual(os.stat(mediafile.path).st_mtime, mtime)
def test_update_changed_tag_value(self):
mediafile = self._mediafile_fixture('full')
mtime = self._set_past_mtime(mediafile.path)
self.assertEqual(os.stat(mediafile.path).st_mtime, mtime)
mediafile.update({'title': mediafile.title, 'album': u'another'})
mediafile.save()
self.assertNotEqual(os.stat(mediafile.path).st_mtime, mtime)
def _set_past_mtime(self, path):
mtime = round(time.time() - 10000)
os.utime(path, (mtime, mtime))
return mtime
class GenreListTestMixin(object):
"""Tests access to the ``genres`` property as a list.
"""
def test_read_genre_list(self):
mediafile = self._mediafile_fixture('full')
self.assertItemsEqual(mediafile.genres, ['the genre'])
def test_write_genre_list(self):
mediafile = self._mediafile_fixture('empty')
mediafile.genres = [u'one', u'two']
mediafile.save()
mediafile = MediaFile(mediafile.path)
self.assertItemsEqual(mediafile.genres, [u'one', u'two'])
def test_write_genre_list_get_first(self):
mediafile = self._mediafile_fixture('empty')
mediafile.genres = [u'one', u'two']
mediafile.save()
mediafile = MediaFile(mediafile.path)
self.assertEqual(mediafile.genre, u'one')
def test_append_genre_list(self):
mediafile = self._mediafile_fixture('full')
self.assertEqual(mediafile.genre, u'the genre')
mediafile.genres += [u'another']
mediafile.save()
mediafile = MediaFile(mediafile.path)
self.assertItemsEqual(mediafile.genres, [u'the genre', u'another'])
field_extension = MediaField(
MP3DescStorageStyle(b'customtag'),
MP4StorageStyle(b'----:com.apple.iTunes:customtag'),
StorageStyle(b'customtag'),
ASFStorageStyle(b'customtag'),
)
class ExtendedFieldTestMixin(object):
def test_extended_field_write(self):
plugin = BeetsPlugin()
plugin.add_media_field('customtag', field_extension)
mediafile = self._mediafile_fixture('empty')
mediafile.customtag = u'F#'
mediafile.save()
mediafile = MediaFile(mediafile.path)
self.assertEqual(mediafile.customtag, u'F#')
delattr(MediaFile, 'customtag')
Item._media_fields.remove('customtag')
def test_write_extended_tag_from_item(self):
plugin = BeetsPlugin()
plugin.add_media_field('customtag', field_extension)
mediafile = self._mediafile_fixture('empty')
self.assertIsNone(mediafile.customtag)
item = Item(path=mediafile.path, customtag=u'Gb')
item.write()
mediafile = MediaFile(mediafile.path)
self.assertEqual(mediafile.customtag, u'Gb')
delattr(MediaFile, 'customtag')
Item._media_fields.remove('customtag')
def test_read_flexible_attribute_from_file(self):
plugin = BeetsPlugin()
plugin.add_media_field('customtag', field_extension)
mediafile = self._mediafile_fixture('empty')
mediafile.update({'customtag': u'F#'})
mediafile.save()
item = Item.from_path(mediafile.path)
self.assertEqual(item['customtag'], u'F#')
delattr(MediaFile, 'customtag')
Item._media_fields.remove('customtag')
def test_invalid_descriptor(self):
with self.assertRaises(ValueError) as cm:
MediaFile.add_field('somekey', True)
self.assertIn(u'must be an instance of MediaField',
unicode(cm.exception))
def test_overwrite_property(self):
with self.assertRaises(ValueError) as cm:
MediaFile.add_field('artist', MediaField())
self.assertIn(u'property "artist" already exists',
unicode(cm.exception))
class ReadWriteTestBase(ArtTestMixin, GenreListTestMixin,
ExtendedFieldTestMixin):
"""Test writing and reading tags. Subclasses must set ``extension`` and
``audio_properties``.
"""
full_initial_tags = {
'title': u'full',
'artist': u'the artist',
'album': u'the album',
'genre': u'the genre',
'composer': u'the composer',
'grouping': u'the grouping',
'year': 2001,
'month': None,
'day': None,
'date': datetime.date(2001, 1, 1),
'track': 2,
'tracktotal': 3,
'disc': 4,
'disctotal': 5,
'lyrics': u'the lyrics',
'comments': u'the comments',
'bpm': 6,
'comp': True,
'mb_trackid': '8b882575-08a5-4452-a7a7-cbb8a1531f9e',
'mb_albumid': '9e873859-8aa4-4790-b985-5a953e8ef628',
'mb_artistid': '7cf0ea9d-86b9-4dad-ba9e-2355a64899ea',
'art': None,
'label': u'the label',
}
tag_fields = [
'title',
'artist',
'album',
'genre',
'composer',
'grouping',
'year',
'month',
'day',
'date',
'track',
'tracktotal',
'disc',
'disctotal',
'lyrics',
'comments',
'bpm',
'comp',
'mb_trackid',
'mb_albumid',
'mb_artistid',
'art',
'label',
'rg_track_peak',
'rg_track_gain',
'rg_album_peak',
'rg_album_gain',
'albumartist',
'mb_albumartistid',
'artist_sort',
'albumartist_sort',
'acoustid_fingerprint',
'acoustid_id',
'mb_releasegroupid',
'asin',
'catalognum',
'disctitle',
'script',
'language',
'country',
'albumstatus',
'media',
'albumdisambig',
'artist_credit',
'albumartist_credit',
'original_year',
'original_month',
'original_day',
'original_date',
'initial_key',
]
def setUp(self):
self.temp_dir = tempfile.mkdtemp()
def tearDown(self):
if os.path.isdir(self.temp_dir):
shutil.rmtree(self.temp_dir)
def test_read_audio_properties(self):
mediafile = self._mediafile_fixture('full')
for key, value in self.audio_properties.items():
if isinstance(value, float):
self.assertAlmostEqual(getattr(mediafile, key), value,
delta=0.1)
else:
self.assertEqual(getattr(mediafile, key), value)
def test_read_full(self):
mediafile = self._mediafile_fixture('full')
self.assertTags(mediafile, self.full_initial_tags)
def test_read_empty(self):
mediafile = self._mediafile_fixture('empty')
for field in self.tag_fields:
self.assertIsNone(getattr(mediafile, field))
def test_write_empty(self):
mediafile = self._mediafile_fixture('empty')
tags = self._generate_tags()
for key, value in tags.items():
setattr(mediafile, key, value)
mediafile.save()
mediafile = MediaFile(mediafile.path)
self.assertTags(mediafile, tags)
def test_update_empty(self):
mediafile = self._mediafile_fixture('empty')
tags = self._generate_tags()
mediafile.update(tags)
mediafile.save()
mediafile = MediaFile(mediafile.path)
self.assertTags(mediafile, tags)
def test_overwrite_full(self):
mediafile = self._mediafile_fixture('full')
tags = self._generate_tags()
for key, value in tags.items():
setattr(mediafile, key, value)
mediafile.save()
        # Write a second time; the tags are already set, exercising overwrite
for key, value in tags.items():
setattr(mediafile, key, value)
mediafile.save()
mediafile = MediaFile(mediafile.path)
self.assertTags(mediafile, tags)
def test_update_full(self):
mediafile = self._mediafile_fixture('full')
tags = self._generate_tags()
mediafile.update(tags)
mediafile.save()
        # Write a second time; the tags are already set, exercising overwrite
mediafile.update(tags)
mediafile.save()
mediafile = MediaFile(mediafile.path)
self.assertTags(mediafile, tags)
def test_write_date_components(self):
mediafile = self._mediafile_fixture('full')
mediafile.year = 2001
mediafile.month = 1
mediafile.day = 2
mediafile.original_year = 1999
mediafile.original_month = 12
mediafile.original_day = 30
mediafile.save()
mediafile = MediaFile(mediafile.path)
self.assertEqual(mediafile.year, 2001)
self.assertEqual(mediafile.month, 1)
self.assertEqual(mediafile.day, 2)
self.assertEqual(mediafile.date, datetime.date(2001, 1, 2))
self.assertEqual(mediafile.original_year, 1999)
self.assertEqual(mediafile.original_month, 12)
self.assertEqual(mediafile.original_day, 30)
self.assertEqual(mediafile.original_date, datetime.date(1999, 12, 30))
def test_write_incomplete_date_components(self):
mediafile = self._mediafile_fixture('empty')
mediafile.year = 2001
mediafile.month = None
mediafile.day = 2
mediafile.save()
mediafile = MediaFile(mediafile.path)
self.assertEqual(mediafile.year, 2001)
self.assertIsNone(mediafile.month)
self.assertIsNone(mediafile.day)
self.assertEqual(mediafile.date, datetime.date(2001, 1, 1))
def test_write_dates(self):
mediafile = self._mediafile_fixture('full')
mediafile.date = datetime.date(2001, 1, 2)
mediafile.original_date = datetime.date(1999, 12, 30)
mediafile.save()
mediafile = MediaFile(mediafile.path)
self.assertEqual(mediafile.year, 2001)
self.assertEqual(mediafile.month, 1)
self.assertEqual(mediafile.day, 2)
self.assertEqual(mediafile.date, datetime.date(2001, 1, 2))
self.assertEqual(mediafile.original_year, 1999)
self.assertEqual(mediafile.original_month, 12)
self.assertEqual(mediafile.original_day, 30)
self.assertEqual(mediafile.original_date, datetime.date(1999, 12, 30))
def test_write_packed(self):
mediafile = self._mediafile_fixture('empty')
mediafile.tracktotal = 2
mediafile.track = 1
mediafile.save()
mediafile = MediaFile(mediafile.path)
self.assertEqual(mediafile.track, 1)
self.assertEqual(mediafile.tracktotal, 2)
def test_write_counters_without_total(self):
mediafile = self._mediafile_fixture('full')
self.assertEqual(mediafile.track, 2)
self.assertEqual(mediafile.tracktotal, 3)
self.assertEqual(mediafile.disc, 4)
self.assertEqual(mediafile.disctotal, 5)
mediafile.track = 10
delattr(mediafile, 'tracktotal')
mediafile.disc = 10
delattr(mediafile, 'disctotal')
mediafile.save()
mediafile = MediaFile(mediafile.path)
self.assertEqual(mediafile.track, 10)
self.assertEqual(mediafile.tracktotal, None)
self.assertEqual(mediafile.disc, 10)
self.assertEqual(mediafile.disctotal, None)
def test_unparseable_date(self):
mediafile = self._mediafile_fixture('unparseable')
self.assertIsNone(mediafile.date)
self.assertIsNone(mediafile.year)
self.assertIsNone(mediafile.month)
self.assertIsNone(mediafile.day)
def test_delete_tag(self):
mediafile = self._mediafile_fixture('full')
keys = self.full_initial_tags.keys()
for key in set(keys) - set(['art', 'month', 'day']):
self.assertIsNotNone(getattr(mediafile, key))
for key in keys:
delattr(mediafile, key)
mediafile.save()
mediafile = MediaFile(mediafile.path)
for key in keys:
self.assertIsNone(getattr(mediafile, key))
def test_delete_packed_total(self):
mediafile = self._mediafile_fixture('full')
delattr(mediafile, 'tracktotal')
delattr(mediafile, 'disctotal')
mediafile.save()
mediafile = MediaFile(mediafile.path)
self.assertEqual(mediafile.track, self.full_initial_tags['track'])
self.assertEqual(mediafile.disc, self.full_initial_tags['disc'])
def test_delete_partial_date(self):
mediafile = self._mediafile_fixture('empty')
mediafile.date = datetime.date(2001, 12, 3)
mediafile.save()
mediafile = MediaFile(mediafile.path)
self.assertIsNotNone(mediafile.date)
self.assertIsNotNone(mediafile.year)
self.assertIsNotNone(mediafile.month)
self.assertIsNotNone(mediafile.day)
delattr(mediafile, 'month')
mediafile.save()
mediafile = MediaFile(mediafile.path)
self.assertIsNotNone(mediafile.date)
self.assertIsNotNone(mediafile.year)
self.assertIsNone(mediafile.month)
self.assertIsNone(mediafile.day)
def test_delete_year(self):
mediafile = self._mediafile_fixture('full')
self.assertIsNotNone(mediafile.date)
self.assertIsNotNone(mediafile.year)
delattr(mediafile, 'year')
mediafile.save()
mediafile = MediaFile(mediafile.path)
self.assertIsNone(mediafile.date)
self.assertIsNone(mediafile.year)
def assertTags(self, mediafile, tags):
errors = []
for key, value in tags.items():
try:
value2 = getattr(mediafile, key)
except AttributeError:
errors.append(u'Tag %s does not exist' % key)
else:
if value2 != value:
errors.append(u'Tag %s: %r != %r' % (key, value2, value))
if any(errors):
errors = [u'Tags did not match'] + errors
self.fail('\n '.join(errors))
def _mediafile_fixture(self, name):
name = name + '.' + self.extension
src = os.path.join(_common.RSRC, name)
target = os.path.join(self.temp_dir, name)
shutil.copy(src, target)
return MediaFile(target)
def _generate_tags(self, base=None):
"""Return dictionary of tags, mapping tag names to values.
"""
tags = {}
for key in self.tag_fields:
if key.startswith('rg_'):
# ReplayGain is float
tags[key] = 1.0
else:
                # use a non-ASCII hyphen to exercise unicode handling
                tags[key] = u'value\u2010%s' % key
for key in ['disc', 'disctotal', 'track', 'tracktotal', 'bpm']:
tags[key] = 1
tags['art'] = self.jpg_data
tags['comp'] = True
date = datetime.date(2001, 4, 3)
tags['date'] = date
tags['year'] = date.year
tags['month'] = date.month
tags['day'] = date.day
original_date = datetime.date(1999, 5, 6)
tags['original_date'] = original_date
tags['original_year'] = original_date.year
tags['original_month'] = original_date.month
tags['original_day'] = original_date.day
return tags
class PartialTestMixin(object):
tags_without_total = {
'track': 2,
'tracktotal': 0,
'disc': 4,
'disctotal': 0,
}
def test_read_track_without_total(self):
mediafile = self._mediafile_fixture('partial')
self.assertEqual(mediafile.track, 2)
self.assertIsNone(mediafile.tracktotal)
self.assertEqual(mediafile.disc, 4)
self.assertIsNone(mediafile.disctotal)
class MP3Test(ReadWriteTestBase, PartialTestMixin,
ExtendedImageStructureTestMixin,
unittest.TestCase):
extension = 'mp3'
audio_properties = {
'length': 1.0,
'bitrate': 80000,
'format': 'MP3',
'samplerate': 44100,
'bitdepth': 0,
'channels': 1,
}
def test_unknown_apic_type(self):
mediafile = self._mediafile_fixture('image_unknown_type')
self.assertEqual(mediafile.images[0].type, ImageType.other)
class MP4Test(ReadWriteTestBase, PartialTestMixin,
ImageStructureTestMixin, unittest.TestCase):
extension = 'm4a'
audio_properties = {
'length': 1.0,
'bitrate': 64000,
'format': 'AAC',
'samplerate': 44100,
'bitdepth': 16,
'channels': 2,
}
def test_add_tiff_image_fails(self):
mediafile = self._mediafile_fixture('empty')
with self.assertRaises(ValueError):
mediafile.images = [Image(data=self.tiff_data)]
def test_guess_cover(self):
        # There is no metadata associated with images; we pick one at random
pass
class AlacTest(ReadWriteTestBase, unittest.TestCase):
extension = 'alac.m4a'
audio_properties = {
'length': 1.0,
'bitrate': 21830,
# 'format': 'ALAC',
'samplerate': 44100,
'bitdepth': 16,
'channels': 1,
}
class MusepackTest(ReadWriteTestBase, unittest.TestCase):
extension = 'mpc'
audio_properties = {
'length': 1.0,
'bitrate': 23458,
'format': u'Musepack',
'samplerate': 44100,
'bitdepth': 0,
'channels': 2,
}
class WMATest(ReadWriteTestBase, ExtendedImageStructureTestMixin,
unittest.TestCase):
extension = 'wma'
audio_properties = {
'length': 1.0,
'bitrate': 128000,
'format': u'Windows Media',
'samplerate': 44100,
'bitdepth': 0,
'channels': 1,
}
def test_write_genre_list_get_first(self):
# WMA does not preserve list order
mediafile = self._mediafile_fixture('empty')
mediafile.genres = [u'one', u'two']
mediafile.save()
mediafile = MediaFile(mediafile.path)
self.assertIn(mediafile.genre, [u'one', u'two'])
def test_read_pure_tags(self):
mediafile = self._mediafile_fixture('pure')
self.assertEqual(mediafile.comments, u'the comments')
self.assertEqual(mediafile.title, u'the title')
self.assertEqual(mediafile.artist, u'the artist')
class OggTest(ReadWriteTestBase, ExtendedImageStructureTestMixin,
unittest.TestCase):
extension = 'ogg'
audio_properties = {
'length': 1.0,
'bitrate': 48000,
'format': u'OGG',
'samplerate': 44100,
'bitdepth': 0,
'channels': 1,
}
def test_read_date_from_year_tag(self):
mediafile = self._mediafile_fixture('year')
self.assertEqual(mediafile.year, 2000)
self.assertEqual(mediafile.date, datetime.date(2000, 1, 1))
def test_write_date_to_year_tag(self):
mediafile = self._mediafile_fixture('empty')
mediafile.year = 2000
mediafile.save()
mediafile = MediaFile(mediafile.path)
self.assertEqual(mediafile.mgfile['YEAR'], [u'2000'])
def test_legacy_coverart_tag(self):
mediafile = self._mediafile_fixture('coverart')
self.assertTrue('coverart' in mediafile.mgfile)
self.assertEqual(mediafile.art, self.png_data)
mediafile.art = self.png_data
mediafile.save()
mediafile = MediaFile(mediafile.path)
self.assertFalse('coverart' in mediafile.mgfile)
def test_date_tag_with_slashes(self):
mediafile = self._mediafile_fixture('date_with_slashes')
self.assertEqual(mediafile.year, 2005)
self.assertEqual(mediafile.month, 6)
self.assertEqual(mediafile.day, 5)
class FlacTest(ReadWriteTestBase, PartialTestMixin,
ExtendedImageStructureTestMixin,
unittest.TestCase):
extension = 'flac'
audio_properties = {
'length': 1.0,
'bitrate': 175120,
'format': u'FLAC',
'samplerate': 44100,
'bitdepth': 16,
'channels': 1,
}
class ApeTest(ReadWriteTestBase, ExtendedImageStructureTestMixin,
unittest.TestCase):
extension = 'ape'
audio_properties = {
'length': 1.0,
'bitrate': 112040,
'format': u'APE',
'samplerate': 44100,
'bitdepth': 16,
'channels': 1,
}
class WavpackTest(ReadWriteTestBase, unittest.TestCase):
extension = 'wv'
audio_properties = {
'length': 1.0,
'bitrate': 108744,
'format': u'WavPack',
'samplerate': 44100,
'bitdepth': 0,
'channels': 1,
}
class OpusTest(ReadWriteTestBase, unittest.TestCase):
extension = 'opus'
audio_properties = {
'length': 1.0,
'bitrate': 57984,
'format': u'Opus',
'samplerate': 48000,
'bitdepth': 0,
'channels': 1,
}
class AIFFTest(ReadWriteTestBase, unittest.TestCase):
extension = 'aiff'
audio_properties = {
'length': 1.0,
'bitrate': 705600,
'format': u'AIFF',
'samplerate': 44100,
'bitdepth': 0,
'channels': 1,
}
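# Illustrative sketch (not part of the suite): covering a new container
# format only needs a fixture file plus a subclass naming its extension and
# expected audio properties. The values below are made up, so the class is
# left commented out to keep the test loader from collecting it.
# class XyzTest(ReadWriteTestBase, unittest.TestCase):
#     extension = 'xyz'
#     audio_properties = {
#         'length': 1.0,
#         'bitrate': 128000,
#         'format': u'XYZ',
#         'samplerate': 44100,
#         'bitdepth': 16,
#         'channels': 2,
#     }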
class MediaFieldTest(unittest.TestCase):
def test_properties_from_fields(self):
path = os.path.join(_common.RSRC, 'full.mp3')
mediafile = MediaFile(path)
for field in MediaFile.fields():
self.assertTrue(hasattr(mediafile, field))
def test_properties_from_readable_fields(self):
path = os.path.join(_common.RSRC, 'full.mp3')
mediafile = MediaFile(path)
for field in MediaFile.readable_fields():
self.assertTrue(hasattr(mediafile, field))
def test_known_fields(self):
fields = list(ReadWriteTestBase.tag_fields)
fields.extend(('encoder', 'images', 'genres', 'albumtype'))
self.assertItemsEqual(MediaFile.fields(), fields)
def test_fields_in_readable_fields(self):
readable = MediaFile.readable_fields()
for field in MediaFile.fields():
self.assertIn(field, readable)
def suite():
return unittest.TestLoader().loadTestsFromName(__name__)
if __name__ == b'__main__':
unittest.main(defaultTest='suite')
|
varunarya10/boto
|
refs/heads/develop
|
tests/compat.py
|
115
|
# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
# Use unittest2 for older versions of Python
try:
import unittest2 as unittest
except ImportError:
import unittest
# Use the third-party ordereddict package for older versions of Python
try:
from collections import OrderedDict
except ImportError:
from ordereddict import OrderedDict
# Use the standard library's unittest.mock if possible; the third-party
# mock package does not support Python 3.4.
try:
from unittest import mock
except ImportError:
import mock
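# Illustrative usage (not part of boto): test modules import the shims from
# here instead of choosing per interpreter. The patched target 'os.getcwd'
# is an arbitrary example.
def _example_test_usage():  # documentation only
    import os
    with mock.patch('os.getcwd', return_value='/tmp'):
        assert os.getcwd() == '/tmp'
    assert list(OrderedDict([('a', 1), ('b', 2)])) == ['a', 'b']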
|
BT-jmichaud/bank-statement-reconcile
|
refs/heads/8.0
|
account_statement_operation_multicompany/account.py
|
15
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# Account Statement Operation Multi-company module for Odoo
# Copyright (C) 2015 Akretion (http://www.akretion.com)
# @author Alexis de Lattre <alexis.delattre@akretion.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import models, fields
class AccountStatementOperationTemplate(models.Model):
_inherit = 'account.statement.operation.template'
company_id = fields.Many2one(
'res.company', string='Company', related='account_id.company_id',
store=True, readonly=True)
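# Illustrative note (not part of the module): with related=... and
# store=True, Odoo copies the account's company onto the template and keeps
# it in sync, so templates can be filtered per company without a join,
# e.g. (model and field names as defined above):
#
#     templates = self.env['account.statement.operation.template'].search(
#         [('company_id', '=', self.env.user.company_id.id)])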
|
eedf/jeito
|
refs/heads/master
|
upkeep/admin.py
|
1
|
from django.contrib import admin
from mptt.admin import MPTTModelAdmin
from .models import Issue, Place, Priority, Skill
@admin.register(Place)
class PlaceAdmin(MPTTModelAdmin):
list_display = ('title', )
search_fields = ('title', )
@admin.register(Skill)
class SkillAdmin(admin.ModelAdmin):
list_display = ('title', )
search_fields = ('title', )
@admin.register(Priority)
class PriorityAdmin(admin.ModelAdmin):
list_display = ('title', )
search_fields = ('title', )
@admin.register(Issue)
class IssueAdmin(admin.ModelAdmin):
list_display = ('title', 'priority', 'place', 'skill')
list_filter = ('priority', 'place', 'skill')
search_fields = ('title', 'description')
raw_id_fields = ('blocks', )
|
smkr/pyclipse
|
refs/heads/master
|
plugins/org.python.pydev/tests/pysrc/extendable/help/__init__.py
|
11
|
# note: module name MUST be help
about = 'test'
|
nburn42/tensorflow
|
refs/heads/master
|
tensorflow/python/summary/summary_test.py
|
75
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.framework import summary_pb2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import meta_graph
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.summary import summary as summary_lib
class ScalarSummaryTest(test.TestCase):
def testScalarSummary(self):
with self.test_session() as s:
i = constant_op.constant(3)
with ops.name_scope('outer'):
im = summary_lib.scalar('inner', i)
summary_str = s.run(im)
summary = summary_pb2.Summary()
summary.ParseFromString(summary_str)
values = summary.value
self.assertEqual(len(values), 1)
self.assertEqual(values[0].tag, 'outer/inner')
self.assertEqual(values[0].simple_value, 3.0)
def testScalarSummaryWithFamily(self):
with self.test_session() as s:
i = constant_op.constant(7)
with ops.name_scope('outer'):
im1 = summary_lib.scalar('inner', i, family='family')
self.assertEquals(im1.op.name, 'outer/family/inner')
im2 = summary_lib.scalar('inner', i, family='family')
self.assertEquals(im2.op.name, 'outer/family/inner_1')
sm1, sm2 = s.run([im1, im2])
summary = summary_pb2.Summary()
summary.ParseFromString(sm1)
values = summary.value
self.assertEqual(len(values), 1)
self.assertEqual(values[0].tag, 'family/outer/family/inner')
self.assertEqual(values[0].simple_value, 7.0)
summary.ParseFromString(sm2)
values = summary.value
self.assertEqual(len(values), 1)
self.assertEqual(values[0].tag, 'family/outer/family/inner_1')
self.assertEqual(values[0].simple_value, 7.0)
def testSummarizingVariable(self):
with self.test_session() as s:
c = constant_op.constant(42.0)
v = variables.Variable(c)
ss = summary_lib.scalar('summary', v)
init = variables.global_variables_initializer()
s.run(init)
summ_str = s.run(ss)
summary = summary_pb2.Summary()
summary.ParseFromString(summ_str)
self.assertEqual(len(summary.value), 1)
value = summary.value[0]
self.assertEqual(value.tag, 'summary')
self.assertEqual(value.simple_value, 42.0)
def testImageSummary(self):
with self.test_session() as s:
i = array_ops.ones((5, 4, 4, 3))
with ops.name_scope('outer'):
im = summary_lib.image('inner', i, max_outputs=3)
summary_str = s.run(im)
summary = summary_pb2.Summary()
summary.ParseFromString(summary_str)
values = summary.value
self.assertEqual(len(values), 3)
tags = sorted(v.tag for v in values)
expected = sorted('outer/inner/image/{}'.format(i) for i in xrange(3))
self.assertEqual(tags, expected)
def testImageSummaryWithFamily(self):
with self.test_session() as s:
i = array_ops.ones((5, 2, 3, 1))
with ops.name_scope('outer'):
im = summary_lib.image('inner', i, max_outputs=3, family='family')
self.assertEquals(im.op.name, 'outer/family/inner')
summary_str = s.run(im)
summary = summary_pb2.Summary()
summary.ParseFromString(summary_str)
values = summary.value
self.assertEqual(len(values), 3)
tags = sorted(v.tag for v in values)
expected = sorted('family/outer/family/inner/image/{}'.format(i)
for i in xrange(3))
self.assertEqual(tags, expected)
def testHistogramSummary(self):
with self.test_session() as s:
i = array_ops.ones((5, 4, 4, 3))
with ops.name_scope('outer'):
summ_op = summary_lib.histogram('inner', i)
summary_str = s.run(summ_op)
summary = summary_pb2.Summary()
summary.ParseFromString(summary_str)
self.assertEqual(len(summary.value), 1)
self.assertEqual(summary.value[0].tag, 'outer/inner')
def testHistogramSummaryWithFamily(self):
with self.test_session() as s:
i = array_ops.ones((5, 4, 4, 3))
with ops.name_scope('outer'):
summ_op = summary_lib.histogram('inner', i, family='family')
self.assertEquals(summ_op.op.name, 'outer/family/inner')
summary_str = s.run(summ_op)
summary = summary_pb2.Summary()
summary.ParseFromString(summary_str)
self.assertEqual(len(summary.value), 1)
self.assertEqual(summary.value[0].tag, 'family/outer/family/inner')
def testAudioSummary(self):
with self.test_session() as s:
i = array_ops.ones((5, 3, 4))
with ops.name_scope('outer'):
aud = summary_lib.audio('inner', i, 0.2, max_outputs=3)
summary_str = s.run(aud)
summary = summary_pb2.Summary()
summary.ParseFromString(summary_str)
values = summary.value
self.assertEqual(len(values), 3)
tags = sorted(v.tag for v in values)
expected = sorted('outer/inner/audio/{}'.format(i) for i in xrange(3))
self.assertEqual(tags, expected)
def testAudioSummaryWithFamily(self):
with self.test_session() as s:
i = array_ops.ones((5, 3, 4))
with ops.name_scope('outer'):
aud = summary_lib.audio('inner', i, 0.2, max_outputs=3, family='family')
self.assertEquals(aud.op.name, 'outer/family/inner')
summary_str = s.run(aud)
summary = summary_pb2.Summary()
summary.ParseFromString(summary_str)
values = summary.value
self.assertEqual(len(values), 3)
tags = sorted(v.tag for v in values)
expected = sorted('family/outer/family/inner/audio/{}'.format(i)
for i in xrange(3))
self.assertEqual(tags, expected)
def testSummaryNameConversion(self):
c = constant_op.constant(3)
s = summary_lib.scalar('name with spaces', c)
self.assertEqual(s.op.name, 'name_with_spaces')
s2 = summary_lib.scalar('name with many $#illegal^: characters!', c)
self.assertEqual(s2.op.name, 'name_with_many___illegal___characters_')
s3 = summary_lib.scalar('/name/with/leading/slash', c)
self.assertEqual(s3.op.name, 'name/with/leading/slash')
def testSummaryWithFamilyMetaGraphExport(self):
with ops.name_scope('outer'):
i = constant_op.constant(11)
summ = summary_lib.scalar('inner', i)
self.assertEquals(summ.op.name, 'outer/inner')
summ_f = summary_lib.scalar('inner', i, family='family')
self.assertEquals(summ_f.op.name, 'outer/family/inner')
metagraph_def, _ = meta_graph.export_scoped_meta_graph(export_scope='outer')
with ops.Graph().as_default() as g:
meta_graph.import_scoped_meta_graph(metagraph_def, graph=g,
import_scope='new_outer')
# The summaries should exist, but with outer scope renamed.
new_summ = g.get_tensor_by_name('new_outer/inner:0')
new_summ_f = g.get_tensor_by_name('new_outer/family/inner:0')
# However, the tags are unaffected.
with self.test_session() as s:
new_summ_str, new_summ_f_str = s.run([new_summ, new_summ_f])
new_summ_pb = summary_pb2.Summary()
new_summ_pb.ParseFromString(new_summ_str)
self.assertEquals('outer/inner', new_summ_pb.value[0].tag)
new_summ_f_pb = summary_pb2.Summary()
new_summ_f_pb.ParseFromString(new_summ_f_str)
self.assertEquals('family/outer/family/inner',
new_summ_f_pb.value[0].tag)
if __name__ == '__main__':
test.main()
|
nikolay-fedotov/tempest
|
refs/heads/master
|
tempest/api_schema/response/compute/v2/servers.py
|
4
|
# Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from tempest.api_schema.response.compute import parameter_types
from tempest.api_schema.response.compute import servers
create_server = {
'status_code': [202],
'response_body': {
'type': 'object',
'properties': {
'server': {
'type': 'object',
'properties': {
'id': {'type': 'string'},
'security_groups': {'type': 'array'},
'links': parameter_types.links,
'OS-DCF:diskConfig': {'type': 'string'}
},
                # NOTE: OS-DCF:diskConfig is an API extension, and some
# environments return a response without the attribute.
# So it is not 'required'.
'required': ['id', 'security_groups', 'links']
}
},
'required': ['server']
}
}
create_server_with_admin_pass = copy.deepcopy(create_server)
create_server_with_admin_pass['response_body']['properties']['server'][
'properties'].update({'adminPass': {'type': 'string'}})
create_server_with_admin_pass['response_body']['properties']['server'][
'required'].append('adminPass')
update_server = copy.deepcopy(servers.base_update_get_server)
update_server['response_body']['properties']['server']['properties'].update({
'hostId': {'type': 'string'},
'OS-DCF:diskConfig': {'type': 'string'},
'accessIPv4': parameter_types.access_ip_v4,
'accessIPv6': parameter_types.access_ip_v6
})
update_server['response_body']['properties']['server']['required'].append(
# NOTE: OS-DCF:diskConfig and accessIPv4/v6 are API
# extensions, and some environments return a response
# without these attributes. So they are not 'required'.
'hostId'
)
get_server = copy.deepcopy(servers.base_update_get_server)
get_server['response_body']['properties']['server']['properties'].update({
'key_name': {'type': ['string', 'null']},
'hostId': {'type': 'string'},
# NOTE: Non-admin users also can see "OS-SRV-USG" and "OS-EXT-AZ"
# attributes.
'OS-SRV-USG:launched_at': {'type': ['string', 'null']},
'OS-SRV-USG:terminated_at': {'type': ['string', 'null']},
'OS-EXT-AZ:availability_zone': {'type': 'string'},
# NOTE: Admin users only can see "OS-EXT-STS" and "OS-EXT-SRV-ATTR"
# attributes.
'OS-EXT-STS:task_state': {'type': ['string', 'null']},
'OS-EXT-STS:vm_state': {'type': 'string'},
'OS-EXT-STS:power_state': {'type': 'integer'},
'OS-EXT-SRV-ATTR:host': {'type': ['string', 'null']},
'OS-EXT-SRV-ATTR:instance_name': {'type': 'string'},
'OS-EXT-SRV-ATTR:hypervisor_hostname': {'type': ['string', 'null']},
'os-extended-volumes:volumes_attached': {'type': 'array'},
'OS-DCF:diskConfig': {'type': 'string'},
'accessIPv4': parameter_types.access_ip_v4,
'accessIPv6': parameter_types.access_ip_v6,
'config_drive': {'type': 'string'}
})
get_server['response_body']['properties']['server']['required'].append(
# NOTE: OS-SRV-USG, OS-EXT-AZ, OS-EXT-STS, OS-EXT-SRV-ATTR,
# os-extended-volumes, OS-DCF and accessIPv4/v6 are API
    # extensions, and some environments return a response without
# these attributes. So they are not 'required'.
'hostId'
)
list_virtual_interfaces = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'virtual_interfaces': {
'type': 'array',
'items': {
'type': 'object',
'properties': {
'id': {'type': 'string'},
'mac_address': parameter_types.mac_address,
'OS-EXT-VIF-NET:net_id': {'type': 'string'}
},
                    # 'OS-EXT-VIF-NET:net_id' is an API extension, so it
                    # is not defined as 'required'.
'required': ['id', 'mac_address']
}
}
},
'required': ['virtual_interfaces']
}
}
attach_volume = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'volumeAttachment': {
'type': 'object',
'properties': {
'id': {'type': 'string'},
'device': {'type': 'string'},
'volumeId': {'type': 'string'},
'serverId': {'type': ['integer', 'string']}
},
'required': ['id', 'device', 'volumeId', 'serverId']
}
},
'required': ['volumeAttachment']
}
}
detach_volume = {
'status_code': [202]
}
set_get_server_metadata_item = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'meta': {
'type': 'object',
'patternProperties': {
'^.+$': {'type': 'string'}
}
}
},
'required': ['meta']
}
}
list_addresses_by_network = {
'status_code': [200],
'response_body': parameter_types.addresses
}
server_actions_confirm_resize = copy.deepcopy(
servers.server_actions_delete_password)
list_addresses = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'addresses': parameter_types.addresses
},
'required': ['addresses']
}
}
common_server_group = {
'type': 'object',
'properties': {
'id': {'type': 'string'},
'name': {'type': 'string'},
'policies': {
'type': 'array',
'items': {'type': 'string'}
},
# 'members' attribute contains the array of instance's UUID of
# instances present in server group
'members': {
'type': 'array',
'items': {'type': 'string'}
},
'metadata': {'type': 'object'}
},
'required': ['id', 'name', 'policies', 'members', 'metadata']
}
create_get_server_group = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'server_group': common_server_group
},
'required': ['server_group']
}
}
delete_server_group = {
'status_code': [204]
}
list_server_groups = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'server_groups': {
'type': 'array',
'items': common_server_group
}
},
'required': ['server_groups']
}
}
instance_actions_object = copy.deepcopy(servers.common_instance_actions)
instance_actions_object[
'properties'].update({'instance_uuid': {'type': 'string'}})
instance_actions_object['required'].extend(['instance_uuid'])
list_instance_actions = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'instanceActions': {
'type': 'array',
'items': instance_actions_object
}
},
'required': ['instanceActions']
}
}
get_instance_actions_object = copy.deepcopy(servers.common_get_instance_action)
get_instance_actions_object[
'properties'].update({'instance_uuid': {'type': 'string'}})
get_instance_actions_object['required'].extend(['instance_uuid'])
get_instance_action = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'instanceAction': get_instance_actions_object
},
'required': ['instanceAction']
}
}
list_servers_detail = copy.deepcopy(servers.base_list_servers_detail)
list_servers_detail['response_body']['properties']['servers']['items'][
'properties'].update({
'hostId': {'type': 'string'},
'OS-DCF:diskConfig': {'type': 'string'},
'accessIPv4': parameter_types.access_ip_v4,
'accessIPv6': parameter_types.access_ip_v6
})
# NOTE(GMann): OS-DCF:diskConfig and accessIPv4/v6 are API
# extensions, and some environments return a response
# without these attributes. So they are not 'required'.
list_servers_detail['response_body']['properties']['servers']['items'][
'required'].append('hostId')
rebuild_server = copy.deepcopy(update_server)
rebuild_server['status_code'] = [202]
del rebuild_server['response_body']['properties']['server'][
'properties']['OS-DCF:diskConfig']
rebuild_server_with_admin_pass = copy.deepcopy(rebuild_server)
rebuild_server_with_admin_pass['response_body']['properties']['server'][
'properties'].update({'adminPass': {'type': 'string'}})
rebuild_server_with_admin_pass['response_body']['properties']['server'][
'required'].append('adminPass')
rescue_server = {
'status_code': [200],
'response_body': {
'type': 'object',
'properties': {
'adminPass': {'type': 'string'}
},
'required': ['adminPass']
}
}
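# Illustrative sketch (not part of tempest): each dict above embeds a plain
# JSON Schema under 'response_body', so a body can be checked directly with
# the jsonschema package; the sample values here are made up.
def _example_validation():  # documentation only
    import jsonschema
    body = {'volumeAttachment': {'id': 'att-1', 'device': '/dev/vdb',
                                 'volumeId': 'vol-1', 'serverId': 'srv-1'}}
    jsonschema.validate(body, attach_volume['response_body'])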
|
Captain-Coder/tribler
|
refs/heads/devel
|
Tribler/Test/Community/Market/test_block.py
|
1
|
from __future__ import absolute_import
from twisted.internet.defer import inlineCallbacks
from Tribler.Test.test_as_server import AbstractServer
from Tribler.community.market.block import MarketBlock
from Tribler.community.market.core.assetamount import AssetAmount
from Tribler.community.market.core.assetpair import AssetPair
from Tribler.community.market.core.message import TraderId
from Tribler.community.market.core.order import OrderId, OrderNumber
from Tribler.community.market.core.tick import Ask
from Tribler.community.market.core.timeout import Timeout
from Tribler.community.market.core.timestamp import Timestamp
from Tribler.community.market.core.transaction import Transaction, TransactionId, TransactionNumber
class TestMarketBlock(AbstractServer):
"""
This class contains tests for a TrustChain block as used in the market.
"""
@inlineCallbacks
def setUp(self):
yield super(TestMarketBlock, self).setUp()
self.ask = Ask(OrderId(TraderId('0' * 40), OrderNumber(1)),
AssetPair(AssetAmount(30, 'BTC'), AssetAmount(30, 'MB')), Timeout(30), Timestamp(0.0), True)
self.bid = Ask(OrderId(TraderId('1' * 40), OrderNumber(1)),
AssetPair(AssetAmount(30, 'BTC'), AssetAmount(30, 'MB')), Timeout(30), Timestamp(0.0), False)
self.transaction = Transaction(TransactionId(TraderId('0' * 40), TransactionNumber(1)),
AssetPair(AssetAmount(30, 'BTC'), AssetAmount(30, 'MB')),
OrderId(TraderId('0' * 40), OrderNumber(1)),
OrderId(TraderId('1' * 40), OrderNumber(1)), Timestamp(0.0))
ask_tx = self.ask.to_block_dict()
bid_tx = self.bid.to_block_dict()
self.tick_block = MarketBlock()
self.tick_block.type = 'ask'
self.tick_block.transaction = {'tick': ask_tx}
self.cancel_block = MarketBlock()
self.cancel_block.type = 'cancel_order'
self.cancel_block.transaction = {'trader_id': 'a' * 40, 'order_number': 1}
self.tx_block = MarketBlock()
self.tx_block.type = 'tx_init'
self.tx_block.transaction = {
'ask': ask_tx,
'bid': bid_tx,
'tx': self.transaction.to_dictionary()
}
payment = {
'trader_id': 'a' * 40,
'transaction_number': 3,
'transferred': {
'amount': 3,
'type': 'BTC'
},
'payment_id': 'a',
'address_from': 'a',
'address_to': 'b',
'timestamp': 1234.3,
'success': True
}
self.payment_block = MarketBlock()
self.payment_block.type = 'tx_payment'
self.payment_block.transaction = {'payment': payment}
def test_tick_block(self):
"""
Test whether a tick block can be correctly verified
"""
self.assertTrue(self.tick_block.is_valid_tick_block())
self.tick_block.transaction['tick']['timeout'] = -1
self.assertFalse(self.tick_block.is_valid_tick_block())
self.tick_block.transaction['tick']['timeout'] = 3600
self.tick_block.type = 'test'
self.assertFalse(self.tick_block.is_valid_tick_block())
self.tick_block.type = 'ask'
self.tick_block.transaction['test'] = self.tick_block.transaction.pop('tick')
self.assertFalse(self.tick_block.is_valid_tick_block())
self.tick_block.transaction['tick'] = self.tick_block.transaction.pop('test')
self.tick_block.transaction['tick'].pop('timeout')
self.assertFalse(self.tick_block.is_valid_tick_block())
self.tick_block.transaction['tick']['timeout'] = "300"
self.assertFalse(self.tick_block.is_valid_tick_block())
self.tick_block.transaction['tick']['timeout'] = 300
self.tick_block.transaction['tick']['trader_id'] = 'g' * 40
self.assertFalse(self.tick_block.is_valid_tick_block())
# Make the asset pair invalid
assets = self.tick_block.transaction['tick']['assets']
self.tick_block.transaction['tick']['trader_id'] = 'a' * 40
assets['test'] = assets.pop('first')
self.assertFalse(self.tick_block.is_valid_tick_block())
assets['first'] = assets.pop('test')
assets['first']['test'] = assets['first'].pop('amount')
self.assertFalse(self.tick_block.is_valid_tick_block())
assets['first']['amount'] = assets['first']['test']
assets['second']['test'] = assets['second'].pop('amount')
self.assertFalse(self.tick_block.is_valid_tick_block())
assets['second']['amount'] = assets['second']['test']
assets['first']['amount'] = 3.4
self.assertFalse(self.tick_block.is_valid_tick_block())
assets['first']['amount'] = 2 ** 64
self.assertFalse(self.tick_block.is_valid_tick_block())
assets['first']['amount'] = 3
assets['second']['type'] = 4
self.assertFalse(self.tick_block.is_valid_tick_block())
def test_cancel_block(self):
"""
Test whether a cancel block can be correctly verified
"""
self.assertTrue(self.cancel_block.is_valid_cancel_block())
self.cancel_block.type = 'cancel'
self.assertFalse(self.cancel_block.is_valid_cancel_block())
self.cancel_block.type = 'cancel_order'
self.cancel_block.transaction.pop('trader_id')
self.assertFalse(self.cancel_block.is_valid_cancel_block())
self.cancel_block.transaction['trader_id'] = 3
self.assertFalse(self.cancel_block.is_valid_cancel_block())
def test_tx_init_done_block(self):
"""
Test whether a tx_init/tx_done block can be correctly verified
"""
self.assertTrue(self.tx_block.is_valid_tx_init_done_block())
self.tx_block.type = 'test'
self.assertFalse(self.tx_block.is_valid_tx_init_done_block())
self.tx_block.type = 'tx_init'
self.tx_block.transaction['test'] = self.tx_block.transaction.pop('ask')
self.assertFalse(self.tx_block.is_valid_tx_init_done_block())
self.tx_block.transaction['ask'] = self.tx_block.transaction.pop('test')
self.tx_block.transaction['ask']['timeout'] = 3.44
self.assertFalse(self.tx_block.is_valid_tx_init_done_block())
self.tx_block.transaction['ask']['timeout'] = 3
self.tx_block.transaction['bid']['timeout'] = 3.44
self.assertFalse(self.tx_block.is_valid_tx_init_done_block())
self.tx_block.transaction['bid']['timeout'] = 3
self.tx_block.transaction['tx'].pop('trader_id')
self.assertFalse(self.tx_block.is_valid_tx_init_done_block())
self.tx_block.transaction['tx']['trader_id'] = 'a' * 40
self.tx_block.transaction['tx']['test'] = 3
self.assertFalse(self.tx_block.is_valid_tx_init_done_block())
self.tx_block.transaction['tx'].pop('test')
self.tx_block.transaction['tx']['trader_id'] = 'a'
self.assertFalse(self.tx_block.is_valid_tx_init_done_block())
self.tx_block.transaction['tx']['trader_id'] = 'a' * 40
self.tx_block.transaction['tx']['assets']['first']['amount'] = 3.4
self.assertFalse(self.tx_block.is_valid_tx_init_done_block())
self.tx_block.transaction['tx']['assets']['first']['amount'] = 3
self.tx_block.transaction['tx']['transferred']['first']['amount'] = 3.4
self.assertFalse(self.tx_block.is_valid_tx_init_done_block())
self.tx_block.transaction['tx']['transferred']['first']['amount'] = 3
self.tx_block.transaction['tx']['transaction_number'] = 3.4
self.assertFalse(self.tx_block.is_valid_tx_init_done_block())
def test_tx_payment_block(self):
"""
Test whether a tx_payment block can be correctly verified
"""
self.assertTrue(self.payment_block.is_valid_tx_payment_block())
self.payment_block.type = 'test'
self.assertFalse(self.payment_block.is_valid_tx_payment_block())
self.payment_block.type = 'tx_payment'
self.payment_block.transaction['test'] = self.payment_block.transaction.pop('payment')
self.assertFalse(self.payment_block.is_valid_tx_payment_block())
self.payment_block.transaction['payment'] = self.payment_block.transaction.pop('test')
self.payment_block.transaction['payment'].pop('address_to')
self.assertFalse(self.payment_block.is_valid_tx_payment_block())
self.payment_block.transaction['payment']['address_to'] = 'a'
self.payment_block.transaction['payment']['test'] = 'a'
self.assertFalse(self.payment_block.is_valid_tx_payment_block())
self.payment_block.transaction['payment'].pop('test')
self.payment_block.transaction['payment']['address_to'] = 3
self.assertFalse(self.payment_block.is_valid_tx_payment_block())
self.payment_block.transaction['payment']['address_to'] = 'a'
self.payment_block.transaction['payment']['trader_id'] = 'a' * 39
self.assertFalse(self.payment_block.is_valid_tx_payment_block())
def test_is_valid_asset_pair(self):
"""
Test the method to verify whether an asset pair is valid
"""
self.assertFalse(MarketBlock.is_valid_asset_pair({'a': 'b'}))
self.assertFalse(MarketBlock.is_valid_asset_pair({'first': {'amount': 3, 'type': 'DUM1'},
'second': {'amount': 3}}))
self.assertFalse(MarketBlock.is_valid_asset_pair({'first': {'type': 'DUM1'},
'second': {'amount': 3, 'type': 'DUM2'}}))
self.assertFalse(MarketBlock.is_valid_asset_pair({'first': {'amount': "4", 'type': 'DUM1'},
'second': {'amount': 3, 'type': 'DUM2'}}))
self.assertFalse(MarketBlock.is_valid_asset_pair({'first': {'amount': 4, 'type': 'DUM1'},
'second': {'amount': "3", 'type': 'DUM2'}}))
self.assertFalse(MarketBlock.is_valid_asset_pair({'first': {'amount': -4, 'type': 'DUM1'},
'second': {'amount': 3, 'type': 'DUM2'}}))
|
ErykB2000/home-assistant
|
refs/heads/master
|
homeassistant/components/notify/nma.py
|
10
|
"""
homeassistant.components.notify.nma
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
NMA (Notify My Android) notification service.
Configuration:
To use the NMA notifier you will need to add something like the following
to your config/configuration.yaml
notify:
platform: nma
api_key: YOUR_API_KEY
VARIABLES:
api_key
*Required
Enter the API key for NMA. Go to https://www.notifymyandroid.com and create a
new API key to use with Home Assistant.
Details for the API : https://www.notifymyandroid.com/api.jsp
"""
import logging
import xml.etree.ElementTree as ET
from homeassistant.helpers import validate_config
from homeassistant.components.notify import (
DOMAIN, ATTR_TITLE, BaseNotificationService)
from homeassistant.const import CONF_API_KEY
_LOGGER = logging.getLogger(__name__)
_RESOURCE = 'https://www.notifymyandroid.com/publicapi/'
def get_service(hass, config):
""" Get the NMA notification service. """
if not validate_config(config,
{DOMAIN: [CONF_API_KEY]},
_LOGGER):
return None
try:
# pylint: disable=unused-variable
from requests import Session
except ImportError:
_LOGGER.exception(
"Unable to import requests. "
"Did you maybe not install the 'Requests' package?")
return None
nma = Session()
response = nma.get(_RESOURCE + 'verify',
params={"apikey": config[DOMAIN][CONF_API_KEY]})
tree = ET.fromstring(response.content)
if tree[0].tag == 'error':
_LOGGER.error("Wrong API key supplied. %s", tree[0].text)
else:
return NmaNotificationService(config[DOMAIN][CONF_API_KEY])
# pylint: disable=too-few-public-methods
class NmaNotificationService(BaseNotificationService):
""" Implements notification service for NMA. """
def __init__(self, api_key):
# pylint: disable=no-name-in-module, unused-variable
from requests import Session
self._api_key = api_key
self._data = {"apikey": self._api_key}
self.nma = Session()
def send_message(self, message="", **kwargs):
""" Send a message to a user. """
title = kwargs.get(ATTR_TITLE)
self._data['application'] = 'home-assistant'
self._data['event'] = title
self._data['description'] = message
self._data['priority'] = 0
response = self.nma.get(_RESOURCE + 'notify',
params=self._data)
tree = ET.fromstring(response.content)
if tree[0].tag == 'error':
_LOGGER.exception(
"Unable to perform request. Error: %s", tree[0].text)
|
DmitryYurov/BornAgain
|
refs/heads/develop
|
Examples/python/fitting/ex01_BasicExamples/minimizer_settings.py
|
2
|
"""
Fitting example: running same fit using various minimizer and their settings.
"""
import bornagain as ba
from bornagain import deg, angstrom, nm
def get_sample(params):
"""
Returns a sample with uncorrelated cylinders and prisms on a substrate.
"""
cylinder_height = params["cylinder_height"]
cylinder_radius = params["cylinder_radius"]
prism_height = params["prism_height"]
prism_base_edge = params["prism_base_edge"]
# defining materials
m_air = ba.HomogeneousMaterial("Air", 0.0, 0.0)
m_substrate = ba.HomogeneousMaterial("Substrate", 6e-6, 2e-8)
m_particle = ba.HomogeneousMaterial("Particle", 6e-4, 2e-8)
# collection of particles
cylinder_ff = ba.FormFactorCylinder(cylinder_radius, cylinder_height)
cylinder = ba.Particle(m_particle, cylinder_ff)
prism_ff = ba.FormFactorPrism3(prism_base_edge, prism_height)
prism = ba.Particle(m_particle, prism_ff)
layout = ba.ParticleLayout()
layout.addParticle(cylinder, 0.5)
layout.addParticle(prism, 0.5)
interference = ba.InterferenceFunctionNone()
layout.setInterferenceFunction(interference)
# air layer with particles and substrate form multi layer
air_layer = ba.Layer(m_air)
air_layer.addLayout(layout)
substrate_layer = ba.Layer(m_substrate, 0)
multi_layer = ba.MultiLayer()
multi_layer.addLayer(air_layer)
multi_layer.addLayer(substrate_layer)
return multi_layer
def get_simulation(params):
"""
Returns a GISAXS simulation with beam and detector defined
"""
simulation = ba.GISASSimulation()
simulation.setDetectorParameters(100, -1.0*deg, 1.0*deg,
100, 0.0*deg, 2.0*deg)
simulation.setBeamParameters(1.0*angstrom, 0.2*deg, 0.0*deg)
simulation.setBeamIntensity(1e+08)
simulation.setSample(get_sample(params))
return simulation
def create_real_data():
"""
Generating "real" data from simulated image with default parameters.
"""
params = {'cylinder_height': 5.0*nm, 'cylinder_radius': 5.0*nm,
'prism_height': 5.0*nm, 'prism_base_edge': 5.0*nm}
simulation = get_simulation(params)
simulation.runSimulation()
return simulation.result().array()
def run_fitting():
"""
main function to run fitting
"""
real_data = create_real_data()
# prints info about available minimizers
print(ba.MinimizerFactory().catalogueToString())
# prints detailed info about available minimizers and their options
print(ba.MinimizerFactory().catalogueDetailsToString())
fit_objective = ba.FitObjective()
fit_objective.addSimulationAndData(get_simulation, real_data, 1.0)
fit_objective.initPrint(10)
params = ba.Parameters()
params.add("cylinder_height", 4.*nm, min=0.01)
params.add("cylinder_radius", 6.*nm, min=0.01)
params.add("prism_height", 4.*nm, min=0.01)
params.add("prism_base_edge", 12.*nm, min=0.01)
minimizer = ba.Minimizer()
# Uncomment one of the lines below to adjust minimizer settings
"""
Setting Minuit2 minimizer with Migrad algorithm, limiting number of iterations.
Minimization will try to respect MaxFunctionCalls value.
"""
# minimizer.setMinimizer("Minuit2", "Migrad", "MaxFunctionCalls=50")
"""
Setting two options at once.
Strategy=2 promises more accurate fit.
"""
# minimizer.setMinimizer("Minuit2", "Migrad", "MaxFunctionCalls=500;Strategy=2")
"""
Setting Minuit2 minimizer with Fumili algorithm.
"""
# minimizer.setMinimizer("Minuit2", "Fumili")
"""
Setting Levenberg-Marquardt algorithm.
"""
# minimizer.setMinimizer("GSLLMA")
result = minimizer.minimize(fit_objective.evaluate_residuals, params)
fit_objective.finalize(result)
print("Fitting completed.")
print("chi2:", result.minValue())
for fitPar in result.parameters():
print(fitPar.name(), fitPar.value, fitPar.error)
if __name__ == '__main__':
run_fitting()
|
mgax/beancount
|
refs/heads/master
|
lib/python/beancount/inventory.py
|
1
|
"""
Trade inventory.
This module provides a very flexible inventory object, used to maintain position
and to calculate P+L for a positions, for a single product. In order to compute
P+L, we need to book current trades with past trades, and our inventory object
supports various booking methods to select which positions get booked (FIFO,
LIFO, custom), and a few ways to price the booked positions (real, average
cost). It can also support manual booking of specific trades, in case an
external booking algorithm already takes place.
Note that to avoid precision errors, all prices are expressed as integers scaled
to 10^8.
(We have tried to make this object as flexible as possible, because this is a
generic problem that tends to be badly solved in the financial industry with
error-prone and confusing calculations, and can easily be solved once and for
all.)
"""
# stdlib imports
from collections import deque
from decimal import Decimal
__all__ = ('Inventory', 'FIFOInventory', 'LIFOInventory', 'AvgInventory',
'BOOKING_FIFO', 'BOOKING_LIFO', 'BOOKING_NONE',
'PRICING_REAL', 'PRICING_AVERAGE')
class Position(object):
""" A position that we're holding. Its size represents the remaining size,
and not the original trade size."""
def __init__(self, inv, price, size, obj=None):
# The inventory that this position is attached to. (Note that this is
# only necessary to access the integer representation converters.)
self.inv = inv
# The original price paid for the position.
self.price = price
# The current/remaining size of that position.
self.size = size
# A trade object that represents a position (it can be any type of your
# choosing). These objects are returned when matched and/or removed.
self.obj = obj
def __str__(self):
S = self.inv.S
if self.obj is not None:
return '%s @ %s : %s' % (self.size, S(self.price), self.obj)
else:
return '%s @ %s' % (self.size, S(self.price))
def cost(self):
return self.price * self.size
# Booking methods.
def BOOKING_FIFO(inv):
"Return the next trade for FIFO booking."
return inv.positions[0]
def BOOKING_LIFO(inv):
"Return the next trade for LIFO booking."
return inv.positions[-1]
def BOOKING_NONE(inv):
"Prevent from using automatic booking."
raise IndexError("Automatic booking is disabled. "
"You need to close your trades manually.")
# Pricing methods.
PRICING_REAL = object()
PRICING_AVERAGE = object()
class Inventory(object):
"An interface for inventory objects."
def __init__(self, booking=BOOKING_NONE, pricing=PRICING_REAL):
# The booking method, a function object that is supposed to return the
# next position to be matched.
self.booking_findnext = booking
# The pricing method.
self.pricing_method = pricing
# Functions that convert into integer, float and string representation.
if not hasattr(self, 'L'):
self.L = lambda x: Decimal(str(x))
if not hasattr(self, 'F'):
self.F = float
if not hasattr(self, 'S'):
self.S = str
self.reset()
def reset(self):
"Reset the realized pnl and position (initial state)."
# The last marked price for the underlying product.
self.mark = None
# The realized P+L for this inventory.
self._realized_pnl = self.L(0)
# The price of the last trade.
self.last_trade_price = None
# For precision reasons, we track the cost of our positions separately
# (we could otherwise adjust each of them to the average every time it
# changes, introducing numerical error, but simplifying the code). This
# is only used for average cost pricing.
self.cost4avg = self.L(0)
# A deque of Position objects for our active position.
self.positions = deque()
def reset_position(self):
"""
Reset the current position to 0. After calling this method, the P+L is
only the unrealized P+L. We return the list of trade objects for the
eliminated positions.
"""
eliminated = [pos.obj for pos in self.positions]
self.positions = deque()
return eliminated
def consolidate(self, price):
"""
Book all of the current inventory's position at the given price
(typically, some sort of settlement price) and set the position cost at
that price. This method does not affect the position, but it transfers
unrealized P+L into realized P+L. This means that if the consolidation
price is equal to the mark price, the unrealized P+L is 0 after this
method is called.
"""
# We extract the PnL from each position by changing its trade price.
realized_pnl = 0
for pos in self.positions:
realized_pnl += (price - pos.price) * pos.size
pos.price = price
self._realized_pnl += realized_pnl
if self.pricing_method is PRICING_AVERAGE:
self.cost4avg += realized_pnl
assert self.cost4avg == self.realcost()
# Another way to accomplish this would be to perform two trades, but
# that would delete the information about the trade objects. We used to
# do it like this:
## pos = self.position()
## self.trade(price, -pos)
## assert self.position() == 0
## self.trade(price, pos)
def reset_pnl(self):
"""
Reset the realized P+L to 0 and returns it. This method effectively
transfers out the realized P+L from this inventory.
"""
rpnl, self._realized_pnl = self._realized_pnl, 0
return rpnl
def setmark(self, price):
"Set the mark price for the current product."
self.mark = price
def setmarkexit(self, bid, ask):
"Set the mark price at the price to exit."
self.mark = bid if self.position() > 0 else ask
def _sanity_check(self):
"Perform some internal sanity checks."
# Check that the signs of our inventory are all the same.
if self.positions:
size = self.positions[0].size
for pos in self.positions:
assert pos.size * size > 0, pos
def position(self):
"Return the current position this inventory is holding."
self._sanity_check()
return sum(pos.size for pos in self.positions) if self.positions else self.L(0)
def realcost(self):
"Return the real cost of our current positions."
return sum(pos.cost() for pos in self.positions) if self.positions else self.L(0)
def cost(self):
"Return the original cost of our active position."
if self.pricing_method is PRICING_REAL:
return self.realcost()
else:
return self.cost4avg
def value(self):
"Return the marked value of the entire position."
pos = self.position()
if pos != 0:
if self.mark is None:
raise ValueError("You need to set the mark to obtain the pnl")
return pos * self.mark
else:
return self.L(0)
def avgcost(self):
"Return the average price paid for each unit of the current position."
pos = self.position()
return self.L(self.F(self.cost()) / pos) if pos != 0 else self.L(0)
def realized_pnl(self):
return self._realized_pnl
def unrealized_pnl(self):
"Return the P+L for our current position (not including past realized P+L."
pos = self.position()
if pos == 0:
return self.L(0)
if self.mark is None:
raise ValueError("You need to set the mark to obtain the pnl")
return self.position() * self.mark - self.cost()
def pnl(self):
"Return the P+L for our current position (not including past realized P+L."
return self._realized_pnl + self.unrealized_pnl()
def dump(self):
print ',---------------', self
print '| position ', self.position()
print '| mark ', self.S(self.mark) if self.mark else None
print '| avgcost ', self.S(self.avgcost() or 0)
print '| value ', self.S(self.value()) if self.mark else None
print '| cost ', self.S(self.cost())
print '| cost4avg ', self.S(self.cost4avg)
print '| unrealized_pnl ', self.S(self.unrealized_pnl()) if self.mark else None
print '| realized_pnl ', self.S(self.realized_pnl())
print '| pnl ', self.S(self.pnl()) if self.mark else None
print '| inventory: '
for pos in self.positions:
print '| %s' % pos
print '`---------------', self
def close_all(self, price):
"Close all the positions at the mark price."
self.trade(price, -self.position())
assert self.position() == 0
def _findpos(self, obj):
"Return the position that corresponds to a specific trade object."
for pos in self.positions:
if pos.obj is obj:
return pos
else:
return None
def close(self, obj, price, quant=None):
""" Close the position for the trade 'obj' at the given 'price'. If
'quant' is specified, close the position only partially (otherwise close
the entire position). Note that the outcome of using this method does
not depend on the selected booking method."""
pos = self._findpos(obj)
if pos is None:
raise KeyError("Invalid trade object, could not be found: %s" % obj)
if quant is not None:
if quant * pos.size <= 0:
raise KeyError("Invalid close size %s of %s." % (quant, pos.size))
if abs(quant) > abs(pos.size):
raise KeyError("Trying to close %s of %s." % (quant, pos.size))
else:
quant = -pos.size
return self._trade(price, quant, None, lambda inv: pos)
def trade(self, price, quant, obj=None):
""" Book a trade for size 'quant' at the given 'price', using the
default booking method. Return list of trade objects booked and
the PnL realized by this trade (if any).
Note: if you want to book positions manually, use the close() method."""
return self._trade(price, quant, obj, self.booking_findnext)
def _trade(self, price, quant, obj, nextpos):
""" Trade booking implementation. We book trades at price 'price' for
the given size 'quant' only. 'obj' is the trade object to this trade,
and is inserted in the new Position object if there is remaining size.
'nextpos' is a function that will return the next Position object to be
booked against (this is the booking method)."""
## trace('__________________ _trade', price, quant, obj, nextpos)
# A list of (trade-object, quantity) booked.
booked = []
# Total size booked during this trade.
total_booked = 0
# "Real" PnL for the booked trades.
real_pnl = 0
# Book the new trade against existing positions if the trade is not on
# the same side as our current position.
position = self.position()
if quant * position < 0:
# Process all the positions.
done = 0
while self.positions:
pos = nextpos(self)
if abs(quant) >= abs(pos.size):
# This position is entirely consumed by the trade.
booked_quant = pos.size
self.positions.remove(pos) # This may become slow.
else:
# This position is only partially consumed by the trade.
booked_quant = -quant
pos.size += quant
done = 1
quant += booked_quant
total_booked += booked_quant
booked.append( (pos.obj, booked_quant) )
real_pnl += booked_quant * (price - pos.price)
if done or quant == 0:
break
assert quant * self.position() >= 0
# Price the booked trades into the realized PnL, depending on method.
if self.pricing_method is PRICING_REAL:
realized_pnl = real_pnl
else:
if position == 0:
assert total_booked == 0, total_booked
realized_pnl = 0
else:
realized_cost = self.L((total_booked*self.F(self.cost4avg))/position)
realized_pnl = total_booked * price - realized_cost
self.cost4avg -= realized_cost
self._realized_pnl += realized_pnl
if total_booked == 0:
assert realized_pnl == 0, realized_pnl
else:
booked.append( (obj, -total_booked) )
# Append the remainder of our trade to the inventory if not all was
# booked.
if quant != 0:
newpos = Position(self, price, quant, obj)
self.positions.append(newpos)
if self.pricing_method is PRICING_AVERAGE:
self.cost4avg += newpos.cost()
self.last_trade_price = price
return booked, realized_pnl
class FIFOInventory(Inventory):
def __init__(self):
Inventory.__init__(self, booking=BOOKING_FIFO, pricing=PRICING_REAL)
class LIFOInventory(Inventory):
def __init__(self):
Inventory.__init__(self, booking=BOOKING_LIFO, pricing=PRICING_REAL)
class AvgInventory(Inventory):
def __init__(self):
Inventory.__init__(self, booking=BOOKING_FIFO, pricing=PRICING_AVERAGE)
# Note: the booking method matters little here, other than for the
# ordering of the trades that get closed.
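# Usage sketch (prices are integers scaled to 10^8, per the module docstring):
#
#   inv = FIFOInventory()
#   inv.trade(100 * 10**8, 10)    # buy 10 @ 100
#   inv.trade(110 * 10**8, -10)   # sell 10 @ 110, booked against the buy
#   assert inv.position() == 0
#   assert inv.realized_pnl() == 10 * (110 - 100) * 10**8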
|
Mec-iS/semantic-data-chronos
|
refs/heads/master
|
lib/longtask/__init__.py
|
6
|
from longtask import *
|
davidvon/pipa-pay-server
|
refs/heads/master
|
site-packages/flask/module.py
|
850
|
# -*- coding: utf-8 -*-
"""
flask.module
~~~~~~~~~~~~
Implements a class that represents module blueprints.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import os
from .blueprints import Blueprint
def blueprint_is_module(bp):
"""Used to figure out if something is actually a module"""
return isinstance(bp, Module)
class Module(Blueprint):
"""Deprecated module support. Until Flask 0.6 modules were a different
name for the concept now available as blueprints in Flask. They are
essentially doing the same but have some bad semantics for templates and
static files that were fixed with blueprints.
.. versionchanged:: 0.7
Modules were deprecated in favor for blueprints.
"""
def __init__(self, import_name, name=None, url_prefix=None,
static_path=None, subdomain=None):
if name is None:
assert '.' in import_name, 'name required if package name ' \
'does not point to a submodule'
name = import_name.rsplit('.', 1)[1]
Blueprint.__init__(self, name, import_name, url_prefix=url_prefix,
subdomain=subdomain, template_folder='templates')
if os.path.isdir(os.path.join(self.root_path, 'static')):
self._static_folder = 'static'
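# Usage sketch (deprecated API, shown for illustration only; blueprints are
# the supported replacement). Since Module subclasses Blueprint, it is
# registered like one:
#
#   admin = Module(__name__, name='admin', url_prefix='/admin')
#
#   @admin.route('/')
#   def index():
#       return 'admin index'
#
#   app.register_blueprint(admin)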
|
liorvh/infernal-twin
|
refs/heads/master
|
build/pillow/PIL/GdImageFile.py
|
52
|
#
# The Python Imaging Library.
# $Id$
#
# GD file handling
#
# History:
# 1996-04-12 fl Created
#
# Copyright (c) 1997 by Secret Labs AB.
# Copyright (c) 1996 by Fredrik Lundh.
#
# See the README file for information on usage and redistribution.
#
# NOTE: This format cannot be automatically recognized, so the
# class is not registered for use with Image.open(). To open a
# gd file, use the GdImageFile.open() function instead.
# THE GD FORMAT IS NOT DESIGNED FOR DATA INTERCHANGE. This
# implementation is provided for convenience and demonstrational
# purposes only.
__version__ = "0.1"
from PIL import ImageFile, ImagePalette, _binary
from PIL._util import isPath
try:
import builtins
except ImportError:
import __builtin__
builtins = __builtin__
i16 = _binary.i16be
##
# Image plugin for the GD uncompressed format. Note that this format
# is not supported by the standard <b>Image.open</b> function. To use
# this plugin, you have to import the <b>GdImageFile</b> module and
# use the <b>GdImageFile.open</b> function.
class GdImageFile(ImageFile.ImageFile):
format = "GD"
format_description = "GD uncompressed images"
def _open(self):
# Header
s = self.fp.read(775)
self.mode = "L" # FIXME: "P"
self.size = i16(s[0:2]), i16(s[2:4])
# transparency index
tindex = i16(s[5:7])
if tindex < 256:
self.info["transparent"] = tindex
self.palette = ImagePalette.raw("RGB", s[7:])
self.tile = [("raw", (0, 0)+self.size, 775, ("L", 0, -1))]
##
# Load texture from a GD image file.
#
# @param filename GD file name, or an opened file handle.
# @param mode Optional mode. In this version, if the mode argument
# is given, it must be "r".
# @return An image instance.
# @exception IOError If the image could not be read.
def open(fp, mode="r"):
if mode != "r":
raise ValueError("bad mode")
if isPath(fp):
filename = fp
fp = builtins.open(fp, "rb")
else:
filename = ""
try:
return GdImageFile(fp, filename)
except SyntaxError:
raise IOError("cannot identify this image file")
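# Usage sketch (per the NOTE above, this format is not registered with
# Image.open(); the file name is a placeholder):
#
#   from PIL import GdImageFile
#   im = GdImageFile.open("sample.gd")
#   print(im.size, im.mode)
#   im.save("sample.png")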
|
lzambella/Qyoutube-dl
|
refs/heads/master
|
youtube_dl/extractor/sztvhu.py
|
148
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from .common import InfoExtractor
class SztvHuIE(InfoExtractor):
_VALID_URL = r'http://(?:(?:www\.)?sztv\.hu|www\.tvszombathely\.hu)/(?:[^/]+)/.+-(?P<id>[0-9]+)'
_TEST = {
'url': 'http://sztv.hu/hirek/cserkeszek-nepszerusitettek-a-kornyezettudatos-eletmodot-a-savaria-teren-20130909',
'md5': 'a6df607b11fb07d0e9f2ad94613375cb',
'info_dict': {
'id': '20130909',
'ext': 'mp4',
'title': 'Cserkészek népszerűsítették a környezettudatos életmódot a Savaria téren',
'description': 'A zöld nap játékos ismeretterjesztő programjait a Magyar Cserkész Szövetség szervezte, akik az ország nyolc városában adják át tudásukat az érdeklődőknek. A PET...',
},
}
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
video_file = self._search_regex(
r'file: "...:(.*?)",', webpage, 'video file')
title = self._html_search_regex(
r'<meta name="title" content="([^"]*?) - [^-]*? - [^-]*?"',
webpage, 'video title')
description = self._html_search_regex(
r'<meta name="description" content="([^"]*)"/>',
webpage, 'video description', fatal=False)
thumbnail = self._og_search_thumbnail(webpage)
video_url = 'http://media.sztv.hu/vod/' + video_file
return {
'id': video_id,
'url': video_url,
'title': title,
'description': description,
'thumbnail': thumbnail,
}
|
mseln/klufweb
|
refs/heads/master
|
klufweb/static_page/tests.py
|
24123
|
from django.test import TestCase
# Create your tests here.
|
nafitzgerald/allennlp
|
refs/heads/master
|
allennlp/service/predictors/__init__.py
|
1
|
"""
A :class:`~allennlp.server.predictors.predictor.Predictor` is
a wrapper for an AllenNLP ``Model``
that makes JSON predictions using JSON inputs. If you
want to serve up a model through the web service
(or using ``allennlp.commands.predict``), you'll need
a ``Predictor`` that wraps it.
"""
from .predictor import Predictor, DemoModel
from .bidaf import BidafPredictor
from .decomposable_attention import DecomposableAttentionPredictor
from .semantic_role_labeler import SemanticRoleLabelerPredictor
from .coref import CorefPredictor
from .sentence_tagger import SentenceTaggerPredictor
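# Usage sketch (assumes a trained model archive on disk; the path and the
# predictor name below are placeholders):
#
#   from allennlp.models.archival import load_archive
#   archive = load_archive("/path/to/model.tar.gz")
#   predictor = Predictor.from_archive(archive, "machine-comprehension")
#   predictor.predict_json({"passage": "...", "question": "..."})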
|
musically-ut/numpy
|
refs/heads/master
|
numpy/distutils/from_template.py
|
164
|
#!/usr/bin/python
"""
process_file(filename)
takes templated file .xxx.src and produces .xxx file where .xxx
is .pyf .f90 or .f using the following template rules:
'<..>' denotes a template.
All function and subroutine blocks in a source file with names that
contain '<..>' will be replicated according to the rules in '<..>'.
The number of comma-separated words in '<..>' will determine the number of
replicates.
'<..>' may have two different forms, named and short. For example,
named:
<p=d,s,z,c> where anywhere inside a block '<p>' will be replaced with
'd', 's', 'z', and 'c' for each replicate of the block.
<_c> is already defined: <_c=s,d,c,z>
<_t> is already defined: <_t=real,double precision,complex,double complex>
short:
<s,d,c,z>, a short form of the named, useful when no <p> appears inside
a block.
In general, '<..>' contains a comma-separated list of arbitrary
expressions. If these expressions must contain a comma|leftarrow|rightarrow,
then prepend the comma|leftarrow|rightarrow with a backslash.
If an expression matches '\\<index>' then it will be replaced
by <index>-th expression.
Note that all '<..>' forms in a block must have the same number of
comma-separated entries.
Predefined named template rules:
<prefix=s,d,c,z>
<ftype=real,double precision,complex,double complex>
<ftypereal=real,double precision,\\0,\\1>
<ctype=float,double,complex_float,complex_double>
<ctypereal=float,double,\\0,\\1>
"""
from __future__ import division, absolute_import, print_function
__all__ = ['process_str', 'process_file']
import os
import sys
import re
routine_start_re = re.compile(r'(\n|\A)(( (\$|\*))|)\s*(subroutine|function)\b', re.I)
routine_end_re = re.compile(r'\n\s*end\s*(subroutine|function)\b.*(\n|\Z)', re.I)
function_start_re = re.compile(r'\n (\$|\*)\s*function\b', re.I)
def parse_structure(astr):
""" Return a list of tuples for each function or subroutine each
tuple is the start and end of a subroutine or function to be
expanded.
"""
spanlist = []
ind = 0
while True:
m = routine_start_re.search(astr, ind)
if m is None:
break
start = m.start()
if function_start_re.match(astr, start, m.end()):
while True:
i = astr.rfind('\n', ind, start)
if i==-1:
break
start = i
if astr[i:i+7]!='\n $':
break
start += 1
m = routine_end_re.search(astr, m.end())
ind = end = m and m.end()-1 or len(astr)
spanlist.append((start, end))
return spanlist
template_re = re.compile(r"<\s*(\w[\w\d]*)\s*>")
named_re = re.compile(r"<\s*(\w[\w\d]*)\s*=\s*(.*?)\s*>")
list_re = re.compile(r"<\s*((.*?))\s*>")
def find_repl_patterns(astr):
reps = named_re.findall(astr)
names = {}
for rep in reps:
name = rep[0].strip() or unique_key(names)
repl = rep[1].replace('\,', '@comma@')
thelist = conv(repl)
names[name] = thelist
return names
item_re = re.compile(r"\A\\(?P<index>\d+)\Z")
def conv(astr):
b = astr.split(',')
l = [x.strip() for x in b]
for i in range(len(l)):
m = item_re.match(l[i])
if m:
j = int(m.group('index'))
l[i] = l[j]
return ','.join(l)
def unique_key(adict):
""" Obtain a unique key given a dictionary."""
allkeys = list(adict.keys())
done = False
n = 1
while not done:
newkey = '__l%s' % (n)
if newkey in allkeys:
n += 1
else:
done = True
return newkey
template_name_re = re.compile(r'\A\s*(\w[\w\d]*)\s*\Z')
def expand_sub(substr, names):
substr = substr.replace('\>', '@rightarrow@')
substr = substr.replace('\<', '@leftarrow@')
lnames = find_repl_patterns(substr)
substr = named_re.sub(r"<\1>", substr) # get rid of definition templates
def listrepl(mobj):
thelist = conv(mobj.group(1).replace('\,', '@comma@'))
if template_name_re.match(thelist):
return "<%s>" % (thelist)
name = None
for key in lnames.keys(): # see if list is already in dictionary
if lnames[key] == thelist:
name = key
if name is None: # this list is not in the dictionary yet
name = unique_key(lnames)
lnames[name] = thelist
return "<%s>" % name
substr = list_re.sub(listrepl, substr) # convert all lists to named templates
# newnames are constructed as needed
numsubs = None
base_rule = None
rules = {}
for r in template_re.findall(substr):
if r not in rules:
thelist = lnames.get(r, names.get(r, None))
if thelist is None:
raise ValueError('No replicates found for <%s>' % (r))
if r not in names and not thelist.startswith('_'):
names[r] = thelist
rule = [i.replace('@comma@', ',') for i in thelist.split(',')]
num = len(rule)
if numsubs is None:
numsubs = num
rules[r] = rule
base_rule = r
elif num == numsubs:
rules[r] = rule
else:
print("Mismatch in number of replacements (base <%s=%s>)"
" for <%s=%s>. Ignoring." %
(base_rule, ','.join(rules[base_rule]), r, thelist))
if not rules:
return substr
def namerepl(mobj):
name = mobj.group(1)
return rules.get(name, (k+1)*[name])[k]
newstr = ''
for k in range(numsubs):
newstr += template_re.sub(namerepl, substr) + '\n\n'
newstr = newstr.replace('@rightarrow@', '>')
newstr = newstr.replace('@leftarrow@', '<')
return newstr
def process_str(allstr):
newstr = allstr
writestr = '' #_head # using _head will break free-format files
struct = parse_structure(newstr)
oldend = 0
names = {}
names.update(_special_names)
for sub in struct:
writestr += newstr[oldend:sub[0]]
names.update(find_repl_patterns(newstr[oldend:sub[0]]))
writestr += expand_sub(newstr[sub[0]:sub[1]], names)
oldend = sub[1]
writestr += newstr[oldend:]
return writestr
include_src_re = re.compile(r"(\n|\A)\s*include\s*['\"](?P<name>[\w\d./\\]+[.]src)['\"]", re.I)
def resolve_includes(source):
d = os.path.dirname(source)
fid = open(source)
lines = []
for line in fid:
m = include_src_re.match(line)
if m:
fn = m.group('name')
if not os.path.isabs(fn):
fn = os.path.join(d, fn)
if os.path.isfile(fn):
print('Including file', fn)
lines.extend(resolve_includes(fn))
else:
lines.append(line)
else:
lines.append(line)
fid.close()
return lines
def process_file(source):
lines = resolve_includes(source)
return process_str(''.join(lines))
_special_names = find_repl_patterns('''
<_c=s,d,c,z>
<_t=real,double precision,complex,double complex>
<prefix=s,d,c,z>
<ftype=real,double precision,complex,double complex>
<ctype=float,double,complex_float,complex_double>
<ftypereal=real,double precision,\\0,\\1>
<ctypereal=float,double,\\0,\\1>
''')
if __name__ == "__main__":
try:
file = sys.argv[1]
except IndexError:
fid = sys.stdin
outfile = sys.stdout
else:
fid = open(file, 'r')
(base, ext) = os.path.splitext(file)
newname = base
outfile = open(newname, 'w')
allstr = fid.read()
writestr = process_str(allstr)
outfile.write(writestr)
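# Example sketch: expanding a templated block with process_str(). The exact
# output depends on the predefined rules above; roughly, this emits four
# replicates (scopy/dcopy/ccopy/zcopy) with the matching <ftype> declarations.
#
#   src = "\nsubroutine <prefix>copy(n)\n<ftype> x(n)\nend subroutine <prefix>copy\n"
#   print(process_str(src))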
|
hainm/scipy
|
refs/heads/master
|
scipy/io/netcdf.py
|
29
|
"""
NetCDF reader/writer module.
This module is used to read and create NetCDF files. NetCDF files are
accessed through the `netcdf_file` object. Data written to and from NetCDF
files are contained in `netcdf_variable` objects. Attributes are given
as member variables of the `netcdf_file` and `netcdf_variable` objects.
This module implements the Scientific.IO.NetCDF API to read and create
NetCDF files. The same API is also used in the PyNIO and pynetcdf
modules, allowing these modules to be used interchangeably when working
with NetCDF files.
"""
from __future__ import division, print_function, absolute_import
# TODO:
# * properly implement ``_FillValue``.
# * implement Jeff Whitaker's patch for masked variables.
# * fix character variables.
# * implement PAGESIZE for Python 2.6?
# The Scientific.IO.NetCDF API allows attributes to be added directly to
# instances of ``netcdf_file`` and ``netcdf_variable``. To differentiate
# between user-set attributes and instance attributes, user-set attributes
# are automatically stored in the ``_attributes`` attribute by overloading
#``__setattr__``. This is the reason why the code sometimes uses
#``obj.__dict__['key'] = value``, instead of simply ``obj.key = value``;
# otherwise the key would be inserted into userspace attributes.
__all__ = ['netcdf_file']
import warnings
import weakref
from operator import mul
import mmap as mm
import numpy as np
from numpy.compat import asbytes, asstr
from numpy import fromstring, dtype, empty, array, asarray
from numpy import little_endian as LITTLE_ENDIAN
from functools import reduce
from scipy._lib.six import integer_types, text_type, binary_type
ABSENT = b'\x00\x00\x00\x00\x00\x00\x00\x00'
ZERO = b'\x00\x00\x00\x00'
NC_BYTE = b'\x00\x00\x00\x01'
NC_CHAR = b'\x00\x00\x00\x02'
NC_SHORT = b'\x00\x00\x00\x03'
NC_INT = b'\x00\x00\x00\x04'
NC_FLOAT = b'\x00\x00\x00\x05'
NC_DOUBLE = b'\x00\x00\x00\x06'
NC_DIMENSION = b'\x00\x00\x00\n'
NC_VARIABLE = b'\x00\x00\x00\x0b'
NC_ATTRIBUTE = b'\x00\x00\x00\x0c'
TYPEMAP = {NC_BYTE: ('b', 1),
NC_CHAR: ('c', 1),
NC_SHORT: ('h', 2),
NC_INT: ('i', 4),
NC_FLOAT: ('f', 4),
NC_DOUBLE: ('d', 8)}
REVERSE = {('b', 1): NC_BYTE,
('B', 1): NC_CHAR,
('c', 1): NC_CHAR,
('h', 2): NC_SHORT,
('i', 4): NC_INT,
('f', 4): NC_FLOAT,
('d', 8): NC_DOUBLE,
# these come from asarray(1).dtype.char and asarray('foo').dtype.char,
# used when getting the types from generic attributes.
('l', 4): NC_INT,
('S', 1): NC_CHAR}
class netcdf_file(object):
"""
A file object for NetCDF data.
A `netcdf_file` object has two standard attributes: `dimensions` and
`variables`. The values of both are dictionaries, mapping dimension
names to their associated lengths and variable names to variables,
respectively. Application programs should never modify these
dictionaries.
All other attributes correspond to global attributes defined in the
NetCDF file. Global file attributes are created by assigning to an
attribute of the `netcdf_file` object.
Parameters
----------
filename : string or file-like
string -> filename
mode : {'r', 'w', 'a'}, optional
read-write-append mode, default is 'r'
mmap : None or bool, optional
Whether to mmap `filename` when reading. Default is True
when `filename` is a file name, False when `filename` is a
file-like object. Note that when mmap is in use, data arrays
returned refer directly to the mmapped data on disk, and the
file cannot be closed as long as references to it exist.
version : {1, 2}, optional
version of netcdf to read / write, where 1 means *Classic
format* and 2 means *64-bit offset format*. Default is 1. See
`here <http://www.unidata.ucar.edu/software/netcdf/docs/netcdf/Which-Format.html>`__
for more info.
Notes
-----
The major advantage of this module over other modules is that it doesn't
require the code to be linked to the NetCDF libraries. This module is
derived from `pupynere <https://bitbucket.org/robertodealmeida/pupynere/>`_.
NetCDF files are a self-describing binary data format. The file contains
metadata that describes the dimensions and variables in the file. More
details about NetCDF files can be found `here
<http://www.unidata.ucar.edu/software/netcdf/docs/netcdf.html>`__. There
are three main sections to a NetCDF data structure:
1. Dimensions
2. Variables
3. Attributes
The dimensions section records the name and length of each dimension used
by the variables. The variables would then indicate which dimensions it
uses and any attributes such as data units, along with containing the data
values for the variable. It is good practice to include a
variable with the same name as a dimension to provide the values for
that axis. Lastly, the attributes section would contain additional
information such as the name of the file creator or the instrument used to
collect the data.
When writing data to a NetCDF file, there is often the need to indicate the
'record dimension'. A record dimension is the unbounded dimension for a
variable. For example, a temperature variable may have dimensions of
latitude, longitude and time. If one wants to add more temperature data to
the NetCDF file as time progresses, then the temperature variable should
have the time dimension flagged as the record dimension.
In addition, the NetCDF file header contains the position of the data in
the file, so access can be done in an efficient manner without loading
unnecessary data into memory. It uses the ``mmap`` module to create
Numpy arrays mapped to the data on disk, for the same purpose.
Note that when `netcdf_file` is used to open a file with mmap=True
(default for read-only), arrays returned by it refer to data
directly on the disk. The file should not be closed, and cannot be cleanly
closed when asked, if such arrays are alive. You may want to copy data arrays
obtained from a mmapped NetCDF file if they are to be processed after the file
is closed, see the example below.
Examples
--------
To create a NetCDF file:
>>> from scipy.io import netcdf
>>> f = netcdf.netcdf_file('simple.nc', 'w')
>>> f.history = 'Created for a test'
>>> f.createDimension('time', 10)
>>> time = f.createVariable('time', 'i', ('time',))
>>> time[:] = np.arange(10)
>>> time.units = 'days since 2008-01-01'
>>> f.close()
Note the assignment of ``arange(10)`` to ``time[:]``. Exposing the slice
of the time variable allows for the data to be set in the object, rather
than letting ``arange(10)`` overwrite the ``time`` variable.
To read the NetCDF file we just created:
>>> from scipy.io import netcdf
>>> f = netcdf.netcdf_file('simple.nc', 'r')
>>> print(f.history)
Created for a test
>>> time = f.variables['time']
>>> print(time.units)
days since 2008-01-01
>>> print(time.shape)
(10,)
>>> print(time[-1])
9
NetCDF files, when opened read-only, return arrays that refer
directly to memory-mapped data on disk:
>>> data = time[:]
>>> data.base.base
<mmap.mmap object at 0x7fe753763180>
If the data is to be processed after the file is closed, it needs
to be copied to main memory:
>>> data = time[:].copy()
>>> f.close()
>>> data.mean()
4.5
A NetCDF file can also be used as context manager:
>>> from scipy.io import netcdf
>>> with netcdf.netcdf_file('simple.nc', 'r') as f:
... print(f.history)
Created for a test
"""
def __init__(self, filename, mode='r', mmap=None, version=1):
"""Initialize netcdf_file from fileobj (str or file-like)."""
if mode not in 'rwa':
raise ValueError("Mode must be either 'r', 'w' or 'a'.")
if hasattr(filename, 'seek'): # file-like
self.fp = filename
self.filename = 'None'
if mmap is None:
mmap = False
elif mmap and not hasattr(filename, 'fileno'):
raise ValueError('Cannot use file object for mmap')
else: # maybe it's a string
self.filename = filename
omode = 'r+' if mode == 'a' else mode
self.fp = open(self.filename, '%sb' % omode)
if mmap is None:
mmap = True
if mode != 'r':
# Cannot read write-only files
mmap = False
self.use_mmap = mmap
self.mode = mode
self.version_byte = version
self.dimensions = {}
self.variables = {}
self._dims = []
self._recs = 0
self._recsize = 0
self._mm = None
self._mm_buf = None
if self.use_mmap:
self._mm = mm.mmap(self.fp.fileno(), 0, access=mm.ACCESS_READ)
self._mm_buf = np.frombuffer(self._mm, dtype=np.int8)
self._attributes = {}
if mode in 'ra':
self._read()
def __setattr__(self, attr, value):
# Store user defined attributes in a separate dict,
# so we can save them to file later.
try:
self._attributes[attr] = value
except AttributeError:
pass
self.__dict__[attr] = value
def close(self):
"""Closes the NetCDF file."""
if not self.fp.closed:
try:
self.flush()
finally:
self.variables = {}
if self._mm_buf is not None:
ref = weakref.ref(self._mm_buf)
self._mm_buf = None
if ref() is None:
# self._mm_buf is gc'd, and we can close the mmap
self._mm.close()
else:
# we cannot close self._mm, since self._mm_buf is
# alive and there may still be arrays referring to it
warnings.warn((
"Cannot close a netcdf_file opened with mmap=True, when "
"netcdf_variables or arrays referring to its data still exist. "
"All data arrays obtained from such files refer directly to "
"data on disk, and must be copied before the file can be cleanly "
"closed. (See netcdf_file docstring for more information on mmap.)"
), category=RuntimeWarning)
self._mm = None
self.fp.close()
__del__ = close
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def createDimension(self, name, length):
"""
Adds a dimension to the Dimension section of the NetCDF data structure.
Note that this function merely adds a new dimension that the variables can
reference. The values for the dimension, if desired, should be added as
a variable using `createVariable`, referring to this dimension.
Parameters
----------
name : str
Name of the dimension (Eg, 'lat' or 'time').
length : int
Length of the dimension.
See Also
--------
createVariable
"""
self.dimensions[name] = length
self._dims.append(name)
def createVariable(self, name, type, dimensions):
"""
Create an empty variable for the `netcdf_file` object, specifying its data
type and the dimensions it uses.
Parameters
----------
name : str
Name of the new variable.
type : dtype or str
Data type of the variable.
dimensions : sequence of str
List of the dimension names used by the variable, in the desired order.
Returns
-------
variable : netcdf_variable
The newly created ``netcdf_variable`` object.
This object has also been added to the `netcdf_file` object as well.
See Also
--------
createDimension
Notes
-----
Any dimensions to be used by the variable should already exist in the
NetCDF data structure or should be created by `createDimension` prior to
creating the NetCDF variable.
"""
shape = tuple([self.dimensions[dim] for dim in dimensions])
shape_ = tuple([dim or 0 for dim in shape]) # replace None with 0 for numpy
type = dtype(type)
typecode, size = type.char, type.itemsize
if (typecode, size) not in REVERSE:
raise ValueError("NetCDF 3 does not support type %s" % type)
data = empty(shape_, dtype=type.newbyteorder("B")) # convert to big endian always for NetCDF 3
self.variables[name] = netcdf_variable(data, typecode, size, shape, dimensions)
return self.variables[name]
def flush(self):
"""
Perform a sync-to-disk flush if the `netcdf_file` object is in write mode.
See Also
--------
sync : Identical function
"""
if hasattr(self, 'mode') and self.mode in 'wa':
self._write()
sync = flush
def _write(self):
self.fp.seek(0)
self.fp.write(b'CDF')
self.fp.write(array(self.version_byte, '>b').tostring())
# Write headers and data.
self._write_numrecs()
self._write_dim_array()
self._write_gatt_array()
self._write_var_array()
def _write_numrecs(self):
# Get highest record count from all record variables.
for var in self.variables.values():
if var.isrec and len(var.data) > self._recs:
self.__dict__['_recs'] = len(var.data)
self._pack_int(self._recs)
def _write_dim_array(self):
if self.dimensions:
self.fp.write(NC_DIMENSION)
self._pack_int(len(self.dimensions))
for name in self._dims:
self._pack_string(name)
length = self.dimensions[name]
self._pack_int(length or 0) # replace None with 0 for record dimension
else:
self.fp.write(ABSENT)
def _write_gatt_array(self):
self._write_att_array(self._attributes)
def _write_att_array(self, attributes):
if attributes:
self.fp.write(NC_ATTRIBUTE)
self._pack_int(len(attributes))
for name, values in attributes.items():
self._pack_string(name)
self._write_values(values)
else:
self.fp.write(ABSENT)
def _write_var_array(self):
if self.variables:
self.fp.write(NC_VARIABLE)
self._pack_int(len(self.variables))
# Sort variable names non-recs first, then recs.
def sortkey(n):
v = self.variables[n]
if v.isrec:
return (-1,)
return v._shape
variables = sorted(self.variables, key=sortkey, reverse=True)
# Set the metadata for all variables.
for name in variables:
self._write_var_metadata(name)
# Now that we have the metadata, we know the vsize of
# each record variable, so we can calculate recsize.
self.__dict__['_recsize'] = sum([
var._vsize for var in self.variables.values()
if var.isrec])
# Set the data for all variables.
for name in variables:
self._write_var_data(name)
else:
self.fp.write(ABSENT)
def _write_var_metadata(self, name):
var = self.variables[name]
self._pack_string(name)
self._pack_int(len(var.dimensions))
for dimname in var.dimensions:
dimid = self._dims.index(dimname)
self._pack_int(dimid)
self._write_att_array(var._attributes)
nc_type = REVERSE[var.typecode(), var.itemsize()]
self.fp.write(asbytes(nc_type))
if not var.isrec:
vsize = var.data.size * var.data.itemsize
vsize += -vsize % 4
else: # record variable
try:
vsize = var.data[0].size * var.data.itemsize
except IndexError:
vsize = 0
rec_vars = len([v for v in self.variables.values()
if v.isrec])
if rec_vars > 1:
vsize += -vsize % 4
self.variables[name].__dict__['_vsize'] = vsize
self._pack_int(vsize)
# Pack a bogus begin, and set the real value later.
self.variables[name].__dict__['_begin'] = self.fp.tell()
self._pack_begin(0)
def _write_var_data(self, name):
var = self.variables[name]
# Set begin in file header.
the_beguine = self.fp.tell()
self.fp.seek(var._begin)
self._pack_begin(the_beguine)
self.fp.seek(the_beguine)
# Write data.
if not var.isrec:
self.fp.write(var.data.tostring())
count = var.data.size * var.data.itemsize
self.fp.write(b'0' * (var._vsize - count))
else: # record variable
# Handle rec vars with shape[0] < nrecs.
if self._recs > len(var.data):
shape = (self._recs,) + var.data.shape[1:]
var.data.resize(shape)
pos0 = pos = self.fp.tell()
for rec in var.data:
# Apparently scalars cannot be converted to big endian. If we
# try to convert a ``=i4`` scalar to, say, '>i4' the dtype
# will remain as ``=i4``.
if not rec.shape and (rec.dtype.byteorder == '<' or
(rec.dtype.byteorder == '=' and LITTLE_ENDIAN)):
rec = rec.byteswap()
self.fp.write(rec.tostring())
# Padding
count = rec.size * rec.itemsize
self.fp.write(b'0' * (var._vsize - count))
pos += self._recsize
self.fp.seek(pos)
self.fp.seek(pos0 + var._vsize)
def _write_values(self, values):
if hasattr(values, 'dtype'):
nc_type = REVERSE[values.dtype.char, values.dtype.itemsize]
else:
types = [(t, NC_INT) for t in integer_types]
types += [
(float, NC_FLOAT),
(str, NC_CHAR)
]
# bytes index into scalars in py3k. Check for "string" types
if isinstance(values, text_type) or isinstance(values, binary_type):
sample = values
else:
try:
sample = values[0] # subscriptable?
except TypeError:
sample = values # scalar
for class_, nc_type in types:
if isinstance(sample, class_):
break
typecode, size = TYPEMAP[nc_type]
dtype_ = '>%s' % typecode
# asarray() dies with bytes and '>c' in py3k. Change to 'S'
dtype_ = 'S' if dtype_ == '>c' else dtype_
values = asarray(values, dtype=dtype_)
self.fp.write(asbytes(nc_type))
if values.dtype.char == 'S':
nelems = values.itemsize
else:
nelems = values.size
self._pack_int(nelems)
if not values.shape and (values.dtype.byteorder == '<' or
(values.dtype.byteorder == '=' and LITTLE_ENDIAN)):
values = values.byteswap()
self.fp.write(values.tostring())
count = values.size * values.itemsize
self.fp.write(b'0' * (-count % 4)) # pad
def _read(self):
# Check magic bytes and version
magic = self.fp.read(3)
if not magic == b'CDF':
raise TypeError("Error: %s is not a valid NetCDF 3 file" %
self.filename)
self.__dict__['version_byte'] = fromstring(self.fp.read(1), '>b')[0]
# Read file headers and set data.
self._read_numrecs()
self._read_dim_array()
self._read_gatt_array()
self._read_var_array()
def _read_numrecs(self):
self.__dict__['_recs'] = self._unpack_int()
def _read_dim_array(self):
header = self.fp.read(4)
if header not in [ZERO, NC_DIMENSION]:
raise ValueError("Unexpected header.")
count = self._unpack_int()
for dim in range(count):
name = asstr(self._unpack_string())
length = self._unpack_int() or None # None for record dimension
self.dimensions[name] = length
self._dims.append(name) # preserve order
def _read_gatt_array(self):
for k, v in self._read_att_array().items():
self.__setattr__(k, v)
def _read_att_array(self):
header = self.fp.read(4)
if header not in [ZERO, NC_ATTRIBUTE]:
raise ValueError("Unexpected header.")
count = self._unpack_int()
attributes = {}
for attr in range(count):
name = asstr(self._unpack_string())
attributes[name] = self._read_values()
return attributes
def _read_var_array(self):
header = self.fp.read(4)
if header not in [ZERO, NC_VARIABLE]:
raise ValueError("Unexpected header.")
begin = 0
dtypes = {'names': [], 'formats': []}
rec_vars = []
count = self._unpack_int()
for var in range(count):
(name, dimensions, shape, attributes,
typecode, size, dtype_, begin_, vsize) = self._read_var()
# http://www.unidata.ucar.edu/software/netcdf/docs/netcdf.html
# Note that vsize is the product of the dimension lengths
# (omitting the record dimension) and the number of bytes
# per value (determined from the type), increased to the
# next multiple of 4, for each variable. If a record
# variable, this is the amount of space per record. The
# netCDF "record size" is calculated as the sum of the
# vsize's of all the record variables.
#
# The vsize field is actually redundant, because its value
# may be computed from other information in the header. The
# 32-bit vsize field is not large enough to contain the size
# of variables that require more than 2^32 - 4 bytes, so
# 2^32 - 1 is used in the vsize field for such variables.
if shape and shape[0] is None: # record variable
rec_vars.append(name)
# The netCDF "record size" is calculated as the sum of
# the vsize's of all the record variables.
self.__dict__['_recsize'] += vsize
if begin == 0:
begin = begin_
dtypes['names'].append(name)
dtypes['formats'].append(str(shape[1:]) + dtype_)
# Handle padding with a virtual variable.
if typecode in 'bch':
actual_size = reduce(mul, (1,) + shape[1:]) * size
padding = -actual_size % 4
if padding:
dtypes['names'].append('_padding_%d' % var)
dtypes['formats'].append('(%d,)>b' % padding)
# Data will be set later.
data = None
else: # not a record variable
# Calculate size to avoid problems with vsize (above)
a_size = reduce(mul, shape, 1) * size
if self.use_mmap:
data = self._mm_buf[begin_:begin_+a_size].view(dtype=dtype_)
data.shape = shape
else:
pos = self.fp.tell()
self.fp.seek(begin_)
data = fromstring(self.fp.read(a_size), dtype=dtype_)
data.shape = shape
self.fp.seek(pos)
# Add variable.
self.variables[name] = netcdf_variable(
data, typecode, size, shape, dimensions, attributes)
if rec_vars:
# Remove padding when only one record variable.
if len(rec_vars) == 1:
dtypes['names'] = dtypes['names'][:1]
dtypes['formats'] = dtypes['formats'][:1]
# Build rec array.
if self.use_mmap:
rec_array = self._mm_buf[begin:begin+self._recs*self._recsize].view(dtype=dtypes)
rec_array.shape = (self._recs,)
else:
pos = self.fp.tell()
self.fp.seek(begin)
rec_array = fromstring(self.fp.read(self._recs*self._recsize), dtype=dtypes)
rec_array.shape = (self._recs,)
self.fp.seek(pos)
for var in rec_vars:
self.variables[var].__dict__['data'] = rec_array[var]
def _read_var(self):
name = asstr(self._unpack_string())
dimensions = []
shape = []
dims = self._unpack_int()
for i in range(dims):
dimid = self._unpack_int()
dimname = self._dims[dimid]
dimensions.append(dimname)
dim = self.dimensions[dimname]
shape.append(dim)
dimensions = tuple(dimensions)
shape = tuple(shape)
attributes = self._read_att_array()
nc_type = self.fp.read(4)
vsize = self._unpack_int()
begin = [self._unpack_int, self._unpack_int64][self.version_byte-1]()
typecode, size = TYPEMAP[nc_type]
dtype_ = '>%s' % typecode
return name, dimensions, shape, attributes, typecode, size, dtype_, begin, vsize
def _read_values(self):
nc_type = self.fp.read(4)
n = self._unpack_int()
typecode, size = TYPEMAP[nc_type]
count = n*size
values = self.fp.read(int(count))
self.fp.read(-count % 4) # read padding
if typecode != 'c':
values = fromstring(values, dtype='>%s' % typecode)
if values.shape == (1,):
values = values[0]
else:
values = values.rstrip(b'\x00')
return values
def _pack_begin(self, begin):
if self.version_byte == 1:
self._pack_int(begin)
elif self.version_byte == 2:
self._pack_int64(begin)
def _pack_int(self, value):
self.fp.write(array(value, '>i').tostring())
_pack_int32 = _pack_int
def _unpack_int(self):
return int(fromstring(self.fp.read(4), '>i')[0])
_unpack_int32 = _unpack_int
def _pack_int64(self, value):
self.fp.write(array(value, '>q').tostring())
def _unpack_int64(self):
return fromstring(self.fp.read(8), '>q')[0]
def _pack_string(self, s):
count = len(s)
self._pack_int(count)
self.fp.write(asbytes(s))
self.fp.write(b'0' * (-count % 4)) # pad
def _unpack_string(self):
count = self._unpack_int()
s = self.fp.read(count).rstrip(b'\x00')
self.fp.read(-count % 4) # read padding
return s
class netcdf_variable(object):
"""
A data object for the `netcdf` module.
`netcdf_variable` objects are constructed by calling the method
`netcdf_file.createVariable` on the `netcdf_file` object. `netcdf_variable`
objects behave much like array objects defined in numpy, except that their
data resides in a file. Data is read by indexing and written by assigning
to an indexed subset; the entire array can be accessed by the index ``[:]``
or (for scalars) by using the methods `getValue` and `assignValue`.
`netcdf_variable` objects also have attribute `shape` with the same meaning
as for arrays, but the shape cannot be modified. There is another read-only
attribute `dimensions`, whose value is the tuple of dimension names.
All other attributes correspond to variable attributes defined in
the NetCDF file. Variable attributes are created by assigning to an
attribute of the `netcdf_variable` object.
Parameters
----------
data : array_like
The data array that holds the values for the variable.
Typically, this is initialized as empty, but with the proper shape.
typecode : dtype character code
Desired data-type for the data array.
size : int
Desired element size for the data array.
shape : sequence of ints
The shape of the array. This should match the lengths of the
variable's dimensions.
dimensions : sequence of strings
The names of the dimensions used by the variable. Must be in the
same order of the dimension lengths given by `shape`.
attributes : dict, optional
Attribute values (any type) keyed by string names. These attributes
become attributes for the netcdf_variable object.
Attributes
----------
dimensions : list of str
List of names of dimensions used by the variable object.
isrec, shape
Properties
See also
--------
isrec, shape
"""
def __init__(self, data, typecode, size, shape, dimensions, attributes=None):
self.data = data
self._typecode = typecode
self._size = size
self._shape = shape
self.dimensions = dimensions
self._attributes = attributes or {}
for k, v in self._attributes.items():
self.__dict__[k] = v
def __setattr__(self, attr, value):
# Store user defined attributes in a separate dict,
# so we can save them to file later.
try:
self._attributes[attr] = value
except AttributeError:
pass
self.__dict__[attr] = value
def isrec(self):
"""Returns whether the variable has a record dimension or not.
A record dimension is a dimension along which additional data could be
easily appended in the netcdf data structure without much rewriting of
the data file. This attribute is a read-only property of the
`netcdf_variable`.
"""
return bool(self.data.shape) and not self._shape[0]
isrec = property(isrec)
def shape(self):
"""Returns the shape tuple of the data variable.
This is a read-only attribute and cannot be modified in the
same manner as other numpy arrays.
"""
return self.data.shape
shape = property(shape)
def getValue(self):
"""
Retrieve a scalar value from a `netcdf_variable` of length one.
Raises
------
ValueError
If the netcdf variable is an array of length greater than one,
this exception will be raised.
"""
return self.data.item()
def assignValue(self, value):
"""
Assign a scalar value to a `netcdf_variable` of length one.
Parameters
----------
value : scalar
Scalar value (of compatible type) to assign to a length-one netcdf
variable. This value will be written to file.
Raises
------
ValueError
If the input is not a scalar, or if the destination is not a length-one
netcdf variable.
"""
if not self.data.flags.writeable:
# Work-around for a bug in NumPy. Calling itemset() on a read-only
# memory-mapped array causes a seg. fault.
# See NumPy ticket #1622, and SciPy ticket #1202.
# This check for `writeable` can be removed when the oldest version
# of numpy still supported by scipy contains the fix for #1622.
raise RuntimeError("variable is not writeable")
self.data.itemset(value)
def typecode(self):
"""
Return the typecode of the variable.
Returns
-------
typecode : char
The character typecode of the variable (eg, 'i' for int).
"""
return self._typecode
def itemsize(self):
"""
Return the itemsize of the variable.
Returns
-------
itemsize : int
The element size of the variable (eg, 8 for float64).
"""
return self._size
def __getitem__(self, index):
return self.data[index]
def __setitem__(self, index, data):
# For record variables, grow the underlying data array so the assignment fits.
if self.isrec:
if isinstance(index, tuple):
rec_index = index[0]
else:
rec_index = index
if isinstance(rec_index, slice):
recs = (rec_index.start or 0) + len(data)
else:
recs = rec_index + 1
if recs > len(self.data):
shape = (recs,) + self._shape[1:]
self.data.resize(shape)
self.data[index] = data
NetCDFFile = netcdf_file
NetCDFVariable = netcdf_variable
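# A minimal usage sketch, not part of the original module: exercises the
# indexing and assignment API described in the `netcdf_variable` docstring
# above on an in-memory file. The variable name 'time' and dimension name
# 't' are illustrative assumptions.
if __name__ == '__main__':
    from io import BytesIO
    f = netcdf_file(BytesIO(), mode='w')
    f.createDimension('t', 3)
    v = f.createVariable('time', 'i', ('t',))
    v[:] = [1, 2, 3]   # write by assigning to an indexed subset
    print(v.shape)     # -> (3,)
    print(v[:])        # read the entire array back by indexing
    f.close()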
|
Agnishom/kabooblydoo
|
refs/heads/master
|
lib/flask/testsuite/test_apps/flask_broken/__init__.py
|
629
|
import flask.ext.broken.b
import missing_module
|
nyalldawson/QGIS
|
refs/heads/master
|
scripts/qgis_fixes/fix_ws_comma.py
|
77
|
from lib2to3.fixes.fix_ws_comma import FixWsComma
|
slevenhagen/odoo-npg
|
refs/heads/8.0
|
addons/auth_oauth/controllers/main.py
|
205
|
import functools
import logging
import simplejson
import urlparse
import werkzeug.utils
from werkzeug.exceptions import BadRequest
import openerp
from openerp import SUPERUSER_ID
from openerp import http
from openerp.http import request
from openerp.addons.web.controllers.main import db_monodb, ensure_db, set_cookie_and_redirect, login_and_redirect
from openerp.addons.auth_signup.controllers.main import AuthSignupHome as Home
from openerp.modules.registry import RegistryManager
from openerp.tools.translate import _
_logger = logging.getLogger(__name__)
#----------------------------------------------------------
# helpers
#----------------------------------------------------------
def fragment_to_query_string(func):
@functools.wraps(func)
def wrapper(self, *a, **kw):
kw.pop('debug', False)
if not kw:
return """<html><head><script>
var l = window.location;
var q = l.hash.substring(1);
var r = l.pathname + l.search;
if(q.length !== 0) {
var s = l.search ? (l.search === '?' ? '' : '&') : '?';
r = l.pathname + l.search + s + q;
}
if (r == l.pathname) {
r = '/';
}
window.location = r;
</script></head><body></body></html>"""
return func(self, *a, **kw)
return wrapper
#----------------------------------------------------------
# Controller
#----------------------------------------------------------
class OAuthLogin(Home):
def list_providers(self):
try:
provider_obj = request.registry.get('auth.oauth.provider')
providers = provider_obj.search_read(request.cr, SUPERUSER_ID, [('enabled', '=', True), ('auth_endpoint', '!=', False), ('validation_endpoint', '!=', False)])
# TODO in forwardport: remove conditions on 'auth_endpoint' and 'validation_endpoint' when these fields will be 'required' in model
except Exception:
providers = []
for provider in providers:
return_url = request.httprequest.url_root + 'auth_oauth/signin'
state = self.get_state(provider)
params = dict(
debug=request.debug,
response_type='token',
client_id=provider['client_id'],
redirect_uri=return_url,
scope=provider['scope'],
state=simplejson.dumps(state),
)
provider['auth_link'] = provider['auth_endpoint'] + '?' + werkzeug.url_encode(params)
return providers
def get_state(self, provider):
redirect = request.params.get('redirect') or 'web'
if not redirect.startswith(('//', 'http://', 'https://')):
redirect = '%s%s' % (request.httprequest.url_root, redirect[1:] if redirect[0] == '/' else redirect)
state = dict(
d=request.session.db,
p=provider['id'],
r=werkzeug.url_quote_plus(redirect),
)
token = request.params.get('token')
if token:
state['t'] = token
return state
@http.route()
def web_login(self, *args, **kw):
ensure_db()
if request.httprequest.method == 'GET' and request.session.uid and request.params.get('redirect'):
# Redirect if already logged in and redirect param is present
return http.redirect_with_hash(request.params.get('redirect'))
providers = self.list_providers()
response = super(OAuthLogin, self).web_login(*args, **kw)
if response.is_qweb:
error = request.params.get('oauth_error')
if error == '1':
error = _("Sign up is not allowed on this database.")
elif error == '2':
error = _("Access Denied")
elif error == '3':
error = _("You do not have access to this database or your invitation has expired. Please ask for an invitation and be sure to follow the link in your invitation email.")
else:
error = None
response.qcontext['providers'] = providers
if error:
response.qcontext['error'] = error
return response
@http.route()
def web_auth_signup(self, *args, **kw):
providers = self.list_providers()
if len(providers) == 1:
werkzeug.exceptions.abort(werkzeug.utils.redirect(providers[0]['auth_link'], 303))
response = super(OAuthLogin, self).web_auth_signup(*args, **kw)
response.qcontext.update(providers=providers)
return response
@http.route()
def web_auth_reset_password(self, *args, **kw):
providers = self.list_providers()
if len(providers) == 1:
werkzeug.exceptions.abort(werkzeug.utils.redirect(providers[0]['auth_link'], 303))
response = super(OAuthLogin, self).web_auth_reset_password(*args, **kw)
response.qcontext.update(providers=providers)
return response
class OAuthController(http.Controller):
@http.route('/auth_oauth/signin', type='http', auth='none')
@fragment_to_query_string
def signin(self, **kw):
state = simplejson.loads(kw['state'])
dbname = state['d']
provider = state['p']
context = state.get('c', {})
registry = RegistryManager.get(dbname)
with registry.cursor() as cr:
try:
u = registry.get('res.users')
credentials = u.auth_oauth(cr, SUPERUSER_ID, provider, kw, context=context)
cr.commit()
action = state.get('a')
menu = state.get('m')
redirect = werkzeug.url_unquote_plus(state['r']) if state.get('r') else False
url = '/web'
if redirect:
url = redirect
elif action:
url = '/web#action=%s' % action
elif menu:
url = '/web#menu_id=%s' % menu
return login_and_redirect(*credentials, redirect_url=url)
except AttributeError:
# auth_signup is not installed
_logger.error("auth_signup not installed on database %s: oauth sign up cancelled." % (dbname,))
url = "/web/login?oauth_error=1"
except openerp.exceptions.AccessDenied:
# oauth credentials not valid, user could be on a temporary session
_logger.info('OAuth2: access denied, redirect to main page in case a valid session exists, without setting cookies')
url = "/web/login?oauth_error=3"
redirect = werkzeug.utils.redirect(url, 303)
redirect.autocorrect_location_header = False
return redirect
except Exception as e:
# signup error
_logger.exception("OAuth2: %s" % str(e))
url = "/web/login?oauth_error=2"
return set_cookie_and_redirect(url)
@http.route('/auth_oauth/oea', type='http', auth='none')
def oea(self, **kw):
"""login user via Odoo Account provider"""
dbname = kw.pop('db', None)
if not dbname:
dbname = db_monodb()
if not dbname:
return BadRequest()
registry = RegistryManager.get(dbname)
with registry.cursor() as cr:
IMD = registry['ir.model.data']
try:
model, provider_id = IMD.get_object_reference(cr, SUPERUSER_ID, 'auth_oauth', 'provider_openerp')
except ValueError:
return set_cookie_and_redirect('/web?db=%s' % dbname)
assert model == 'auth.oauth.provider'
state = {
'd': dbname,
'p': provider_id,
'c': {'no_user_creation': True},
}
kw['state'] = simplejson.dumps(state)
return self.signin(**kw)
# vim:expandtab:tabstop=4:softtabstop=4:shiftwidth=4:
|
jbeluch/xam
|
refs/heads/master
|
tests/test_addon.py
|
1
|
import os
import unittest
from unittest import TestCase
from xml.etree import ElementTree as ET
from xam import Addon
try:
from collections import OrderedDict
except ImportError:
from collective.ordereddict import OrderedDict
class TestAddon(TestCase):
def assert_attrs(self, obj, attrs):
for attr_name, expected_value in attrs.items():
attr_value = getattr(obj, attr_name)
self.assertEqual(expected_value, attr_value)
self.assertTrue(isinstance(attr_value, unicode))
def assert_dict(self, expected, actual):
for key, val in actual.items():
self.assertTrue(isinstance(key, unicode))
self.assertTrue(isinstance(val, unicode))
self.assertEqual(expected, actual)
def test_parse(self):
addon = Addon.from_filename(os.path.join(os.path.dirname(__file__), 'data', 'addon.xml'))
expected = {
# attr_name: expected_value
'version': u'1.2.1',
'id': u'plugin.video.academicearth',
'name': u'Academic Earth',
'provider': u'Jonathan Beluch (jbel)',
}
self.assert_attrs(addon, expected)
self.assert_dict({
u'xbmc.python': u'2.0',
u'script.module.beautifulsoup': u'3.0.8',
u'script.module.xbmcswift': u'0.2.0',
u'plugin.video.youtube': u'2.9.1',
}, addon.dependencies)
self.assertEqual(addon.languages, ['en', 'fr'])
self.assertNotEqual(None, addon.metadata)
self.assertEqual('all', addon.platform)
self.assertEqual(OrderedDict(
[(None, 'Watch lectures from Academic Earth (http://academicearth.org)')]
), addon.summaries)
self.assertEqual('Watch lectures from Academic Earth (http://academicearth.org)',
addon.summary())
#self.assertEqual('Watch lectures from Academic Earth (http://academicearth.org)',
#addon.summary('en'))
self.assertEqual(OrderedDict(
[(None,'Browse online courses and lectures from the world\'s top scholars.')]
), addon.descriptions)
self.assertEqual('Browse online courses and lectures from the world\'s top scholars.',
addon.description())
def test_setters(self):
xml = ET.parse(os.path.join(os.path.dirname(__file__), 'data', 'addon.xml')).getroot()
addon = Addon(xml)
self.assertEqual('1.2.1', addon.version)
addon.version = '1.2.2'
self.assertEqual('1.2.2', addon.version)
def test_to_dict(self):
addon = Addon.from_filename(os.path.join(os.path.dirname(__file__), 'data', 'addon.xml'))
actual = addon.to_dict()
with open(os.path.join(os.path.dirname(__file__), 'data', 'addon.xml')) as inp:
xml = inp.read()
expected = {
'id': u'plugin.video.academicearth',
'name': u'Academic Earth',
'version': u'1.2.1',
'provider': u'Jonathan Beluch (jbel)',
'dependencies': {
'xbmc.python': '2.0',
'script.module.beautifulsoup': '3.0.8',
'script.module.xbmcswift': '0.2.0',
'plugin.video.youtube': '2.9.1',
},
'summaries': {None: u"Watch lectures from Academic Earth (http://academicearth.org)"},
'descriptions': {None: u"Browse online courses and lectures from the world's top scholars."},
'platform': 'all',
'_xml': xml,
}
for key, val in expected.items():
if not key.startswith('_'):
self.assertEqual(val, actual[key])
LANG_XML_TMP = '''
<addon id="plugin.video.academicearth" name="Academic Earth" provider-name="Jonathan Beluch (jbel)" version="1.2.1">
<extension point="xbmc.addon.metadata">
%s
</extension>
</addon>
'''
class TestLangTags(TestCase):
def test_no_lang_tag(self):
xmlstr = LANG_XML_TMP % ''
addon = Addon(ET.fromstring(xmlstr))
self.assertEqual(addon.languages, [])
def test_self_close_lang_tag(self):
xmlstr = LANG_XML_TMP % '<language/>'
addon = Addon(ET.fromstring(xmlstr))
self.assertEqual(addon.languages, [])
def test_empty_lang_tag(self):
xmlstr = LANG_XML_TMP % '<language></language>'
addon = Addon(ET.fromstring(xmlstr))
self.assertEqual(addon.languages, [])
def test_data_lang_tag(self):
xmlstr = LANG_XML_TMP % '<language>en</language>'
addon = Addon(ET.fromstring(xmlstr))
self.assertEqual(addon.languages, ['en'])
xmlstr = LANG_XML_TMP % '<language>en fr</language>'
addon = Addon(ET.fromstring(xmlstr))
self.assertEqual(addon.languages, ['en', 'fr'])
if __name__ == '__main__':
unittest.main()
|
SnabbCo/neutron
|
refs/heads/master
|
neutron/tests/unit/test_common_log.py
|
22
|
# Copyright (c) 2013 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from neutron.common import log as call_log
from neutron.tests import base
MODULE_NAME = 'neutron.tests.unit.test_common_log'
class TargetKlass(object):
@call_log.log
def test_method(self, arg1, arg2, *args, **kwargs):
pass
class TestCallLog(base.BaseTestCase):
def setUp(self):
super(TestCallLog, self).setUp()
self.klass = TargetKlass()
self.expected_format = ('%(class_name)s method %(method_name)s '
'called with arguments %(args)s %(kwargs)s')
self.expected_data = {'class_name': MODULE_NAME + '.TargetKlass',
'method_name': 'test_method',
'args': (),
'kwargs': {}}
def test_call_log_all_args(self):
self.expected_data['args'] = (10, 20)
with mock.patch.object(call_log.LOG, 'debug') as log_debug:
self.klass.test_method(10, 20)
log_debug.assert_called_once_with(self.expected_format,
self.expected_data)
def test_call_log_all_kwargs(self):
self.expected_data['kwargs'] = {'arg1': 10, 'arg2': 20}
with mock.patch.object(call_log.LOG, 'debug') as log_debug:
self.klass.test_method(arg1=10, arg2=20)
log_debug.assert_called_once_with(self.expected_format,
self.expected_data)
def test_call_log_known_args_unknown_args_kwargs(self):
self.expected_data['args'] = (10, 20, 30)
self.expected_data['kwargs'] = {'arg4': 40}
with mock.patch.object(call_log.LOG, 'debug') as log_debug:
self.klass.test_method(10, 20, 30, arg4=40)
log_debug.assert_called_once_with(self.expected_format,
self.expected_data)
def test_call_log_known_args_kwargs_unknown_kwargs(self):
self.expected_data['args'] = (10,)
self.expected_data['kwargs'] = {'arg2': 20, 'arg3': 30, 'arg4': 40}
with mock.patch.object(call_log.LOG, 'debug') as log_debug:
self.klass.test_method(10, arg2=20, arg3=30, arg4=40)
log_debug.assert_called_once_with(self.expected_format,
self.expected_data)
|
pixelrebel/st2
|
refs/heads/master
|
st2common/st2common/util/actionalias_matching.py
|
4
|
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import six
from st2common.exceptions.content import ParseException
from st2common.models.utils.action_alias_utils import extract_parameters
__all__ = [
'list_format_strings_from_aliases',
'normalise_alias_format_string',
'match_command_to_alias'
]
def list_format_strings_from_aliases(aliases):
'''
List patterns from a collection of alias objects
:param aliases: The list of aliases
:type aliases: ``list`` of :class:`st2common.models.api.action.ActionAliasAPI`
:return: A description of potential execution patterns in a list of aliases.
:rtype: ``list`` of ``tuple``
'''
patterns = []
for alias in aliases:
for format_ in alias.formats:
display, representations = normalise_alias_format_string(format_)
patterns.extend([(display, representation) for representation in representations])
return patterns
def normalise_alias_format_string(alias_format):
'''
StackStorm action aliases can take two forms:
1. A simple string holding the format.
2. A dictionary which holds numerous alias format "representation(s)",
   with a single "display" used for help about the action alias.
This function processes both forms and returns a standardized form.
:param alias_format: The alias format
:type alias_format: ``str`` or ``dict``
:return: The display string and the list of representations for the alias
:rtype: ``tuple`` of (``str``, ``list`` of ``str``)
'''
display = None
representation = []
if isinstance(alias_format, six.string_types):
display = alias_format
representation.append(alias_format)
elif isinstance(alias_format, dict):
display = alias_format['display']
representation = alias_format['representation']
else:
raise TypeError("alias_format '%s' is neither a dictionary or string type."
% repr(alias_format))
return (display, representation)
def match_command_to_alias(command, aliases):
"""
Match the command text against the given aliases and return a list of
(alias, display, representation) tuples, one per matching format string.
"""
results = []
for alias in aliases:
format_strings = list_format_strings_from_aliases([alias])
for format_string in format_strings:
try:
extract_parameters(format_str=format_string[1],
param_stream=command)
except ParseException:
continue
results.append((alias, format_string[0], format_string[1]))
return results
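# A minimal usage sketch, not part of st2: shows the two alias format shapes
# accepted by normalise_alias_format_string. The format strings below are
# illustrative assumptions, not real StackStorm aliases.
if __name__ == '__main__':
    print(normalise_alias_format_string('run remote {{cmd}}'))
    # -> ('run remote {{cmd}}', ['run remote {{cmd}}'])
    print(normalise_alias_format_string({
        'display': 'run remote <cmd>',
        'representation': ['run remote {{cmd}}', 'exec {{cmd}}'],
    }))
    # -> ('run remote <cmd>', ['run remote {{cmd}}', 'exec {{cmd}}'])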
|
yeyanchao/calibre
|
refs/heads/master
|
src/calibre/ebooks/metadata/kdl.py
|
10
|
#!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
__license__ = 'GPL v3'
__copyright__ = '2011, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import re, urllib, urlparse, socket
from mechanize import URLError
from calibre.ebooks.metadata.book.base import Metadata
from calibre import browser
from calibre.ebooks.BeautifulSoup import BeautifulSoup
from calibre.ebooks.chardet import xml_to_unicode
URL = \
"http://ww2.kdl.org/libcat/WhatsNext.asp?AuthorLastName={0}&AuthorFirstName=&SeriesName=&BookTitle={1}&CategoryID=0&cmdSearch=Search&Search=1&grouping="
_ignore_starts = u'\'"'+u''.join(unichr(x) for x in range(0x2018, 0x201e)+[0x2032, 0x2033])
def get_series(title, authors, timeout=60):
mi = Metadata(title, authors)
if title and title[0] in _ignore_starts:
title = title[1:]
title = re.sub(r'^(A|The|An)\s+', '', title).strip()
if not title:
return mi
if isinstance(title, unicode):
title = title.encode('utf-8')
title = urllib.quote_plus(title)
author = authors[0].strip()
if not author:
return mi
if ',' in author:
author = author.split(',')[0]
else:
author = author.split()[-1]
url = URL.format(author, title)
br = browser()
try:
raw = br.open_novisit(url, timeout=timeout).read()
except URLError as e:
if isinstance(e.reason, socket.timeout):
raise Exception('KDL Server busy, try again later')
raise
if 'see the full results' not in raw:
return mi
raw = xml_to_unicode(raw)[0]
soup = BeautifulSoup(raw)
searcharea = soup.find('div', attrs={'class':'searcharea'})
if searcharea is None:
return mi
ss = searcharea.find('div', attrs={'class':'seriessearch'})
if ss is None:
return mi
a = ss.find('a', href=True)
if a is None:
return mi
href = a['href'].partition('?')[-1]
data = urlparse.parse_qs(href)
series = data.get('SeriesName', [])
if not series:
return mi
series = series[0]
series = re.sub(r' series$', '', series).strip()
if series:
mi.series = series
ns = ss.nextSibling
if ns.contents:
raw = unicode(ns.contents[0])
raw = raw.partition('.')[0].strip()
try:
mi.series_index = int(raw)
except ValueError:
pass
return mi
if __name__ == '__main__':
import sys
print get_series(sys.argv[-2], [sys.argv[-1]])
|
emsrc/daeso-framework
|
refs/heads/master
|
lib/daeso/string/number.py
|
1
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2007-2013 by Erwin Marsi and TST-Centrale
#
# This file is part of the DAESO Framework.
#
# The DAESO Framework is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# The DAESO Framework is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Spell out numbers
"""
__authors__ = "Erwin Marsi <e.marsi@gmail.com>"
__all__ = [
"spell_out_number"]
def spell_out_number(n, prefer_hundreds=True):
"""
spell out a positive integer number in words
"""
s = str(n)
if len(s) == 4 and s[1] != "0" and prefer_hundreds:
# spell out a positive integer number in words using hundreds, where n
# must have four digits and the second digit cannot be zero:
# 2001 ==> *twintighonderdeen
return ( _digit_triple_to_words("0" + s[:2]) +
"honderd" +
_digit_triple_to_words("0" + s[2:]) )
words = ""
i = 0
while s:
triple = s[-3:].rjust(3, "0")
if i == 0 and triple == "000":
words = "nul"
elif i == 1 and triple == "001":
# 1000 ==> *eenduizend
words = ( _triple_modifiers[i] +
words )
else:
# TWEEduizend, ..., negenhonderdnegenennegentigDUIZEND,
# EEN miljoen, ...
words = ( _digit_triple_to_words(triple) +
_triple_modifiers[i] +
words )
s = s[:-3]
i += 1
# strip spaces from words starting/ending with a triple modifier
return words.strip()
# mapping of triples of digits to words
# for all basic numbers and irregular cases
_triple_to_word = {
"000": "", # zeros alway remain unexpressed, except in "nul"
"001": "een",
"002": "twee",
"003": "drie",
"004": "vier",
"005": "vijf",
"006": "zes",
"007": "zeven",
"008": "acht",
"009": "negen",
"010": "tien",
"011": "elf",
"012": "twaalf",
"013": "dertien",
"014": "veertien",
"015": "vijftien",
"016": "zestien",
"017": "zeventien",
"018": "achttien",
"019": "negentien",
"020": "twintig",
"030": "dertig",
"040": "veertig",
"050": "vijftig",
"060": "zestig",
"070": "zeventig",
"080": "tachtig",
"090": "negentig"
}
# modifiers that can be combined with an expanded triple
_triple_modifiers = (
"",
"duizend ", # no space preceding duizend
" miljoen ",
" miljard ",
" biljoen ",
" biljard ",
" triljoen ",
" triljard "
)
def _digit_triple_to_words(digits):
"""
express a string of three digits in words
"""
try:
return _triple_to_word[digits]
except KeyError:
pass
d1, d2, d3 = digits
words = ""
if d1 not in "01":
# TWEEhonderd, DRIEhonderd, ..., NEGENhonderd
words += _triple_to_word["00" + d1]
if d1 != "0":
# HONDERD, tweeHONDERD, ..., negenHONDERD
words += "honderd"
return words + _digit_triple_to_words("0" + d2 + d3)
if d3 != "0":
# EENENtwintig, TWEEENtwintig, ..., NEGENENnegentig
words += _triple_to_word["00" + d3] + "en"
if d2 != "0":
# TWINTIG, eenenTWINTIG, tweeenTWINTIG, ..., negenenNEGENTIG
words += _triple_to_word["0" + d2 + "0"]
return words
if __name__ == "__main__":
    # simple smoke test, guarded so importing this module has no side effects
    print spell_out_number(4401)
    print spell_out_number(6028)
    print spell_out_number(271850)
|
rchlchang/byte1
|
refs/heads/master
|
lib/werkzeug/debug/tbtools.py
|
311
|
# -*- coding: utf-8 -*-
"""
werkzeug.debug.tbtools
~~~~~~~~~~~~~~~~~~~~~~
This module provides various traceback related utility functions.
:copyright: (c) 2013 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD.
"""
import re
import os
import sys
import json
import inspect
import traceback
import codecs
from tokenize import TokenError
from werkzeug.utils import cached_property, escape
from werkzeug.debug.console import Console
from werkzeug._compat import range_type, PY2, text_type, string_types
_coding_re = re.compile(r'coding[:=]\s*([-\w.]+)')
_line_re = re.compile(r'(?m)^(.*?)$')
_funcdef_re = re.compile(r'^(\s*def\s)|(.*(?<!\w)lambda(:|\s))|^(\s*@)')
UTF8_COOKIE = '\xef\xbb\xbf'
system_exceptions = (SystemExit, KeyboardInterrupt)
try:
system_exceptions += (GeneratorExit,)
except NameError:
pass
HEADER = u'''\
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
"http://www.w3.org/TR/html4/loose.dtd">
<html>
<head>
<title>%(title)s // Werkzeug Debugger</title>
<link rel="stylesheet" href="?__debugger__=yes&cmd=resource&f=style.css" type="text/css">
<!-- We need to make sure this has a favicon so that the debugger does not by
accident trigger a request to /favicon.ico which might change the application
state. -->
<link rel="shortcut icon" href="?__debugger__=yes&cmd=resource&f=console.png">
<script type="text/javascript" src="?__debugger__=yes&cmd=resource&f=jquery.js"></script>
<script type="text/javascript" src="?__debugger__=yes&cmd=resource&f=debugger.js"></script>
<script type="text/javascript">
var TRACEBACK = %(traceback_id)d,
CONSOLE_MODE = %(console)s,
EVALEX = %(evalex)s,
SECRET = "%(secret)s";
</script>
</head>
<body>
<div class="debugger">
'''
FOOTER = u'''\
<div class="footer">
Brought to you by <strong class="arthur">DON'T PANIC</strong>, your
friendly Werkzeug powered traceback interpreter.
</div>
</div>
</body>
</html>
'''
PAGE_HTML = HEADER + u'''\
<h1>%(exception_type)s</h1>
<div class="detail">
<p class="errormsg">%(exception)s</p>
</div>
<h2 class="traceback">Traceback <em>(most recent call last)</em></h2>
%(summary)s
<div class="plain">
<form action="/?__debugger__=yes&cmd=paste" method="post">
<p>
<input type="hidden" name="language" value="pytb">
This is the Copy/Paste friendly version of the traceback. <span
class="pastemessage">You can also paste this traceback into
a <a href="https://gist.github.com/">gist</a>:
<input type="submit" value="create paste"></span>
</p>
<textarea cols="50" rows="10" name="code" readonly>%(plaintext)s</textarea>
</form>
</div>
<div class="explanation">
The debugger caught an exception in your WSGI application. You can now
look at the traceback which led to the error. <span class="nojavascript">
If you enable JavaScript you can also use additional features such as code
execution (if the evalex feature is enabled), automatic pasting of the
exceptions and much more.</span>
</div>
''' + FOOTER + '''
<!--
%(plaintext_cs)s
-->
'''
CONSOLE_HTML = HEADER + u'''\
<h1>Interactive Console</h1>
<div class="explanation">
In this console you can execute Python expressions in the context of the
application. The initial namespace was created by the debugger automatically.
</div>
<div class="console"><div class="inner">The Console requires JavaScript.</div></div>
''' + FOOTER
SUMMARY_HTML = u'''\
<div class="%(classes)s">
%(title)s
<ul>%(frames)s</ul>
%(description)s
</div>
'''
FRAME_HTML = u'''\
<div class="frame" id="frame-%(id)d">
<h4>File <cite class="filename">"%(filename)s"</cite>,
line <em class="line">%(lineno)s</em>,
in <code class="function">%(function_name)s</code></h4>
<pre>%(current_line)s</pre>
</div>
'''
SOURCE_TABLE_HTML = u'<table class=source>%s</table>'
SOURCE_LINE_HTML = u'''\
<tr class="%(classes)s">
<td class=lineno>%(lineno)s</td>
<td>%(code)s</td>
</tr>
'''
def render_console_html(secret):
return CONSOLE_HTML % {
'evalex': 'true',
'console': 'true',
'title': 'Console',
'secret': secret,
'traceback_id': -1
}
def get_current_traceback(ignore_system_exceptions=False,
show_hidden_frames=False, skip=0):
"""Get the current exception info as `Traceback` object. Per default
calling this method will reraise system exceptions such as generator exit,
system exit or others. This behavior can be disabled by passing `False`
to the function as first parameter.
"""
exc_type, exc_value, tb = sys.exc_info()
if ignore_system_exceptions and exc_type in system_exceptions:
raise
for x in range_type(skip):
if tb.tb_next is None:
break
tb = tb.tb_next
tb = Traceback(exc_type, exc_value, tb)
if not show_hidden_frames:
tb.filter_hidden_frames()
return tb
class Line(object):
"""Helper for the source renderer."""
__slots__ = ('lineno', 'code', 'in_frame', 'current')
def __init__(self, lineno, code):
self.lineno = lineno
self.code = code
self.in_frame = False
self.current = False
def classes(self):
rv = ['line']
if self.in_frame:
rv.append('in-frame')
if self.current:
rv.append('current')
return rv
classes = property(classes)
def render(self):
return SOURCE_LINE_HTML % {
'classes': u' '.join(self.classes),
'lineno': self.lineno,
'code': escape(self.code)
}
class Traceback(object):
"""Wraps a traceback."""
def __init__(self, exc_type, exc_value, tb):
self.exc_type = exc_type
self.exc_value = exc_value
if not isinstance(exc_type, str):
exception_type = exc_type.__name__
if exc_type.__module__ not in ('__builtin__', 'exceptions'):
exception_type = exc_type.__module__ + '.' + exception_type
else:
exception_type = exc_type
self.exception_type = exception_type
# we only add frames to the list that are not hidden. This follows
# the magic variables as defined by paste.exceptions.collector
self.frames = []
while tb:
self.frames.append(Frame(exc_type, exc_value, tb))
tb = tb.tb_next
def filter_hidden_frames(self):
"""Remove the frames according to the paste spec."""
if not self.frames:
return
new_frames = []
hidden = False
for frame in self.frames:
hide = frame.hide
if hide in ('before', 'before_and_this'):
new_frames = []
hidden = False
if hide == 'before_and_this':
continue
elif hide in ('reset', 'reset_and_this'):
hidden = False
if hide == 'reset_and_this':
continue
elif hide in ('after', 'after_and_this'):
hidden = True
if hide == 'after_and_this':
continue
elif hide or hidden:
continue
new_frames.append(frame)
# if we only have one frame and that frame is from the codeop
# module, remove it.
if len(new_frames) == 1 and self.frames[0].module == 'codeop':
del self.frames[:]
# if the last frame is missing, something went terribly wrong :(
elif self.frames[-1] in new_frames:
self.frames[:] = new_frames
def is_syntax_error(self):
"""Is it a syntax error?"""
return isinstance(self.exc_value, SyntaxError)
is_syntax_error = property(is_syntax_error)
def exception(self):
"""String representation of the exception."""
buf = traceback.format_exception_only(self.exc_type, self.exc_value)
rv = ''.join(buf).strip()
return rv.decode('utf-8', 'replace') if PY2 else rv
exception = property(exception)
def log(self, logfile=None):
"""Log the ASCII traceback into a file object."""
if logfile is None:
logfile = sys.stderr
tb = self.plaintext.rstrip() + u'\n'
if PY2:
tb = tb.encode('utf-8', 'replace')
logfile.write(tb)
def paste(self):
"""Create a paste and return the paste id."""
data = json.dumps({
'description': 'Werkzeug Internal Server Error',
'public': False,
'files': {
'traceback.txt': {
'content': self.plaintext
}
}
}).encode('utf-8')
try:
from urllib2 import urlopen
except ImportError:
from urllib.request import urlopen
rv = urlopen('https://api.github.com/gists', data=data)
resp = json.loads(rv.read().decode('utf-8'))
rv.close()
return {
'url': resp['html_url'],
'id': resp['id']
}
def render_summary(self, include_title=True):
"""Render the traceback for the interactive console."""
title = ''
frames = []
classes = ['traceback']
if not self.frames:
classes.append('noframe-traceback')
if include_title:
if self.is_syntax_error:
title = u'Syntax Error'
else:
title = u'Traceback <em>(most recent call last)</em>:'
for frame in self.frames:
frames.append(u'<li%s>%s' % (
frame.info and u' title="%s"' % escape(frame.info) or u'',
frame.render()
))
if self.is_syntax_error:
description_wrapper = u'<pre class=syntaxerror>%s</pre>'
else:
description_wrapper = u'<blockquote>%s</blockquote>'
return SUMMARY_HTML % {
'classes': u' '.join(classes),
'title': title and u'<h3>%s</h3>' % title or u'',
'frames': u'\n'.join(frames),
'description': description_wrapper % escape(self.exception)
}
def render_full(self, evalex=False, secret=None):
"""Render the Full HTML page with the traceback info."""
exc = escape(self.exception)
return PAGE_HTML % {
'evalex': evalex and 'true' or 'false',
'console': 'false',
'title': exc,
'exception': exc,
'exception_type': escape(self.exception_type),
'summary': self.render_summary(include_title=False),
'plaintext': self.plaintext,
'plaintext_cs': re.sub('-{2,}', '-', self.plaintext),
'traceback_id': self.id,
'secret': secret
}
def generate_plaintext_traceback(self):
"""Like the plaintext attribute but returns a generator"""
yield u'Traceback (most recent call last):'
for frame in self.frames:
yield u' File "%s", line %s, in %s' % (
frame.filename,
frame.lineno,
frame.function_name
)
yield u' ' + frame.current_line.strip()
yield self.exception
def plaintext(self):
return u'\n'.join(self.generate_plaintext_traceback())
plaintext = cached_property(plaintext)
id = property(lambda x: id(x))
class Frame(object):
"""A single frame in a traceback."""
def __init__(self, exc_type, exc_value, tb):
self.lineno = tb.tb_lineno
self.function_name = tb.tb_frame.f_code.co_name
self.locals = tb.tb_frame.f_locals
self.globals = tb.tb_frame.f_globals
fn = inspect.getsourcefile(tb) or inspect.getfile(tb)
if fn[-4:] in ('.pyo', '.pyc'):
fn = fn[:-1]
# if it's a file on the file system resolve the real filename.
if os.path.isfile(fn):
fn = os.path.realpath(fn)
self.filename = fn
self.module = self.globals.get('__name__')
self.loader = self.globals.get('__loader__')
self.code = tb.tb_frame.f_code
# support for paste's traceback extensions
self.hide = self.locals.get('__traceback_hide__', False)
info = self.locals.get('__traceback_info__')
if info is not None:
try:
info = text_type(info)
except UnicodeError:
info = str(info).decode('utf-8', 'replace')
self.info = info
def render(self):
"""Render a single frame in a traceback."""
return FRAME_HTML % {
'id': self.id,
'filename': escape(self.filename),
'lineno': self.lineno,
'function_name': escape(self.function_name),
'current_line': escape(self.current_line.strip())
}
def get_annotated_lines(self):
"""Helper function that returns lines with extra information."""
lines = [Line(idx + 1, x) for idx, x in enumerate(self.sourcelines)]
# find function definition and mark lines
if hasattr(self.code, 'co_firstlineno'):
lineno = self.code.co_firstlineno - 1
while lineno > 0:
if _funcdef_re.match(lines[lineno].code):
break
lineno -= 1
try:
offset = len(inspect.getblock([x.code + '\n' for x
in lines[lineno:]]))
except TokenError:
offset = 0
for line in lines[lineno:lineno + offset]:
line.in_frame = True
# mark current line
try:
lines[self.lineno - 1].current = True
except IndexError:
pass
return lines
def render_source(self):
"""Render the sourcecode."""
return SOURCE_TABLE_HTML % u'\n'.join(line.render() for line in
self.get_annotated_lines())
def eval(self, code, mode='single'):
"""Evaluate code in the context of the frame."""
if isinstance(code, string_types):
if PY2 and isinstance(code, unicode):
code = UTF8_COOKIE + code.encode('utf-8')
code = compile(code, '<interactive>', mode)
return eval(code, self.globals, self.locals)
@cached_property
def sourcelines(self):
"""The sourcecode of the file as list of unicode strings."""
# get sourcecode from loader or file
source = None
if self.loader is not None:
try:
if hasattr(self.loader, 'get_source'):
source = self.loader.get_source(self.module)
elif hasattr(self.loader, 'get_source_by_code'):
source = self.loader.get_source_by_code(self.code)
except Exception:
# we munch the exception so that we don't cause troubles
# if the loader is broken.
pass
if source is None:
try:
f = open(self.filename)
except IOError:
return []
try:
source = f.read()
finally:
f.close()
# already unicode? return right away
if isinstance(source, text_type):
return source.splitlines()
# the source should be ASCII, but we don't want to reject too many
# characters in the debugger if something breaks
charset = 'utf-8'
if source.startswith(UTF8_COOKIE):
source = source[3:]
else:
for idx, match in enumerate(_line_re.finditer(source)):
match = _coding_re.search(match.group())
if match is not None:
charset = match.group(1)
break
if idx > 1:
break
# on broken cookies we fall back to utf-8 too
try:
codecs.lookup(charset)
except LookupError:
charset = 'utf-8'
return source.decode(charset, 'replace').splitlines()
@property
def current_line(self):
try:
return self.sourcelines[self.lineno - 1]
except IndexError:
return u''
@cached_property
def console(self):
return Console(self.globals, self.locals)
id = property(lambda x: id(x))
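# A minimal usage sketch, not part of Werkzeug: captures a deliberate error
# with get_current_traceback and prints its plaintext rendering.
if __name__ == '__main__':
    try:
        1 / 0
    except Exception:
        print(get_current_traceback().plaintext)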
|
caibo2014/teuthology
|
refs/heads/master
|
teuthology/task/tests/test_run.py
|
10
|
import logging
import pytest
from StringIO import StringIO
from teuthology.exceptions import CommandFailedError
log = logging.getLogger(__name__)
class TestRun(object):
"""
Tests to see if we can make remote procedure calls to the current cluster
"""
def test_command_failed_label(self, ctx, config):
result = ""
try:
ctx.cluster.run(
args=["python", "-c", "assert False"],
label="working as expected, nothing to see here"
)
except CommandFailedError as e:
result = str(e)
assert "working as expected" in result
def test_command_failed_no_label(self, ctx, config):
with pytest.raises(CommandFailedError):
ctx.cluster.run(
args=["python", "-c", "assert False"],
)
def test_command_success(self, ctx, config):
result = StringIO()
ctx.cluster.run(
args=["python", "-c", "print 'hi'"],
stdout=result
)
assert result.getvalue().strip() == "hi"
|
rnirmal/openstack-dashboard
|
refs/heads/master
|
django-openstack/django_openstack/middleware/__init__.py
|
12133432
| |
gabrielx52/local_harvest
|
refs/heads/master
|
local_harvest/__init__.py
|
12133432
| |
FuzzyHobbit/scrapy
|
refs/heads/master
|
tests/test_utils_misc/test_walk_modules/__init__.py
|
12133432
| |
nzjrs/conduit
|
refs/heads/master
|
conduit/modules/EvolutionModule/__init__.py
|
12133432
| |
Propanu/upm
|
refs/heads/master
|
examples/python/ili9341.py
|
6
|
#!/usr/bin/env python
# Author: Shawn Hymel
# Copyright (c) 2016 SparkFun Electronics
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import time
from upm import pyupm_ili9341 as ili9341
def main():
# Pins (Edison)
# CS_LCD GP44 (MRAA 31)
# CS_SD GP43 (MRAA 38) unused
# DC GP12 (MRAA 20)
# RESET GP13 (MRAA 14)
lcd = ili9341.ILI9341(31, 38, 20, 14)
# Fill the screen with a solid color
lcd.fillScreen(lcd.color565(0, 40, 16))
# Draw some shapes
lcd.drawFastVLine(10, 10, 100, ili9341.ILI9341_RED)
lcd.drawFastHLine(20, 10, 50, ili9341.ILI9341_CYAN)
lcd.drawLine(160, 30, 200, 60, ili9341.ILI9341_GREEN)
lcd.fillRect(20, 30, 75, 60, ili9341.ILI9341_ORANGE)
lcd.drawCircle(70, 50, 20, ili9341.ILI9341_PURPLE)
lcd.fillCircle(120, 50, 20, ili9341.ILI9341_PURPLE)
lcd.drawTriangle(50, 100, 10, 140, 90, 140, ili9341.ILI9341_YELLOW)
lcd.fillTriangle(150, 100, 110, 140, 190, 140, ili9341.ILI9341_YELLOW)
lcd.drawRoundRect(20, 150, 50, 30, 10, ili9341.ILI9341_RED)
lcd.drawRoundRect(130, 150, 50, 30, 10, ili9341.ILI9341_RED)
lcd.fillRoundRect(75, 150, 50, 30, 10, ili9341.ILI9341_RED)
# Write some text
lcd.setCursor(0, 200)
lcd.setTextColor(ili9341.ILI9341_LIGHTGREY)
lcd.setTextWrap(True)
lcd.setTextSize(1)
lcd._print("Text 1\n")
lcd.setTextSize(2)
lcd._print("Text 2\n")
lcd.setTextSize(3)
lcd._print("Text 3\n")
lcd.setTextSize(4)
lcd._print("Text 4\n")
# Test screen rotation
for r in range(0, 4):
lcd.setRotation(r)
lcd.fillRect(0, 0, 5, 5, ili9341.ILI9341_WHITE)
time.sleep(1)
# Invert colors, wait, then revert back
lcd.invertDisplay(True)
time.sleep(2)
lcd.invertDisplay(False)
# Don't forget to free up that memory!
del lcd
if __name__ == '__main__':
main()
|
broadinstitute/gatk
|
refs/heads/master
|
src/main/python/org/broadinstitute/hellbender/vqsr_cnn/vqsr_cnn/arguments.py
|
3
|
import argparse
import numpy as np
import keras.backend as K
from . import defines
def parse_args():
"""Parse command line arguments.
The args namespace is used promiscuously in this module.
Its fields control the tensor definition, dataset generation, training, file I/O and evaluation.
Some of the fields are typically dicts or lists that are not actually set on the command line,
but via a companion argument also in the namespace.
For example, input_symbols is set via the input_symbol_set string
and annotations is set via the annotation_set string.
Here we also seed the random number generator.
The keras image data format is set here as well via the channels_last or channels_first arguments.
Returns:
namespace: The args namespace that is used throughout this module.
"""
parser = argparse.ArgumentParser()
# Tensor defining arguments
parser.add_argument('--tensor_name', default='read_tensor', choices=defines.TENSOR_MAPS_1D+defines.TENSOR_MAPS_2D,
help='String key which identifies the map from tensor channels to their meaning.')
parser.add_argument('--labels', default=defines.SNP_INDEL_LABELS,
help='Dict mapping label names to their index within label tensors.')
parser.add_argument('--input_symbol_set', default='dna_indel', choices=defines.INPUT_SYMBOLS.keys(),
help='Key which maps to an input symbol to index mapping.')
parser.add_argument('--input_symbols', help='Dict mapping input symbols to their index within input tensors, '
+ 'initialised via the input_symbol_set argument')
parser.add_argument('--batch_size', default=32, type=int,
help='Mini batch size for stochastic gradient descent algorithms.')
parser.add_argument('--read_limit', default=128, type=int,
help='Maximum number of reads to load.')
parser.add_argument('--window_size', default=128, type=int,
help='Size of sequence window to use as input, typically centered at a variant.')
parser.add_argument('--base_quality_mode', default='phot', choices=['phot', 'phred', '1hot'],
help='How to treat base qualities, must be in [phot, phred, 1hot]')
parser.add_argument('--channels_last', default=True, dest='channels_last', action='store_true',
help='Store the channels in the last axis of tensors, tensorflow->true, theano->false')
parser.add_argument('--channels_first', dest='channels_last', action='store_false',
help='Store the channels in the first axis of tensors, tensorflow->false, theano->true')
# Annotation arguments
parser.add_argument('--annotations', help='Array of annotation names, initialised via annotation_set argument')
parser.add_argument('--annotation_set', default='best_practices', choices=defines.ANNOTATIONS_SETS.keys(),
help='Key which maps to an annotations list (or _ to ignore annotations).')
# Dataset generation related arguments
parser.add_argument('--samples', default=500, type=int,
help='Maximum number of data samples to write or load.')
parser.add_argument('--downsample_snps', default=1.0, type=float,
help='Rate of SNP examples that are kept must be in [0.0, 1.0].')
parser.add_argument('--downsample_indels', default=1.0, type=float,
help='Rate of INDEL examples that are kept must be in [0.0, 1.0].')
parser.add_argument('--downsample_not_snps', default=1.0, type=float,
help='Rate of NOT_SNP examples that are kept must be in [0.0, 1.0].')
parser.add_argument('--downsample_not_indels', default=1.0, type=float,
help='Rate of NOT_INDEL examples that are kept must be in [0.0, 1.0].')
parser.add_argument('--downsample_reference', default=0.001, type=float,
help='Rate of reference genotype examples that are kept must be in [0.0, 1.0].')
parser.add_argument('--downsample_homozygous', default=0.001, type=float,
help='Rate of homozygous genotypes that are kept must be in [0.0, 1.0].')
parser.add_argument('--start_pos', default=0, type=int,
help='Genomic position start for parallel tensor writing.')
parser.add_argument('--end_pos', default=0, type=int,
help='Genomic position end for parallel tensor writing.')
parser.add_argument('--skip_positive_class', default=False, action='store_true',
help='Whether to skip positive examples when writing tensors.')
parser.add_argument('--chrom', help='Chromosome to load for parallel tensor writing.')
# I/O files and directories: vcfs, bams, beds, hd5, fasta
parser.add_argument('--output_dir', default='./', help='Directory to write models or other data out.')
parser.add_argument('--image_dir', default=None, help='Directory to write images and plots to.')
parser.add_argument('--reference_fasta', help='The reference FASTA file (e.g. HG19 or HG38).')
parser.add_argument('--weights_hd5', default='',
help='A hd5 file of weights to initialize a model, will use all layers with names that match.')
parser.add_argument('--architecture', default='',
help='A json file specifying semantics and architecture of a neural net.')
parser.add_argument('--bam_file',
help='Path to a BAM file to train from or generate tensors with.')
parser.add_argument('--train_vcf',
help='Path to a VCF that has verified true calls from NIST, platinum genomes, etc.')
parser.add_argument('--input_vcf',
help='Haplotype Caller or VQSR generated VCF with raw annotation values [and quality scores].')
parser.add_argument('--output_vcf', default=None,
help='Optional VCF to write to.')
parser.add_argument('--bed_file',
help='Bed file specifying high confidence intervals associated with args.train_vcf.')
parser.add_argument('--data_dir',
help='Directory of tensors, must be split into test/valid/train directories'
+ ' with subdirectories for each label.')
# Training and optimization related arguments
parser.add_argument('--epochs', default=25, type=int,
help='Number of epochs, typically passes through the entire dataset, not always well-defined.')
parser.add_argument('--batch_normalization', default=False, action='store_true',
help='Mini batch normalization layers after convolutions.')
parser.add_argument('--patience', default=4, type=int,
help='Maximum number of epochs to run without validation loss improvements (Early Stopping).')
parser.add_argument('--training_steps', default=80, type=int,
help='Number of training batches to examine in an epoch.')
parser.add_argument('--validation_steps', default=40, type=int,
help='Number of validation batches to examine in an epoch validation.')
parser.add_argument('--iterations', default=5, type=int,
help='Generic iteration limit for hyperparameter optimization, animation, and other counts.')
parser.add_argument('--tensor_board', default=False, action='store_true',
help='Add the tensor board callback.')
# Architecture defining arguments
parser.add_argument('--conv_width', default=5, type=int, help='Width of convolutional kernels.')
parser.add_argument('--conv_height', default=5, type=int, help='Height of convolutional kernels.')
parser.add_argument('--conv_dropout', default=0.0, type=float,
help='Dropout rate in convolutional layers.')
parser.add_argument('--conv_batch_normalize', default=False, action='store_true',
help='Batch normalize convolutional layers.')
parser.add_argument('--conv_layers', nargs='+', default=[128, 96, 64, 48], type=int,
help='List of sizes for each convolutional filter layer')
parser.add_argument('--padding', default='valid', choices=['valid', 'same'],
help='Valid or same border padding for convolutional layers.')
parser.add_argument('--spatial_dropout', default=False, action='store_true',
help='Spatial dropout on the convolutional layers.')
parser.add_argument('--max_pools', nargs='+', default=[], type=int,
help='List of max-pooling layers.')
parser.add_argument('--fc_layers', nargs='+', default=[32], type=int,
help='List of sizes for each fully connected layer')
parser.add_argument('--fc_dropout', default=0.0, type=float,
help='Dropout rate in fully connected layers.')
parser.add_argument('--fc_batch_normalize', default=False, action='store_true',
help='Batch normalize fully connected layers.')
parser.add_argument('--annotation_units', default=16, type=int,
help='Number of units connected to the annotation input layer.')
parser.add_argument('--annotation_shortcut', default=False, action='store_true',
help='Shortcut connections on the annotations.')
# Evaluation related arguments
parser.add_argument('--score_keys', nargs='+', default=['VQSLOD'],
help='List of variant score keys for performance comparisons.')
parser.add_argument('--tranches', nargs='+', default=[100, 99.9, 99, 95, 90], type=float,
help='List of truth-sensitivity tranche thresholds for performance comparisons.')
# Run specific arguments
parser.add_argument('--mode', help='High level recipe: write tensors, train, test or evaluate models.')
parser.add_argument('--id', default='no_id',
help='Identifier for this run, user-defined string to keep experiments organized.')
parser.add_argument('--gatk_version', default='4.1.0.0',
help='GATK version used to run this code.')
parser.add_argument('--model_version', default='1.0',
help='Model version for this run.')
parser.add_argument('--random_seed', default=12878, type=int,
help='Random seed to use throughout run. Always use np.random.')
# Parse, print, set annotations and seed
args = parser.parse_args()
args.annotations = annotations_from_args(args)
args.input_symbols = input_symbols_from_args(args)
np.random.seed(args.random_seed)
if args.channels_last:
K.set_image_data_format('channels_last')
else:
K.set_image_data_format('channels_first')
print('Arguments are', args)
return args
def annotations_from_args(args):
"""Get list of annotations corresponding to the args.annotation_set.
The annotation_set argument allows us to name commonly used groups of annotations
without having to specify each annotation individually.
Arguments:
args.annotation_set: The key for which annotation set to use.
Returns:
list: Annotation strings as they appear in a VCF info/format field or None.
"""
if args.annotation_set and args.annotation_set in defines.ANNOTATIONS_SETS:
return defines.ANNOTATIONS_SETS[args.annotation_set]
return None
def input_symbols_from_args(args):
"""Get dictionary mapping input data symbols to indices in the input tensor.
Arguments:
args.input_symbol_set: The key for the symbol set to use.
Returns:
dict: the input symbol dict if one exists, otherwise None
"""
if args.input_symbol_set and args.input_symbol_set in defines.INPUT_SYMBOLS:
return defines.INPUT_SYMBOLS[args.input_symbol_set]
return None
def weight_path_from_args(args):
"""Create a weight file name from the command line arguments.
Arguments:
args.output_dir: The directory where the file will be saved
args.id: The file name is this run's id, with the tensor suffix as file extension
"""
save_weight_hd5 = args.output_dir + args.id + defines.TENSOR_SUFFIX
return save_weight_hd5
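# A minimal usage sketch, not part of GATK: derives the weight file path for
# a run id without parsing a full command line. The output_dir and id values
# are illustrative assumptions.
if __name__ == '__main__':
    _demo_args = argparse.Namespace(output_dir='./', id='demo_run')
    print(weight_path_from_args(_demo_args))  # './demo_run' + defines.TENSOR_SUFFIX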
|
open-machine-learning/mldata-utils
|
refs/heads/master
|
scripts/validate.py
|
1
|
#!/usr/bin/env python
"""Convert from one supported format to another.
Example usage:
python validate.py mydata.h5
python validate.py otherdata.arff
python validate.py problemdata.h5 output.csv
"""
import sys
import ml2h5.fileformat
import ml2h5.converter
from ml2h5.converter.h5_csv import H5_CSV
from ml2h5.converter.h5_arff import H5_ARFF
from ml2h5.converter.h5_libsvm import H5_LibSVM
from ml2h5.converter.h5_mat import H5_MAT
from ml2h5.converter.h5_octave import H5_OCTAVE
from ml2h5.converter.h5_rdata import H5_RData
from ml2h5.converter.h5_uci import H5_UCI
from ml2h5.converter.basehandler import BaseHandler
converter_factory = {'h5': BaseHandler,
'csv': H5_CSV,
'arff': H5_ARFF,
'libsvm': H5_LibSVM,
'Rdata': H5_RData,
'data': H5_UCI,
'octave': H5_OCTAVE,
'matlab': H5_MAT,
}
def usage():
print("""Usage: """ + sys.argv[0] + """ filename [filename_out]""")
def convert(file_in, file_out):
"""Convert mldata data file from file_in to file_out.
Performs the format conversion, then checks that the output file can be read back.
"""
print('Converting ' + file_in + ' to ' + file_out)
format_out = ml2h5.fileformat.get(file_out)
conv = ml2h5.converter.Converter(file_in, file_out)
conv.run(verify=True)
check = converter_factory[format_out](file_out)
data = check.read()
def validate(filename):
"""Detect file format, then convert"""
format_in = ml2h5.fileformat.get(filename)
if format_in == 'h5':
for format_out in ml2h5.converter.FROM_H5:
try:
convert(filename, 'validated.' + format_out)
except Exception as e:
print('Conversion failed: %s' % e)
elif format_in in ml2h5.converter.TO_H5:
try:
convert(filename, 'validated.h5')
except Exception as e:
print('Conversion failed: %s' % e)
if __name__ == '__main__':
argc = len(sys.argv)
if argc != 2 and argc != 3:
usage()
sys.exit(1)
if argc == 2:
validate(sys.argv[1])
else:
convert(sys.argv[1], sys.argv[2])
|
subramani95/neutron
|
refs/heads/master
|
neutron/openstack/common/loopingcall.py
|
25
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
from eventlet import event
from eventlet import greenthread
from neutron.openstack.common.gettextutils import _
from neutron.openstack.common import log as logging
from neutron.openstack.common import timeutils
LOG = logging.getLogger(__name__)
class LoopingCallDone(Exception):
"""Exception to break out and stop a LoopingCall.
The poll-function passed to LoopingCall can raise this exception to
break out of the loop normally. This is somewhat analogous to
StopIteration.
An optional return-value can be included as the argument to the exception;
this return-value will be returned by LoopingCall.wait()
"""
def __init__(self, retvalue=True):
""":param retvalue: Value that LoopingCall.wait() should return."""
self.retvalue = retvalue
class LoopingCallBase(object):
def __init__(self, f=None, *args, **kw):
self.args = args
self.kw = kw
self.f = f
self._running = False
self.done = None
def stop(self):
self._running = False
def wait(self):
return self.done.wait()
class FixedIntervalLoopingCall(LoopingCallBase):
"""A fixed interval looping call."""
def start(self, interval, initial_delay=None):
self._running = True
done = event.Event()
def _inner():
if initial_delay:
greenthread.sleep(initial_delay)
try:
while self._running:
start = timeutils.utcnow()
self.f(*self.args, **self.kw)
end = timeutils.utcnow()
if not self._running:
break
delay = interval - timeutils.delta_seconds(start, end)
if delay <= 0:
LOG.warn(_('task run outlasted interval by %s sec') %
-delay)
greenthread.sleep(delay if delay > 0 else 0)
except LoopingCallDone as e:
self.stop()
done.send(e.retvalue)
except Exception:
LOG.exception(_('in fixed duration looping call'))
done.send_exception(*sys.exc_info())
return
else:
done.send(True)
self.done = done
greenthread.spawn_n(_inner)
return self.done
# TODO(mikal): this class name is deprecated in Havana and should be removed
# in the I release
LoopingCall = FixedIntervalLoopingCall
class DynamicLoopingCall(LoopingCallBase):
"""A looping call which sleeps until the next known event.
The function called should return how long to sleep for before being
called again.
"""
def start(self, initial_delay=None, periodic_interval_max=None):
self._running = True
done = event.Event()
def _inner():
if initial_delay:
greenthread.sleep(initial_delay)
try:
while self._running:
idle = self.f(*self.args, **self.kw)
if not self._running:
break
if periodic_interval_max is not None:
idle = min(idle, periodic_interval_max)
LOG.debug(_('Dynamic looping call sleeping for %.02f '
'seconds'), idle)
greenthread.sleep(idle)
except LoopingCallDone as e:
self.stop()
done.send(e.retvalue)
except Exception:
LOG.exception(_('in dynamic looping call'))
done.send_exception(*sys.exc_info())
return
else:
done.send(True)
self.done = done
greenthread.spawn(_inner)
return self.done
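# A minimal usage sketch, not part of neutron: a task that stops its own
# FixedIntervalLoopingCall after three runs by raising LoopingCallDone, as
# described in the LoopingCallDone docstring above.
if __name__ == '__main__':
    state = {'count': 0}

    def _tick():
        state['count'] += 1
        if state['count'] >= 3:
            raise LoopingCallDone(retvalue=state['count'])

    timer = FixedIntervalLoopingCall(_tick)
    timer.start(interval=0.01)
    print(timer.wait())  # expected to print 3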
|
havatv/QGIS
|
refs/heads/master
|
tests/src/python/test_qgsserver_plugins.py
|
4
|
# -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsServer plugins and filters.
From build dir, run: ctest -R PyQgsServerPlugins -V
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Alessandro Pasotti'
__date__ = '22/04/2017'
__copyright__ = 'Copyright 2017, The QGIS Project'
import os
from qgis.server import QgsServer
from qgis.core import QgsMessageLog
from qgis.testing import unittest
from utilities import unitTestDataPath
from test_qgsserver import QgsServerTestBase
import osgeo.gdal # NOQA
# Strip path and content length because path may vary
RE_STRIP_UNCHECKABLE = br'MAP=[^"]+|Content-Length: \d+'
RE_ATTRIBUTES = br'[^>\s]+=[^>\s]+'
class TestQgsServerPlugins(QgsServerTestBase):
def setUp(self):
"""Create the server instance"""
self.testdata_path = unitTestDataPath('qgis_server') + '/'
d = unitTestDataPath('qgis_server_accesscontrol') + '/'
self.projectPath = os.path.join(d, "project.qgs")
# Clean env just to be sure
env_vars = ['QUERY_STRING', 'QGIS_PROJECT_FILE']
for ev in env_vars:
try:
del os.environ[ev]
except KeyError:
pass
self.server = QgsServer()
def test_pluginfilters(self):
"""Test python plugins filters"""
try:
from qgis.server import QgsServerFilter
except ImportError:
print("QGIS Server plugins are not compiled. Skipping test")
return
class SimpleHelloFilter(QgsServerFilter):
def requestReady(self):
QgsMessageLog.logMessage("SimpleHelloFilter.requestReady")
def sendResponse(self):
QgsMessageLog.logMessage("SimpleHelloFilter.sendResponse")
def responseComplete(self):
request = self.serverInterface().requestHandler()
params = request.parameterMap()
QgsMessageLog.logMessage("SimpleHelloFilter.responseComplete")
if params.get('SERVICE', '').upper() == 'SIMPLE':
request.clear()
request.setResponseHeader('Content-type', 'text/plain')
request.appendBody('Hello from SimpleServer!'.encode('utf-8'))
serverIface = self.server.serverInterface()
filter = SimpleHelloFilter(serverIface)
serverIface.registerFilter(filter, 100)
# Get registered filters
self.assertEqual(filter, serverIface.filters()[100][0])
# global to be modified inside plugin filters
globals()['status_code'] = 0
# body to be checked inside plugin filters
globals()['body2'] = None
# headers to be checked inside plugin filters
globals()['headers2'] = None
# Register some more filters
class Filter1(QgsServerFilter):
def responseComplete(self):
request = self.serverInterface().requestHandler()
params = request.parameterMap()
if params.get('SERVICE', '').upper() == 'SIMPLE':
request.appendBody('Hello from Filter1!'.encode('utf-8'))
class Filter2(QgsServerFilter):
def responseComplete(self):
request = self.serverInterface().requestHandler()
params = request.parameterMap()
if params.get('SERVICE', '').upper() == 'SIMPLE':
request.appendBody('Hello from Filter2!'.encode('utf-8'))
class Filter3(QgsServerFilter):
"""Test get and set status code"""
def responseComplete(self):
global status_code
request = self.serverInterface().requestHandler()
request.setStatusCode(999)
status_code = request.statusCode()
class Filter4(QgsServerFilter):
"""Body getter"""
def responseComplete(self):
global body2
request = self.serverInterface().requestHandler()
body2 = request.body()
class Filter5(QgsServerFilter):
"""Body setter, clear body, keep headers"""
def responseComplete(self):
global headers2
request = self.serverInterface().requestHandler()
request.clearBody()
headers2 = request.responseHeaders()
request.appendBody('new body, new life!'.encode('utf-8'))
filter1 = Filter1(serverIface)
filter2 = Filter2(serverIface)
filter3 = Filter3(serverIface)
filter4 = Filter4(serverIface)
serverIface.registerFilter(filter1, 101)
serverIface.registerFilter(filter2, 200)
serverIface.registerFilter(filter2, 100)
serverIface.registerFilter(filter3, 300)
serverIface.registerFilter(filter4, 400)
self.assertTrue(filter2 in serverIface.filters()[100])
self.assertEqual(filter1, serverIface.filters()[101][0])
self.assertEqual(filter2, serverIface.filters()[200][0])
header, body = [_v for _v in self._execute_request('?service=simple')]
response = header + body
expected = b'Content-Length: 62\nContent-type: text/plain\n\nHello from SimpleServer!Hello from Filter1!Hello from Filter2!'
self.assertEqual(response, expected)
# Check status code
self.assertEqual(status_code, 999)
# Check body getter from filter
self.assertEqual(body2, b'Hello from SimpleServer!Hello from Filter1!Hello from Filter2!')
# Check that the bindings for complex type QgsServerFiltersMap are working
filters = {100: [filter, filter2], 101: [filter1], 200: [filter2]}
serverIface.setFilters(filters)
self.assertTrue(filter in serverIface.filters()[100])
self.assertTrue(filter2 in serverIface.filters()[100])
self.assertEqual(filter1, serverIface.filters()[101][0])
self.assertEqual(filter2, serverIface.filters()[200][0])
header, body = self._execute_request('?service=simple')
response = header + body
expected = b'Content-Length: 62\nContent-type: text/plain\n\nHello from SimpleServer!Hello from Filter1!Hello from Filter2!'
self.assertEqual(response, expected)
# Now, re-run with body setter
filter5 = Filter5(serverIface)
serverIface.registerFilter(filter5, 500)
header, body = self._execute_request('?service=simple')
response = header + body
expected = b'Content-Length: 19\nContent-type: text/plain\n\nnew body, new life!'
self.assertEqual(response, expected)
self.assertEqual(headers2, {'Content-type': 'text/plain'})
def test_configpath(self):
""" Test plugin can read confif path
"""
try:
from qgis.server import QgsServerFilter
from qgis.core import QgsProject
except ImportError:
print("QGIS Server plugins are not compiled. Skipping test")
return
d = unitTestDataPath('qgis_server_accesscontrol') + '/'
self.projectPath = os.path.join(d, "project.qgs")
self.server = QgsServer()
# global to be modified inside plugin filters
globals()['configFilePath2'] = None
class Filter0(QgsServerFilter):
"""Body setter, clear body, keep headers"""
def requestReady(self):
global configFilePath2
configFilePath2 = self.serverInterface().configFilePath()
serverIface = self.server.serverInterface()
serverIface.registerFilter(Filter0(serverIface), 100)
# Test using MAP
self._execute_request('?service=simple&MAP=%s' % self.projectPath)
# Check config file path
self.assertEqual(configFilePath2, self.projectPath)
# Reset result
globals()['configFilePath2'] = None
        # Test with project as argument
project = QgsProject()
project.read(self.projectPath)
self._execute_request_project('?service=simple', project=project)
# Check config file path
self.assertEqual(configFilePath2, project.fileName())
def test_exceptions(self):
"""Test that plugin filter Python exceptions can be caught"""
try:
from qgis.server import QgsServerFilter
except ImportError:
print("QGIS Server plugins are not compiled. Skipping test")
return
class FilterBroken(QgsServerFilter):
def responseComplete(self):
raise Exception("There was something very wrong!")
serverIface = self.server.serverInterface()
filter1 = FilterBroken(serverIface)
filters = {100: [filter1]}
serverIface.setFilters(filters)
header, body = self._execute_request('')
self.assertEqual(body, b'Internal Server Error')
serverIface.setFilters({})
if __name__ == '__main__':
unittest.main()
|
talbrecht/pism_pik
|
refs/heads/stable1.0
|
site-packages/siple/opt/linesearchCR.py
|
3
|
############################################################################
#
# This file is a part of siple.
#
# Copyright 2010, 2014 David Maxwell
#
# siple is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
############################################################################
from numpy import isinf, isnan, sqrt, any, isreal, real, nan, inf
from siple.reporting import msg
from siple.params import Bunch, Parameters
#This program is distributed WITHOUT ANY WARRANTY; without even the implied
#warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# This is a modified version of:
#
#
#This file contains a Python version of Carl Rasmussen's Matlab-function
#minimize.m
#
#minimize.m is copyright (C) 1999 - 2006, Carl Edward Rasmussen.
#Python adaptation by Roland Memisevic 2008.
#
#
#The following is the original copyright notice that comes with the
#function minimize.m
#(from http://www.kyb.tuebingen.mpg.de/bs/people/carl/code/minimize/Copyright):
#
#
#"(C) Copyright 1999 - 2006, Carl Edward Rasmussen
#
#Permission is granted for anyone to copy, use, or modify these
#programs and accompanying documents for purposes of research or
#education, provided this copyright notice is retained, and note is
#made of any changes that have been made.
#
#These programs and documents are distributed without any warranty,
#express or implied. As the programs were written for research
#purposes only, they have not been tested to the degree that would be
#advisable in any important application. All use of these programs is
#entirely at the user's own risk."
class LinesearchCR:
@staticmethod
def defaultParameters():
return Parameters('linesearchCR',
                      INT=0.1,       # don't reevaluate within 0.1 of the limit of the current bracket
                      EXT=3.0,       # extrapolate maximum 3 times the current step-size
                      MAX=20,        # max 20 function evaluations per line search
                      RATIO=10,      # maximum allowed slope ratio
                      SIG=0.1,       # RHO = SIG/2; SIG and RHO control the Wolfe-Powell conditions (see search())
                      verbose=False)
def __init__(self,params=None):
self.params = self.defaultParameters()
if not (params is None): self.params.update(params)
def error(self):
return self.code > 0
def search(self,f,f0,df0,t0=None):
        INT = self.params.INT      # don't reevaluate within 0.1 of the limit of the current bracket
        EXT = self.params.EXT      # extrapolate maximum 3 times the current step-size
        MAX = self.params.MAX      # max 20 function evaluations per line search
        RATIO = self.params.RATIO  # maximum allowed slope ratio
        SIG = self.params.SIG
        RHO = SIG/2
        SMALL = 10.**-16           # minimize.m uses matlab's realmin
# SIG and RHO are the constants controlling the Wolfe-
#Powell conditions. SIG is the maximum allowed absolute ratio between
#previous and new slopes (derivatives in the search direction), thus setting
#SIG to low (positive) values forces higher precision in the line-searches.
#RHO is the minimum allowed fraction of the expected (from the slope at the
#initial point in the linesearch). Constants must satisfy 0 < RHO < SIG < 1.
#Tuning of SIG (depending on the nature of the function to be optimized) may
#speed up the minimization; it is probably not worth playing much with RHO.
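        # Concretely (an added gloss, not in the original comment): writing
        # d(t) for the slope at step t, the acceptance test below requires
        # |d(t)| <= -SIG*d0 (curvature) and f(t) <= f0 + t*RHO*d0
        # (sufficient decrease), with d0 < 0 for a descent direction.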
        d0 = df0
fdata0 = None
if t0 is None:
t0 = 1/(1.-d0)
        x3 = t0                    # initial step; minimize.m uses red/(|s|+1), here 1/(1-d0)
X0 = 0; F0 = f0; dF0 = df0; Fdata0=fdata0 # make a copy of current values
M = MAX
x2 = 0; f2 = f0; d2 = d0;
while True: # keep extrapolating as long as necessary
# x2 = 0; f2 = f0; d2 = d0;
f3 = f0; df3 = df0; fdata3 = fdata0;
success = 0
while (not success) and (M > 0):
try:
M = M - 1
(f3, df3, fdata3) = f(x3)
if isnan(f3) or isinf(f3) or any(isnan(df3)+isinf(df3)):
raise Exception('nan')
success = 1
                except Exception:  # catch any error which occurred in f
if self.params.verbose: msg('error on extrapolate. shrinking %g to %g', x3, (x2+x3)/2)
x3 = (x2+x3)/2 # bisect and try again
if f3 < F0:
X0 = x3; F0 = f3; dF0 = df3; Fdata0=fdata3 # keep best values
d3 = df3 # new slope
            if d3 > SIG*d0 or f3 > f0+x3*RHO*d0 or M == 0: break  # are we done extrapolating?
x1 = x2; f1 = f2; d1 = d2 # move point 2 to point 1
x2 = x3; f2 = f3; d2 = d3 # move point 3 to point 2
A = 6*(f1-f2)+3*(d2+d1)*(x2-x1) # make cubic extrapolation
B = 3*(f2-f1)-(2*d1+d2)*(x2-x1)
Z = B+sqrt(complex(B*B-A*d1*(x2-x1)))
if Z != 0.0:
x3 = x1-d1*(x2-x1)**2/Z # num. error possible, ok!
else:
x3 = inf
if (not isreal(x3)) or isnan(x3) or isinf(x3) or (x3 < 0):
# num prob | wrong sign?
x3 = x2*EXT # extrapolate maximum amount
elif x3 > x2*EXT: # new point beyond extrapolation limit?
x3 = x2*EXT # extrapolate maximum amount
elif x3 < x2+INT*(x2-x1): # new point too close to previous point?
x3 = x2+INT*(x2-x1)
x3 = real(x3)
            if self.params.verbose: msg('extrapolating: x1 %g d1 %g x2 %g d2 %g x3 %g', x1, d1, x2, d2, x3)
while (abs(d3) > -SIG*d0 or f3 > f0+x3*RHO*d0) and M > 0:
if (d3 > 0) or (f3 > f0+x3*RHO*d0): # choose subinterval
x4 = x3; f4 = f3; d4 = d3 # move point 3 to point 4
else:
x2 = x3; f2 = f3; d2 = d3 # move point 3 to point 2
if self.params.verbose: msg('interpolating x2 %g x4 %g f2 %g f4 %g wolfef %g d2 %g d4 %g wolfed %g ',x2,x4,f2,f4,f0+x3*RHO*d0,d2,d4,-SIG*d0)
if f4 > f0:
x3 = x2-(0.5*d2*(x4-x2)**2)/(f4-f2-d2*(x4-x2)) # quadratic interpolation
else:
A = 6*(f2-f4)/(x4-x2)+3*(d4+d2) # cubic interpolation
B = 3*(f4-f2)-(2*d2+d4)*(x4-x2)
if A != 0:
x3=x2+(sqrt(B*B-A*d2*(x4-x2)**2)-B)/A # num. error possible, ok!
else:
x3 = inf
if isnan(x3) or isinf(x3):
x3 = (x2+x4)/2 # if we had a numerical problem then bisect
x3 = max(min(x3, x4-INT*(x4-x2)),x2+INT*(x4-x2)) # don't accept too close
            (f3, df3, fdata3) = f(x3)
            d3 = df3
            M = M - 1              # count epochs
if f3 < F0:
X0 = x3; F0 = f3; dF0 = df3; Fdata0 = fdata3 # keep best values
if (abs(d3) < -SIG*d0) and (f3 < f0+x3*RHO*d0): # if line search succeeded
self.code = 0
self.value = Bunch(F=f3,Fp=d3,t=x3,data=fdata3)
self.errMsg = ""
else:
self.code = 1
if M == 0:
self.errMsg = 'Too many function evaluations (>%d)' % MAX
else:
                self.errMsg = 'unknown error'
self.value = Bunch(F=f0,Fp=dF0,t=X0,data=Fdata0)
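# A minimal usage sketch (illustrative only, not part of siple): ``f``
# evaluates the objective along the search direction and returns a tuple
# (value, directional derivative, auxiliary data); f0 and df0 are those
# quantities at t=0.
#
#     def f(t):                        # hypothetical 1-D slice of an objective
#         return ((t - 1.0)**2, 2.0*(t - 1.0), None)
#
#     ls = LinesearchCR()
#     ls.search(f, f0=1.0, df0=-2.0)
#     if not ls.error():
#         t_accepted = ls.value.t      # accepted step length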
|
Dino0631/RedRain-Bot
|
refs/heads/develop
|
cogs/lib/pip/_vendor/requests/structures.py
|
615
|
# -*- coding: utf-8 -*-
"""
requests.structures
~~~~~~~~~~~~~~~~~~~
Data structures that power Requests.
"""
import collections
from .compat import OrderedDict
class CaseInsensitiveDict(collections.MutableMapping):
"""A case-insensitive ``dict``-like object.
Implements all methods and operations of
``collections.MutableMapping`` as well as dict's ``copy``. Also
provides ``lower_items``.
All keys are expected to be strings. The structure remembers the
case of the last key to be set, and ``iter(instance)``,
``keys()``, ``items()``, ``iterkeys()``, and ``iteritems()``
will contain case-sensitive keys. However, querying and contains
testing is case insensitive::
cid = CaseInsensitiveDict()
cid['Accept'] = 'application/json'
cid['aCCEPT'] == 'application/json' # True
list(cid) == ['Accept'] # True
For example, ``headers['content-encoding']`` will return the
value of a ``'Content-Encoding'`` response header, regardless
of how the header name was originally stored.
If the constructor, ``.update``, or equality comparison
operations are given keys that have equal ``.lower()``s, the
behavior is undefined.
"""
def __init__(self, data=None, **kwargs):
self._store = OrderedDict()
if data is None:
data = {}
self.update(data, **kwargs)
def __setitem__(self, key, value):
# Use the lowercased key for lookups, but store the actual
# key alongside the value.
self._store[key.lower()] = (key, value)
def __getitem__(self, key):
return self._store[key.lower()][1]
def __delitem__(self, key):
del self._store[key.lower()]
def __iter__(self):
return (casedkey for casedkey, mappedvalue in self._store.values())
def __len__(self):
return len(self._store)
def lower_items(self):
"""Like iteritems(), but with all lowercase keys."""
return (
(lowerkey, keyval[1])
for (lowerkey, keyval)
in self._store.items()
)
def __eq__(self, other):
if isinstance(other, collections.Mapping):
other = CaseInsensitiveDict(other)
else:
return NotImplemented
# Compare insensitively
return dict(self.lower_items()) == dict(other.lower_items())
# Copy is required
def copy(self):
return CaseInsensitiveDict(self._store.values())
def __repr__(self):
return str(dict(self.items()))
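# A minimal usage sketch (illustrative only), complementing the docstring
# above: lookups ignore case while the case of the last key set is kept.
#
#     cid = CaseInsensitiveDict({'Content-Type': 'text/html'})
#     cid['content-type']              # -> 'text/html'
#     list(cid)                        # -> ['Content-Type']
#     dict(cid.lower_items())          # -> {'content-type': 'text/html'}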
class LookupDict(dict):
"""Dictionary lookup object."""
def __init__(self, name=None):
self.name = name
super(LookupDict, self).__init__()
def __repr__(self):
return '<lookup \'%s\'>' % (self.name)
def __getitem__(self, key):
# We allow fall-through here, so values default to None
return self.__dict__.get(key, None)
def get(self, key, default=None):
return self.__dict__.get(key, default)
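# A minimal usage sketch (illustrative only): attributes double as lookup
# keys, and missing keys fall through to None instead of raising KeyError.
#
#     codes = LookupDict(name='status_codes')
#     codes.ok = 200
#     codes['ok']                      # -> 200
#     codes['missing']                 # -> None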
|
sserrot/champion_relationships
|
refs/heads/master
|
venv/Lib/site-packages/setuptools/_vendor/six.py
|
2715
|
"""Utilities for writing code that runs on Python 2 and 3"""
# Copyright (c) 2010-2015 Benjamin Peterson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import
import functools
import itertools
import operator
import sys
import types
__author__ = "Benjamin Peterson <benjamin@python.org>"
__version__ = "1.10.0"
# Useful for very coarse version differentiation.
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
PY34 = sys.version_info[0:2] >= (3, 4)
if PY3:
string_types = str,
integer_types = int,
class_types = type,
text_type = str
binary_type = bytes
MAXSIZE = sys.maxsize
else:
string_types = basestring,
integer_types = (int, long)
class_types = (type, types.ClassType)
text_type = unicode
binary_type = str
if sys.platform.startswith("java"):
# Jython always uses 32 bits.
MAXSIZE = int((1 << 31) - 1)
else:
# It's possible to have sizeof(long) != sizeof(Py_ssize_t).
class X(object):
def __len__(self):
return 1 << 31
try:
len(X())
except OverflowError:
# 32-bit
MAXSIZE = int((1 << 31) - 1)
else:
# 64-bit
MAXSIZE = int((1 << 63) - 1)
del X
def _add_doc(func, doc):
"""Add documentation to a function."""
func.__doc__ = doc
def _import_module(name):
"""Import module, returning the module after the last dot."""
__import__(name)
return sys.modules[name]
class _LazyDescr(object):
def __init__(self, name):
self.name = name
def __get__(self, obj, tp):
result = self._resolve()
setattr(obj, self.name, result) # Invokes __set__.
try:
# This is a bit ugly, but it avoids running this again by
# removing this descriptor.
delattr(obj.__class__, self.name)
except AttributeError:
pass
return result
class MovedModule(_LazyDescr):
def __init__(self, name, old, new=None):
super(MovedModule, self).__init__(name)
if PY3:
if new is None:
new = name
self.mod = new
else:
self.mod = old
def _resolve(self):
return _import_module(self.mod)
def __getattr__(self, attr):
_module = self._resolve()
value = getattr(_module, attr)
setattr(self, attr, value)
return value
class _LazyModule(types.ModuleType):
def __init__(self, name):
super(_LazyModule, self).__init__(name)
self.__doc__ = self.__class__.__doc__
def __dir__(self):
attrs = ["__doc__", "__name__"]
attrs += [attr.name for attr in self._moved_attributes]
return attrs
# Subclasses should override this
_moved_attributes = []
class MovedAttribute(_LazyDescr):
def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None):
super(MovedAttribute, self).__init__(name)
if PY3:
if new_mod is None:
new_mod = name
self.mod = new_mod
if new_attr is None:
if old_attr is None:
new_attr = name
else:
new_attr = old_attr
self.attr = new_attr
else:
self.mod = old_mod
if old_attr is None:
old_attr = name
self.attr = old_attr
def _resolve(self):
module = _import_module(self.mod)
return getattr(module, self.attr)
class _SixMetaPathImporter(object):
"""
A meta path importer to import six.moves and its submodules.
This class implements a PEP302 finder and loader. It should be compatible
with Python 2.5 and all existing versions of Python3
"""
def __init__(self, six_module_name):
self.name = six_module_name
self.known_modules = {}
def _add_module(self, mod, *fullnames):
for fullname in fullnames:
self.known_modules[self.name + "." + fullname] = mod
def _get_module(self, fullname):
return self.known_modules[self.name + "." + fullname]
def find_module(self, fullname, path=None):
if fullname in self.known_modules:
return self
return None
def __get_module(self, fullname):
try:
return self.known_modules[fullname]
except KeyError:
raise ImportError("This loader does not know module " + fullname)
def load_module(self, fullname):
try:
# in case of a reload
return sys.modules[fullname]
except KeyError:
pass
mod = self.__get_module(fullname)
if isinstance(mod, MovedModule):
mod = mod._resolve()
else:
mod.__loader__ = self
sys.modules[fullname] = mod
return mod
def is_package(self, fullname):
"""
Return true, if the named module is a package.
We need this method to get correct spec objects with
Python 3.4 (see PEP451)
"""
return hasattr(self.__get_module(fullname), "__path__")
def get_code(self, fullname):
"""Return None
Required, if is_package is implemented"""
self.__get_module(fullname) # eventually raises ImportError
return None
get_source = get_code # same as get_code
_importer = _SixMetaPathImporter(__name__)
class _MovedItems(_LazyModule):
"""Lazy loading of moved objects"""
__path__ = [] # mark as package
_moved_attributes = [
MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"),
MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"),
MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"),
MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"),
MovedAttribute("intern", "__builtin__", "sys"),
MovedAttribute("map", "itertools", "builtins", "imap", "map"),
MovedAttribute("getcwd", "os", "os", "getcwdu", "getcwd"),
MovedAttribute("getcwdb", "os", "os", "getcwd", "getcwdb"),
MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"),
MovedAttribute("reload_module", "__builtin__", "importlib" if PY34 else "imp", "reload"),
MovedAttribute("reduce", "__builtin__", "functools"),
MovedAttribute("shlex_quote", "pipes", "shlex", "quote"),
MovedAttribute("StringIO", "StringIO", "io"),
MovedAttribute("UserDict", "UserDict", "collections"),
MovedAttribute("UserList", "UserList", "collections"),
MovedAttribute("UserString", "UserString", "collections"),
MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"),
MovedAttribute("zip", "itertools", "builtins", "izip", "zip"),
MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"),
MovedModule("builtins", "__builtin__"),
MovedModule("configparser", "ConfigParser"),
MovedModule("copyreg", "copy_reg"),
MovedModule("dbm_gnu", "gdbm", "dbm.gnu"),
MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread"),
MovedModule("http_cookiejar", "cookielib", "http.cookiejar"),
MovedModule("http_cookies", "Cookie", "http.cookies"),
MovedModule("html_entities", "htmlentitydefs", "html.entities"),
MovedModule("html_parser", "HTMLParser", "html.parser"),
MovedModule("http_client", "httplib", "http.client"),
MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"),
MovedModule("email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart"),
MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"),
MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"),
MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"),
MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"),
MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"),
MovedModule("cPickle", "cPickle", "pickle"),
MovedModule("queue", "Queue"),
MovedModule("reprlib", "repr"),
MovedModule("socketserver", "SocketServer"),
MovedModule("_thread", "thread", "_thread"),
MovedModule("tkinter", "Tkinter"),
MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"),
MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"),
MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"),
MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"),
MovedModule("tkinter_tix", "Tix", "tkinter.tix"),
MovedModule("tkinter_ttk", "ttk", "tkinter.ttk"),
MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"),
MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"),
MovedModule("tkinter_colorchooser", "tkColorChooser",
"tkinter.colorchooser"),
MovedModule("tkinter_commondialog", "tkCommonDialog",
"tkinter.commondialog"),
MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"),
MovedModule("tkinter_font", "tkFont", "tkinter.font"),
MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"),
MovedModule("tkinter_tksimpledialog", "tkSimpleDialog",
"tkinter.simpledialog"),
MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"),
MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"),
MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"),
MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"),
MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"),
MovedModule("xmlrpc_server", "SimpleXMLRPCServer", "xmlrpc.server"),
]
# Add windows specific modules.
if sys.platform == "win32":
_moved_attributes += [
MovedModule("winreg", "_winreg"),
]
for attr in _moved_attributes:
setattr(_MovedItems, attr.name, attr)
if isinstance(attr, MovedModule):
_importer._add_module(attr, "moves." + attr.name)
del attr
_MovedItems._moved_attributes = _moved_attributes
moves = _MovedItems(__name__ + ".moves")
_importer._add_module(moves, "moves")
class Module_six_moves_urllib_parse(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_parse"""
_urllib_parse_moved_attributes = [
MovedAttribute("ParseResult", "urlparse", "urllib.parse"),
MovedAttribute("SplitResult", "urlparse", "urllib.parse"),
MovedAttribute("parse_qs", "urlparse", "urllib.parse"),
MovedAttribute("parse_qsl", "urlparse", "urllib.parse"),
MovedAttribute("urldefrag", "urlparse", "urllib.parse"),
MovedAttribute("urljoin", "urlparse", "urllib.parse"),
MovedAttribute("urlparse", "urlparse", "urllib.parse"),
MovedAttribute("urlsplit", "urlparse", "urllib.parse"),
MovedAttribute("urlunparse", "urlparse", "urllib.parse"),
MovedAttribute("urlunsplit", "urlparse", "urllib.parse"),
MovedAttribute("quote", "urllib", "urllib.parse"),
MovedAttribute("quote_plus", "urllib", "urllib.parse"),
MovedAttribute("unquote", "urllib", "urllib.parse"),
MovedAttribute("unquote_plus", "urllib", "urllib.parse"),
MovedAttribute("urlencode", "urllib", "urllib.parse"),
MovedAttribute("splitquery", "urllib", "urllib.parse"),
MovedAttribute("splittag", "urllib", "urllib.parse"),
MovedAttribute("splituser", "urllib", "urllib.parse"),
MovedAttribute("uses_fragment", "urlparse", "urllib.parse"),
MovedAttribute("uses_netloc", "urlparse", "urllib.parse"),
MovedAttribute("uses_params", "urlparse", "urllib.parse"),
MovedAttribute("uses_query", "urlparse", "urllib.parse"),
MovedAttribute("uses_relative", "urlparse", "urllib.parse"),
]
for attr in _urllib_parse_moved_attributes:
setattr(Module_six_moves_urllib_parse, attr.name, attr)
del attr
Module_six_moves_urllib_parse._moved_attributes = _urllib_parse_moved_attributes
_importer._add_module(Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse"),
"moves.urllib_parse", "moves.urllib.parse")
class Module_six_moves_urllib_error(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_error"""
_urllib_error_moved_attributes = [
MovedAttribute("URLError", "urllib2", "urllib.error"),
MovedAttribute("HTTPError", "urllib2", "urllib.error"),
MovedAttribute("ContentTooShortError", "urllib", "urllib.error"),
]
for attr in _urllib_error_moved_attributes:
setattr(Module_six_moves_urllib_error, attr.name, attr)
del attr
Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes
_importer._add_module(Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"),
"moves.urllib_error", "moves.urllib.error")
class Module_six_moves_urllib_request(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_request"""
_urllib_request_moved_attributes = [
MovedAttribute("urlopen", "urllib2", "urllib.request"),
MovedAttribute("install_opener", "urllib2", "urllib.request"),
MovedAttribute("build_opener", "urllib2", "urllib.request"),
MovedAttribute("pathname2url", "urllib", "urllib.request"),
MovedAttribute("url2pathname", "urllib", "urllib.request"),
MovedAttribute("getproxies", "urllib", "urllib.request"),
MovedAttribute("Request", "urllib2", "urllib.request"),
MovedAttribute("OpenerDirector", "urllib2", "urllib.request"),
MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"),
MovedAttribute("ProxyHandler", "urllib2", "urllib.request"),
MovedAttribute("BaseHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"),
MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"),
MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"),
MovedAttribute("FileHandler", "urllib2", "urllib.request"),
MovedAttribute("FTPHandler", "urllib2", "urllib.request"),
MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"),
MovedAttribute("UnknownHandler", "urllib2", "urllib.request"),
MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"),
MovedAttribute("urlretrieve", "urllib", "urllib.request"),
MovedAttribute("urlcleanup", "urllib", "urllib.request"),
MovedAttribute("URLopener", "urllib", "urllib.request"),
MovedAttribute("FancyURLopener", "urllib", "urllib.request"),
MovedAttribute("proxy_bypass", "urllib", "urllib.request"),
]
for attr in _urllib_request_moved_attributes:
setattr(Module_six_moves_urllib_request, attr.name, attr)
del attr
Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes
_importer._add_module(Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"),
"moves.urllib_request", "moves.urllib.request")
class Module_six_moves_urllib_response(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_response"""
_urllib_response_moved_attributes = [
MovedAttribute("addbase", "urllib", "urllib.response"),
MovedAttribute("addclosehook", "urllib", "urllib.response"),
MovedAttribute("addinfo", "urllib", "urllib.response"),
MovedAttribute("addinfourl", "urllib", "urllib.response"),
]
for attr in _urllib_response_moved_attributes:
setattr(Module_six_moves_urllib_response, attr.name, attr)
del attr
Module_six_moves_urllib_response._moved_attributes = _urllib_response_moved_attributes
_importer._add_module(Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"),
"moves.urllib_response", "moves.urllib.response")
class Module_six_moves_urllib_robotparser(_LazyModule):
"""Lazy loading of moved objects in six.moves.urllib_robotparser"""
_urllib_robotparser_moved_attributes = [
MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"),
]
for attr in _urllib_robotparser_moved_attributes:
setattr(Module_six_moves_urllib_robotparser, attr.name, attr)
del attr
Module_six_moves_urllib_robotparser._moved_attributes = _urllib_robotparser_moved_attributes
_importer._add_module(Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser"),
"moves.urllib_robotparser", "moves.urllib.robotparser")
class Module_six_moves_urllib(types.ModuleType):
"""Create a six.moves.urllib namespace that resembles the Python 3 namespace"""
__path__ = [] # mark as package
parse = _importer._get_module("moves.urllib_parse")
error = _importer._get_module("moves.urllib_error")
request = _importer._get_module("moves.urllib_request")
response = _importer._get_module("moves.urllib_response")
robotparser = _importer._get_module("moves.urllib_robotparser")
def __dir__(self):
return ['parse', 'error', 'request', 'response', 'robotparser']
_importer._add_module(Module_six_moves_urllib(__name__ + ".moves.urllib"),
"moves.urllib")
def add_move(move):
"""Add an item to six.moves."""
setattr(_MovedItems, move.name, move)
def remove_move(name):
"""Remove item from six.moves."""
try:
delattr(_MovedItems, name)
except AttributeError:
try:
del moves.__dict__[name]
except KeyError:
raise AttributeError("no such move, %r" % (name,))
if PY3:
_meth_func = "__func__"
_meth_self = "__self__"
_func_closure = "__closure__"
_func_code = "__code__"
_func_defaults = "__defaults__"
_func_globals = "__globals__"
else:
_meth_func = "im_func"
_meth_self = "im_self"
_func_closure = "func_closure"
_func_code = "func_code"
_func_defaults = "func_defaults"
_func_globals = "func_globals"
try:
advance_iterator = next
except NameError:
def advance_iterator(it):
return it.next()
next = advance_iterator
try:
callable = callable
except NameError:
def callable(obj):
return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)
if PY3:
def get_unbound_function(unbound):
return unbound
create_bound_method = types.MethodType
def create_unbound_method(func, cls):
return func
Iterator = object
else:
def get_unbound_function(unbound):
return unbound.im_func
def create_bound_method(func, obj):
return types.MethodType(func, obj, obj.__class__)
def create_unbound_method(func, cls):
return types.MethodType(func, None, cls)
class Iterator(object):
def next(self):
return type(self).__next__(self)
callable = callable
_add_doc(get_unbound_function,
"""Get the function out of a possibly unbound function""")
get_method_function = operator.attrgetter(_meth_func)
get_method_self = operator.attrgetter(_meth_self)
get_function_closure = operator.attrgetter(_func_closure)
get_function_code = operator.attrgetter(_func_code)
get_function_defaults = operator.attrgetter(_func_defaults)
get_function_globals = operator.attrgetter(_func_globals)
if PY3:
def iterkeys(d, **kw):
return iter(d.keys(**kw))
def itervalues(d, **kw):
return iter(d.values(**kw))
def iteritems(d, **kw):
return iter(d.items(**kw))
def iterlists(d, **kw):
return iter(d.lists(**kw))
viewkeys = operator.methodcaller("keys")
viewvalues = operator.methodcaller("values")
viewitems = operator.methodcaller("items")
else:
def iterkeys(d, **kw):
return d.iterkeys(**kw)
def itervalues(d, **kw):
return d.itervalues(**kw)
def iteritems(d, **kw):
return d.iteritems(**kw)
def iterlists(d, **kw):
return d.iterlists(**kw)
viewkeys = operator.methodcaller("viewkeys")
viewvalues = operator.methodcaller("viewvalues")
viewitems = operator.methodcaller("viewitems")
_add_doc(iterkeys, "Return an iterator over the keys of a dictionary.")
_add_doc(itervalues, "Return an iterator over the values of a dictionary.")
_add_doc(iteritems,
"Return an iterator over the (key, value) pairs of a dictionary.")
_add_doc(iterlists,
"Return an iterator over the (key, [values]) pairs of a dictionary.")
if PY3:
def b(s):
return s.encode("latin-1")
def u(s):
return s
unichr = chr
import struct
int2byte = struct.Struct(">B").pack
del struct
byte2int = operator.itemgetter(0)
indexbytes = operator.getitem
iterbytes = iter
import io
StringIO = io.StringIO
BytesIO = io.BytesIO
_assertCountEqual = "assertCountEqual"
if sys.version_info[1] <= 1:
_assertRaisesRegex = "assertRaisesRegexp"
_assertRegex = "assertRegexpMatches"
else:
_assertRaisesRegex = "assertRaisesRegex"
_assertRegex = "assertRegex"
else:
def b(s):
return s
# Workaround for standalone backslash
def u(s):
return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape")
unichr = unichr
int2byte = chr
def byte2int(bs):
return ord(bs[0])
def indexbytes(buf, i):
return ord(buf[i])
iterbytes = functools.partial(itertools.imap, ord)
import StringIO
StringIO = BytesIO = StringIO.StringIO
_assertCountEqual = "assertItemsEqual"
_assertRaisesRegex = "assertRaisesRegexp"
_assertRegex = "assertRegexpMatches"
_add_doc(b, """Byte literal""")
_add_doc(u, """Text literal""")
def assertCountEqual(self, *args, **kwargs):
return getattr(self, _assertCountEqual)(*args, **kwargs)
def assertRaisesRegex(self, *args, **kwargs):
return getattr(self, _assertRaisesRegex)(*args, **kwargs)
def assertRegex(self, *args, **kwargs):
return getattr(self, _assertRegex)(*args, **kwargs)
if PY3:
exec_ = getattr(moves.builtins, "exec")
def reraise(tp, value, tb=None):
if value is None:
value = tp()
if value.__traceback__ is not tb:
raise value.with_traceback(tb)
raise value
else:
def exec_(_code_, _globs_=None, _locs_=None):
"""Execute code in a namespace."""
if _globs_ is None:
frame = sys._getframe(1)
_globs_ = frame.f_globals
if _locs_ is None:
_locs_ = frame.f_locals
del frame
elif _locs_ is None:
_locs_ = _globs_
exec("""exec _code_ in _globs_, _locs_""")
exec_("""def reraise(tp, value, tb=None):
raise tp, value, tb
""")
if sys.version_info[:2] == (3, 2):
exec_("""def raise_from(value, from_value):
if from_value is None:
raise value
raise value from from_value
""")
elif sys.version_info[:2] > (3, 2):
exec_("""def raise_from(value, from_value):
raise value from from_value
""")
else:
def raise_from(value, from_value):
raise value
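# A minimal usage sketch (illustrative only): re-raise the active exception
# with its original traceback from inside a handler.
#
#     try:
#         risky()                      # ``risky`` is a hypothetical callable
#     except Exception:
#         reraise(*sys.exc_info())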
print_ = getattr(moves.builtins, "print", None)
if print_ is None:
def print_(*args, **kwargs):
"""The new-style print function for Python 2.4 and 2.5."""
fp = kwargs.pop("file", sys.stdout)
if fp is None:
return
def write(data):
if not isinstance(data, basestring):
data = str(data)
# If the file has an encoding, encode unicode with it.
if (isinstance(fp, file) and
isinstance(data, unicode) and
fp.encoding is not None):
errors = getattr(fp, "errors", None)
if errors is None:
errors = "strict"
data = data.encode(fp.encoding, errors)
fp.write(data)
want_unicode = False
sep = kwargs.pop("sep", None)
if sep is not None:
if isinstance(sep, unicode):
want_unicode = True
elif not isinstance(sep, str):
raise TypeError("sep must be None or a string")
end = kwargs.pop("end", None)
if end is not None:
if isinstance(end, unicode):
want_unicode = True
elif not isinstance(end, str):
raise TypeError("end must be None or a string")
if kwargs:
raise TypeError("invalid keyword arguments to print()")
if not want_unicode:
for arg in args:
if isinstance(arg, unicode):
want_unicode = True
break
if want_unicode:
newline = unicode("\n")
space = unicode(" ")
else:
newline = "\n"
space = " "
if sep is None:
sep = space
if end is None:
end = newline
for i, arg in enumerate(args):
if i:
write(sep)
write(arg)
write(end)
if sys.version_info[:2] < (3, 3):
_print = print_
def print_(*args, **kwargs):
fp = kwargs.get("file", sys.stdout)
flush = kwargs.pop("flush", False)
_print(*args, **kwargs)
if flush and fp is not None:
fp.flush()
_add_doc(reraise, """Reraise an exception.""")
if sys.version_info[0:2] < (3, 4):
def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS,
updated=functools.WRAPPER_UPDATES):
def wrapper(f):
f = functools.wraps(wrapped, assigned, updated)(f)
f.__wrapped__ = wrapped
return f
return wrapper
else:
wraps = functools.wraps
def with_metaclass(meta, *bases):
"""Create a base class with a metaclass."""
# This requires a bit of explanation: the basic idea is to make a dummy
# metaclass for one level of class instantiation that replaces itself with
# the actual metaclass.
class metaclass(meta):
def __new__(cls, name, this_bases, d):
return meta(name, bases, d)
return type.__new__(metaclass, 'temporary_class', (), {})
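# A minimal usage sketch (illustrative only):
#
#     class Meta(type):
#         pass
#
#     class Base(object):
#         pass
#
#     class MyClass(with_metaclass(Meta, Base)):
#         pass
#
#     type(MyClass) is Meta            # True on both Python 2 and 3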
def add_metaclass(metaclass):
"""Class decorator for creating a class with a metaclass."""
def wrapper(cls):
orig_vars = cls.__dict__.copy()
slots = orig_vars.get('__slots__')
if slots is not None:
if isinstance(slots, str):
slots = [slots]
for slots_var in slots:
orig_vars.pop(slots_var)
orig_vars.pop('__dict__', None)
orig_vars.pop('__weakref__', None)
return metaclass(cls.__name__, cls.__bases__, orig_vars)
return wrapper
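# A minimal usage sketch (illustrative only), equivalent in effect to the
# with_metaclass() sketch above but written as a decorator:
#
#     @add_metaclass(Meta)             # Meta as in the sketch above
#     class MyClass(object):
#         pass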
def python_2_unicode_compatible(klass):
"""
A decorator that defines __unicode__ and __str__ methods under Python 2.
Under Python 3 it does nothing.
To support Python 2 and 3 with a single code base, define a __str__ method
returning text and apply this decorator to the class.
"""
if PY2:
if '__str__' not in klass.__dict__:
raise ValueError("@python_2_unicode_compatible cannot be applied "
"to %s because it doesn't define __str__()." %
klass.__name__)
klass.__unicode__ = klass.__str__
klass.__str__ = lambda self: self.__unicode__().encode('utf-8')
return klass
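# A minimal usage sketch (illustrative only):
#
#     @python_2_unicode_compatible
#     class Greeting(object):
#         def __str__(self):
#             return u'h\xe9llo'       # text on Python 3; UTF-8 bytes on Python 2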
# Complete the moves implementation.
# This code is at the end of this module to speed up module loading.
# Turn this module into a package.
__path__ = [] # required for PEP 302 and PEP 451
__package__ = __name__ # see PEP 366 @ReservedAssignment
if globals().get("__spec__") is not None:
__spec__.submodule_search_locations = [] # PEP 451 @UndefinedVariable
# Remove other six meta path importers, since they cause problems. This can
# happen if six is removed from sys.modules and then reloaded. (Setuptools does
# this for some reason.)
if sys.meta_path:
for i, importer in enumerate(sys.meta_path):
# Here's some real nastiness: Another "instance" of the six module might
# be floating around. Therefore, we can't use isinstance() to check for
# the six meta path importer, since the other six instance will have
# inserted an importer with different class.
if (type(importer).__name__ == "_SixMetaPathImporter" and
importer.name == __name__):
del sys.meta_path[i]
break
del i, importer
# Finally, add the importer to the meta path import hook.
sys.meta_path.append(_importer)
|