code stringlengths 3 1.05M | repo_name stringlengths 5 104 | path stringlengths 4 251 | language stringclasses 1 value | license stringclasses 15 values | size int64 3 1.05M |
|---|---|---|---|---|---|
# -*-python-*-
# GemRB - Infinity Engine Emulator
# Copyright (C) 2003 The GemRB Project
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
#
# GUICommon.py - common functions for GUIScripts of all game types
import GemRB
import GUIClasses
import CommonTables
from ie_restype import RES_CHU, RES_WMP, RES_ARE
from ie_spells import LS_MEMO
from GUIDefines import *
from ie_stats import *
from ie_slots import SLOT_ALL
# callback of the currently open 'other' window (None when none is open)
OtherWindowFn = None
# callback of the window about to be opened (set by CloseOtherWindow)
NextWindowFn = None

CommonTables.Load ()
def CloseOtherWindow (NewWindowFn):
    """Bookkeeping for the mutually-exclusive 'other' windows (inventory,
    records, ...).

    Returns 1 when the calling window is being closed, 0 when it is being
    opened (possibly after closing a different window first).
    """
    global OtherWindowFn,NextWindowFn

    GemRB.LeaveContainer()
    if OtherWindowFn and OtherWindowFn != NewWindowFn:
        # allow detection of 'next window'
        NextWindowFn = NewWindowFn
        # switching from a window to something else, call old function
        OtherWindowFn ()
        OtherWindowFn = NewWindowFn
        return 0
    elif OtherWindowFn:
        # something is calling us with its own function, so
        # it is closing down, return true
        OtherWindowFn = None
        return 1
    else:
        # new window, no need to do setup
        OtherWindowFn = NewWindowFn
        NextWindowFn = None
        return 0
def GetWindowPack():
    """Return the CHU resource name of the GUI window pack for the current
    resolution, preferring a custom CGUI<ww><hh> pack when one exists."""
    width = GemRB.GetSystemVariable (SV_WIDTH)
    height = GemRB.GetSystemVariable (SV_HEIGHT)

    if GemRB.GameType == "pst":
        default = "GUIWORLD"
    else:
        default = "GUIW"

    # use a custom gui if there is one
    custom = "CGUI" + str(width)[:2] + str(height)[:2]
    if GemRB.HasResource (custom, RES_CHU, 1):
        return custom

    # stock packs shipped for the classic resolutions
    stock = {640: default, 800: "GUIW08", 1024: "GUIW10", 1280: "GUIW12"}
    gui = stock.get (width)
    if gui and GemRB.HasResource (gui, RES_CHU, 1):
        return gui

    # fallback to the smallest resolution
    return default
def LocationPressed ():
    """Debug handler: print the current area resref and party position."""
    info = GemRB.GetAreaInfo()
    msg = "%s [%d.%d]\n" % (info["CurrentArea"], info["PositionX"], info["PositionY"])
    print(msg)
    return
def RestPress ():
    """Rest the whole party for 8 hours.

    FIXME: check "rest until healed", it's an option in some games.
    """
    GemRB.RestParty (0, 0, 8)
    return
def SelectFormation ():
    """Apply the formation chosen through the 'Formation' GUI variable."""
    formation = GemRB.GetVar ("Formation")
    GemRB.GameSetFormation (formation)
    return
def OpenFloatMenuWindow ():
    """Open PST's radial float menu; in other games just reset targeting."""
    if GameIsPST():
        # imported lazily: the module only exists for PST
        import FloatMenuWindow
        FloatMenuWindow.OpenFloatMenuWindow()
    else:
        GemRB.GameControlSetTargetMode (TARGET_MODE_NONE)
def GetActorPaperDoll (actor):
    """Return the paperdoll animation resref for the actor, looked up by
    animation id and armor level; "*" when the table has no entry."""
    anim_id = GemRB.GetPlayerStat (actor, IE_ANIMATION_ID)
    level = GemRB.GetPlayerStat (actor, IE_ARMOR_TYPE)
    # pdolls rows are keyed by the hex animation id, columns by armor level
    row = "0x%04X" %anim_id
    which = "LEVEL%d" %(level+1)
    doll = CommonTables.Pdolls.GetValue (row, which)
    if doll == "*":
        print "GetActorPaperDoll: Missing paper doll for animation", row, which
    return doll
def SelectAllOnPress ():
    """Select every member of the party."""
    GemRB.GameSelectPC (0, 1)
def GearsClicked ():
    """Toggle the game pause when the clock/gears button is clicked."""
    GemRB.GamePause (2, 0)
def SetColorStat (Actor, Stat, Value):
    """Store the low byte of Value replicated into all four bytes of Stat."""
    low = Value & 0xFF
    # low * 0x01010101 == low | low<<8 | low<<16 | low<<24
    packed = low * 0x01010101
    GemRB.SetPlayerStat (Actor, Stat, packed)
    return
def CheckStat100 (Actor, Stat, Diff):
    """Roll 1d100 (modified by Diff) against the actor's Stat.

    Returns True when the stat meets or beats the roll.
    """
    mystat = GemRB.GetPlayerStat (Actor, Stat)
    goal = GemRB.Roll (1, 100, Diff)
    # return the comparison directly instead of if/return True/return False
    return mystat >= goal
def CheckStat20 (Actor, Stat, Diff):
    """Roll 1d20 (modified by Diff) against the actor's Stat.

    Returns True when the stat meets or beats the roll.
    """
    mystat = GemRB.GetPlayerStat (Actor, Stat)
    goal = GemRB.Roll (1, 20, Diff)
    # return the comparison directly instead of if/return True/return False
    return mystat >= goal
def GameIsPST ():
    """True when running Planescape: Torment."""
    current = GemRB.GameType
    return current == "pst"
def GameIsIWD ():
    """True when running Icewind Dale (without Heart of Winter)."""
    current = GemRB.GameType
    return current == "iwd"
def GameIsHOW ():
    """True when running Icewind Dale: Heart of Winter."""
    current = GemRB.GameType
    return current == "how"
def GameIsIWD1 ():
    """True when running either flavour of Icewind Dale 1 (iwd or how)."""
    return GemRB.GameType in ("iwd", "how")
def GameIsIWD2 ():
    """True when running Icewind Dale 2."""
    current = GemRB.GameType
    return current == "iwd2"
def GameIsBG1 ():
    """True when running Baldur's Gate 1."""
    current = GemRB.GameType
    return current == "bg1"
def GameIsBG2 ():
    """True when running Baldur's Gate 2."""
    current = GemRB.GameType
    return current == "bg2"
def GameIsBG2Demo ():
    """True when running the Baldur's Gate 2 demo (flagged on the GemRB module)."""
    return getattr (GemRB, 'BG2Demo', False) == True
def GameIsTOB ():
    """True when Throne of Bhaal is installed and the current save uses it."""
    tob_installed = GemRB.HasResource ("worldm25", RES_WMP)
    return tob_installed and GemRB.GetVar ("oldgame") == 0
def HasTOB ():
    """True when the Throne of Bhaal worldmap resource is installed."""
    return bool (GemRB.HasResource ("worldm25", RES_WMP))
def HasHOW ():
    """True when the Heart of Winter worldmap resource is installed."""
    return bool (GemRB.HasResource ("expmap", RES_WMP))
def HasTOTL ():
    """True when Trials of the Luremaster (area ar9700) is installed."""
    return bool (GemRB.HasResource ("ar9700", RES_ARE))
def GetIWDSpellButtonCount ():
    """Return how many spell buttons the IWD spellbook shows (HoW added more)."""
    return 24 if HasHOW() else 20
def SetGamedaysAndHourToken ():
    """Update the GAMEDAY/GAMEDAYS/HOUR text tokens from the game clock.

    Game time has 7200 seconds per day (24 hours of 5 'minutes' of 60 s each),
    so an hour is 300 seconds.
    """
    currentTime = GemRB.GetGameTime()
    # explicit floor division: identical result on python 2, correct on python 3
    days = currentTime // 7200
    hours = (currentTime % 7200) // 300
    GemRB.SetToken ('GAMEDAY', str (days))
    GemRB.SetToken ('GAMEDAYS', str (days))
    GemRB.SetToken ('HOUR', str (hours))
# Adds class/kit abilities
def AddClassAbilities (pc, table, Level=1, LevelDiff=1, align=-1):
    """Grant class/kit abilities from a 2da table for the levels just gained.

    Rows select the class/alignment (a single row when align is given),
    columns select the level; cells hold AP_/GA_/SPIN-prefixed resrefs.
    """
    TmpTable = GemRB.LoadTable (table)
    import Spellbook

    # gotta stay positive
    if Level-LevelDiff < 0:
        return

    # we're doing alignment additions
    if align == -1:
        iMin = 0
        iMax = TmpTable.GetRowCount ()
    else:
        # alignment is expected to be the row required
        iMin = align
        iMax = align+1

    # make sure we don't go out too far
    jMin = Level-LevelDiff
    jMax = Level
    if jMax > TmpTable.GetColumnCount ():
        jMax = TmpTable.GetColumnCount ()

    for i in range(iMin, iMax):
        # apply each spell from each new class
        for j in range (jMin, jMax):
            ab = TmpTable.GetValue (i, j, 0)
            if ab and ab != "****":
                # seems all SPINs act like GA_*
                if ab[:4] == "SPIN":
                    ab = "GA_" + ab
                # apply spell (AP_) or gain spell (GA_)
                if ab[:2] == "AP":
                    GemRB.ApplySpell (pc, ab[3:])
                elif ab[:2] == "GA":
                    SpellIndex = Spellbook.HasSpell (pc, IE_SPELL_TYPE_INNATE, 0, ab[3:])
                    if SpellIndex == -1:
                        GemRB.LearnSpell (pc, ab[3:], LS_MEMO)
                    else:
                        # make room for one more memorization
                        max_mem_cnt = GemRB.GetMemorizableSpellsCount (pc, IE_SPELL_TYPE_INNATE, 0, 0)
                        GemRB.SetMemorizableSpellsCount (pc, max_mem_cnt+1, IE_SPELL_TYPE_INNATE, 0)
                        # memorize another spell instance
                        GemRB.MemorizeSpell (pc, IE_SPELL_TYPE_INNATE, 0, SpellIndex)
                else:
                    print "ERROR, unknown class ability (type): ", ab
# remove all class abilities up to the given level
# for dual-classing mainly
def RemoveClassAbilities (pc, table, Level):
TmpTable = GemRB.LoadTable (table)
import Spellbook
# gotta stay positive
if Level < 0:
return
# make sure we don't go out too far
jMax = Level
if jMax > TmpTable.GetColumnCount ():
jMax = TmpTable.GetColumnCount ()
for i in range(TmpTable.GetRowCount ()):
for j in range (jMax):
ab = TmpTable.GetValue (i, j, 0)
if ab and ab != "****":
# get the index
SpellIndex = Spellbook.HasSpell (pc, IE_SPELL_TYPE_INNATE, 0, ab[3:])
# seems all SPINs act like GA_*
if ab[:4] == "SPIN":
ab = "GA_" + ab
# apply spell (AP_) or gain spell (GA_)?
if ab[:2] == "AP":
GemRB.RemoveEffects (pc, ab[3:])
elif ab[:2] == "GA":
if SpellIndex >= 0:
# TODO: get the correct counts to avoid removing an innate ability
# given by more than one thing?
# RemoveSpell will unmemorize them all too
GemRB.RemoveSpell (pc, IE_SPELL_TYPE_INNATE, 0, SpellIndex)
else:
print "ERROR, unknown class ability (type): ", ab
def UpdateInventorySlot (pc, Button, Slot, Type, Equipped=False):
    """Refresh one inventory button: icon, stack count, tooltip and the
    coloured borders (0 = unidentified, 1 = magical, 2 = unusable)."""
    Button.SetFont ("NUMBER")
    Button.SetBorder (0, 0,0,0,0, 128,128,255,64, 0,1)
    Button.SetBorder (1, 2,2,2,2, 32,32,255,0, 0,0)
    Button.SetBorder (2, 0,0,0,0, 255,128,128,64, 0,1)
    Button.SetFlags (IE_GUI_BUTTON_ALIGN_RIGHT | IE_GUI_BUTTON_ALIGN_TOP | IE_GUI_BUTTON_PICTURE, OP_OR)
    Button.SetText ("")

    if Slot == None:
        # empty slot: clear the picture and show a slot-type tooltip
        Button.SetFlags (IE_GUI_BUTTON_PICTURE, OP_NAND)
        if Type == "inventory":
            Button.SetTooltip (12013) # Personal Item
        elif Type == "ground":
            Button.SetTooltip (12011) # Ground Item
        else:
            Button.SetTooltip ("")
        Button.EnableBorder (0, 0)
        Button.EnableBorder (1, 0)
        Button.EnableBorder (2, 0)
    else:
        item = GemRB.GetItem (Slot['ItemResRef'])
        identified = Slot["Flags"] & IE_INV_ITEM_IDENTIFIED
        magical = Slot["Flags"] & IE_INV_ITEM_MAGICAL

        # MaxStackAmount holds the *maximum* item count in the stack while Usages0 holds the actual
        if item["MaxStackAmount"] > 1:
            Button.SetText (str (Slot["Usages0"]))
        else:
            Button.SetText ("")

        # auto-identify mundane items; the actual indentification will happen on transfer
        if not identified and item["LoreToID"] == 0:
            identified = True

        if not identified or item["ItemNameIdentified"] == -1:
            Button.SetTooltip (item["ItemName"])
            Button.EnableBorder (0, 1)
            Button.EnableBorder (1, 0)
        else:
            Button.SetTooltip (item["ItemNameIdentified"])
            Button.EnableBorder (0, 0)
            if magical:
                Button.EnableBorder (1, 1)
            else:
                Button.EnableBorder (1, 0)

        if GemRB.CanUseItemType (SLOT_ALL, Slot['ItemResRef'], pc, Equipped):
            Button.EnableBorder (2, 0)
        else:
            Button.EnableBorder (2, 1)

        Button.SetItemIcon (Slot['ItemResRef'], 0)

    return
# PST uses a button, IWD2 two types, the rest are the same with two labels
def SetEncumbranceLabels (Window, ControlID, Control2ID, pc, invert_colors = False):
    """Displays the encumbrance as a ratio of current to maximum."""
    # Getting the character's strength
    sstr = GemRB.GetPlayerStat (pc, IE_STR)
    ext_str = GemRB.GetPlayerStat (pc, IE_STREXTRA)

    # encumbrance
    max_encumb = CommonTables.StrMod.GetValue (sstr, 3) + CommonTables.StrModEx.GetValue (ext_str, 3)
    encumbrance = GemRB.GetPlayerStat (pc, IE_ENCUMBRANCE)

    Control = Window.GetControl (ControlID)
    if GameIsPST():
        # FIXME: there should be a space before LB symbol (':')
        # NOTE(review): in this branch Control2 is never fetched; a truthy
        # Control2ID would hit a NameError at the bottom - confirm PST callers
        # always pass Control2ID as None
        Control.SetText (str (encumbrance) + ":\n\n\n\n" + str (max_encumb) + ":")
    elif GameIsIWD2() and not Control2ID:
        Control.SetText (str (encumbrance) + "/" + str(max_encumb) + GemRB.GetString(39537))
    else:
        Control.SetText (str (encumbrance) + ":")
        if not Control2ID: # shouldn't happen
            print "Missing second control parameter to SetEncumbranceLabels!"
            return
        Control2 = Window.GetControl (Control2ID)
        Control2.SetText (str (max_encumb) + ":")

    # colour the current-weight label by how close the party member is to the limit
    ratio = (0.0 + encumbrance) / max_encumb
    if ratio > 1.0:
        if invert_colors:
            Control.SetTextColor (255, 0, 0, True)
        else:
            Control.SetTextColor (255, 0, 0)
    elif ratio > 0.8:
        if invert_colors:
            Control.SetTextColor (255, 255, 0, True)
        else:
            Control.SetTextColor (255, 255, 0)
    else:
        if invert_colors:
            Control.SetTextColor (255, 255, 255, True)
        else:
            Control.SetTextColor (255, 255, 255)

    # NOTE(review): the maximum-weight label is always painted red regardless
    # of the ratio - looks suspicious, confirm this is the intended display
    if Control2ID:
        Control2.SetTextColor (255, 0, 0)

    return
def GetActorClassTitle (actor):
    """Returns the string representation of the actors class."""
    ClassTitle = GemRB.GetPlayerStat (actor, IE_TITLE1)

    # a zero title means we have to derive it from class/kit/dual state
    if ClassTitle == 0:
        Class = GemRB.GetPlayerStat (actor, IE_CLASS)
        ClassIndex = CommonTables.Classes.FindValue ( 5, Class )
        KitIndex = GetKitIndex (actor)
        Multi = CommonTables.Classes.GetValue (ClassIndex, 4)
        Dual = IsDualClassed (actor, 1)

        if Multi and Dual[0] == 0: # true multi class
            ClassTitle = CommonTables.Classes.GetValue (ClassIndex, 2)
            ClassTitle = GemRB.GetString (ClassTitle)
        else:
            if Dual[0]: # dual class
                # first (previous) kit or class of the dual class
                if Dual[0] == 1:
                    ClassTitle = CommonTables.KitList.GetValue (Dual[1], 2)
                elif Dual[0] == 2:
                    ClassTitle = CommonTables.Classes.GetValue (Dual[1], 2)
                ClassTitle = GemRB.GetString (ClassTitle) + " / "
                ClassTitle += GemRB.GetString (CommonTables.Classes.GetValue (Dual[2], 2))
            else: # ordinary class or kit
                if KitIndex:
                    ClassTitle = CommonTables.KitList.GetValue (KitIndex, 2)
                else:
                    ClassTitle = CommonTables.Classes.GetValue (ClassIndex, 2)
                if ClassTitle != "*":
                    ClassTitle = GemRB.GetString (ClassTitle)
    else:
        ClassTitle = GemRB.GetString (ClassTitle)

    #GetActorClassTitle returns string now...
    #if ClassTitle == "*":
    #	return 0

    return ClassTitle
def GetKitIndex (actor):
    """Return the index of the actors kit from KITLIST.2da.

    Returns 0 if the class is not kitted."""
    Class = GemRB.GetPlayerStat (actor, IE_CLASS)
    Kit = GemRB.GetPlayerStat (actor, IE_KIT)
    KitIndex = 0

    # kit ids flagged with 0x4000 store the kitlist index in the low 12 bits
    if Kit & 0xc000 == 0x4000:
        KitIndex = Kit & 0xfff

    # carefully looking for kit by the usability flag
    # since the barbarian kit id clashes with the no-kit value
    if KitIndex == 0 and Kit != 0x4000:
        KitIndex = CommonTables.KitList.FindValue (6, Kit)
        if KitIndex == -1:
            KitIndex = 0

    return KitIndex
def IsDualClassed(actor, verbose):
    """Returns an array containing the dual class information.

    Return[0] is 0 if not dualclassed, 1 if the old class is a kit, 2 otherwise.
    Return[1] contains either the kit or class index of the old class.
    Return[2] contains the class index of the new class.
    If verbose is false, only Return[0] contains useable data."""
    # IWD2 has no dual-classing at all
    if GameIsIWD2():
        return (0,-1,-1)

    DualedFrom = GemRB.GetPlayerStat (actor, IE_MC_FLAGS) & MC_WAS_ANY_CLASS

    if verbose:
        Class = GemRB.GetPlayerStat (actor, IE_CLASS)
        ClassIndex = CommonTables.Classes.FindValue (5, Class)
        Multi = CommonTables.Classes.GetValue (ClassIndex, 4)
        DualInfo = []
        KitIndex = GetKitIndex (actor)

        if DualedFrom > 0: # first (previous) class of the dual class
            MCColumn = CommonTables.Classes.GetColumnIndex ("MC_WAS_ID")
            FirstClassIndex = CommonTables.Classes.FindValue (MCColumn, DualedFrom)
            if KitIndex:
                DualInfo.append (1)
                DualInfo.append (KitIndex)
            else:
                DualInfo.append (2)
                DualInfo.append (FirstClassIndex)

            # use the first class of the multiclass bunch that isn't the same as the first class
            Mask = 1
            for i in range (1,16):
                if Multi & Mask:
                    ClassIndex = CommonTables.Classes.FindValue (5, i)
                    if ClassIndex == FirstClassIndex:
                        Mask = 1 << i
                        continue
                    DualInfo.append (ClassIndex)
                    break
                Mask = 1 << i
            if len(DualInfo) != 3:
                print "WARNING: Invalid dualclass combination, treating as a single class!"
                print DualedFrom, Class, Multi, KitIndex, DualInfo
                return (0,-1,-1)

            return DualInfo
        else:
            return (0,-1,-1)
    else:
        if DualedFrom > 0:
            return (1,-1,-1)
        else:
            return (0,-1,-1)
def IsDualSwap (actor):
    """Returns true if the dualed classes are reverse of expection.

    This can happen, because the engine gives dualclass characters the same ID as
    their multiclass counterpart (eg. FIGHTER_MAGE = 3). Logic would dictate that
    the new and old class levels would be stored in IE_LEVEL and IE_LEVEL2,
    respectively; however, if one duals from a fighter to a mage in the above
    example, the levels would actually be in reverse of expectation."""
    Dual = IsDualClassed (actor, 1)

    # not dual classed
    if Dual[0] == 0:
        return 0

    # split the full class name into its individual parts
    # i.e FIGHTER_MAGE becomes [FIGHTER, MAGE]
    Class = GemRB.GetPlayerStat (actor, IE_CLASS)
    Class = CommonTables.Classes.FindValue (5, Class)
    Class = CommonTables.Classes.GetRowName (Class)
    Class = Class.split("_")

    # get our old class name
    if Dual[0] == 2:
        BaseClass = CommonTables.Classes.GetRowName (Dual[1])
    else:
        BaseClass = GetKitIndex (actor)
        BaseClass = CommonTables.KitList.GetValue (BaseClass, 7)
        if BaseClass == "*":
            # mod boilerplate
            return 0
        BaseClass = CommonTables.Classes.FindValue (5, BaseClass)
        BaseClass = CommonTables.Classes.GetRowName (BaseClass)

    # if our old class is the first class, we need to swap
    if Class[0] == BaseClass:
        return 1

    return 0
def IsMultiClassed (actor, verbose):
    """Returns a tuple containing the multiclass information.

    Return[0] contains the total number of classes.
    Return[1-3] contain the ID of their respective classes.
    If verbose is false, only Return[0] has useable data."""
    # change this if it will ever be needed
    if GameIsIWD2():
        return (0,-1,-1,-1)

    # get our base class
    ClassIndex = CommonTables.Classes.FindValue (5, GemRB.GetPlayerStat (actor, IE_CLASS))
    IsMulti = CommonTables.Classes.GetValue (ClassIndex, 4) # 0 if not multi'd
    IsDual = IsDualClassed (actor, 0)

    # dual-class char's look like multi-class chars
    if (IsMulti == 0) or (IsDual[0] > 0):
        return (0,-1,-1,-1)
    elif verbose == 0:
        return (IsMulti,-1,-1,-1)

    # get all our classes (leave space for our number of classes in the return array)
    Classes = [0]*3
    NumClasses = 0
    Mask = 1 # we're looking at multiples of 2
    ClassNames = CommonTables.Classes.GetRowName(ClassIndex).split("_")

    # loop through each class and test it as a mask
    ClassCount = CommonTables.Classes.GetRowCount()
    for i in range (1, ClassCount):
        if IsMulti&Mask: # it's part of this class
            #we need to place the classes in the array based on their order in the name,
            #NOT the order they are detected in
            CurrentName = CommonTables.Classes.GetRowName (CommonTables.Classes.FindValue (5, i));
            if CurrentName == "*":
                # we read too far, as the upper range limit is greater than the number of "single" classes
                break
            for j in range(len(ClassNames)):
                if ClassNames[j] == CurrentName:
                    Classes[j] = i # mask is (i-1)^2 where i is class id
            NumClasses = NumClasses+1
        Mask = 1 << i # shift to the next multiple of 2 for testing

    # in case we couldn't figure out to which classes the multi belonged
    if NumClasses < 2:
        print "ERROR: couldn't figure out the individual classes of multiclass", ClassNames
        return (0,-1,-1,-1)

    # return the tuple
    return (NumClasses, Classes[0], Classes[1], Classes[2])
def CanDualClass(actor):
# human
if GemRB.GetPlayerStat (actor, IE_RACE) != 1:
return 1
# already dualclassed
Dual = IsDualClassed (actor,0)
if Dual[0] > 0:
return 1
DualClassTable = GemRB.LoadTable ("dualclas")
CurrentStatTable = GemRB.LoadTable ("abdcscrq")
Class = GemRB.GetPlayerStat (actor, IE_CLASS)
ClassIndex = CommonTables.Classes.FindValue (5, Class)
ClassName = CommonTables.Classes.GetRowName (ClassIndex)
KitIndex = GetKitIndex (actor)
if KitIndex == 0:
ClassTitle = ClassName
else:
ClassTitle = CommonTables.KitList.GetValue (KitIndex, 0)
Row = DualClassTable.GetRowIndex (ClassTitle)
# create a lookup table for the DualClassTable columns
classes = []
for col in range(DualClassTable.GetColumnCount()):
classes.append(DualClassTable.GetColumnName(col))
matches = []
Sum = 0
for col in range (0, DualClassTable.GetColumnCount ()):
value = DualClassTable.GetValue (Row, col)
Sum += value
if value == 1:
matches.append (classes[col])
# cannot dc if all the columns of the DualClassTable are 0
if Sum == 0:
print "CannotDualClass: all the columns of the DualClassTable are 0"
return 1
# if the only choice for dc is already the same as the actors base class
if Sum == 1 and ClassName in matches and KitIndex == 0:
print "CannotDualClass: the only choice for dc is already the same as the actors base class"
return 1
AlignmentTable = GemRB.LoadTable ("alignmnt")
AlignsTable = GemRB.LoadTable ("aligns")
Alignment = GemRB.GetPlayerStat (actor, IE_ALIGNMENT)
AlignmentColName = AlignsTable.FindValue (3, Alignment)
AlignmentColName = AlignsTable.GetValue (AlignmentColName, 4)
Sum = 0
for classy in matches:
Sum += AlignmentTable.GetValue (classy, AlignmentColName)
# cannot dc if all the available classes forbid the chars alignment
if Sum == 0:
print "CannotDualClass: all the available classes forbid the chars alignment"
return 1
# check current class' stat limitations
ClassStatIndex = CurrentStatTable.GetRowIndex (ClassTitle)
for stat in range (6):
minimum = CurrentStatTable.GetValue (ClassStatIndex, stat)
name = CurrentStatTable.GetColumnName (stat)
if GemRB.GetPlayerStat (actor, eval ("IE_" + name[4:])) < minimum:
print "CannotDualClass: current class' stat limitations are too big"
return 1
# check new class' stat limitations - make sure there are any good class choices
TargetStatTable = GemRB.LoadTable ("abdcdsrq")
for match in matches:
ClassStatIndex = TargetStatTable.GetRowIndex (match)
for stat in range (6):
minimum = TargetStatTable.GetValue (ClassStatIndex, stat)
name = TargetStatTable.GetColumnName (stat)
if GemRB.GetPlayerStat (actor, eval ("IE_" + name[4:])) < minimum:
matches.remove (match)
break
if len(matches) == 0:
print "CannotDualClass: no good new class choices"
return 1
# must be at least level 2
if GemRB.GetPlayerStat (actor, IE_LEVEL) == 1:
print "CannotDualClass: level 1"
return 1
return 0
def IsWarrior (actor):
    """Return whether the actor counts as a warrior for proficiency penalties."""
    Class = GemRB.GetPlayerStat (actor, IE_CLASS)
    ClassIndex = CommonTables.Classes.FindValue (5, Class)
    ClassName = CommonTables.Classes.GetRowName (ClassIndex)
    IsWarrior = CommonTables.ClassSkills.GetValue (ClassName, "NO_PROF")

    # warriors get only a -2 penalty for wielding weapons they are not proficient with
    IsWarrior = (IsWarrior == -2)

    Dual = IsDualClassed (actor, 0)
    if Dual[0] > 0:
        # for dual classes, check what the actor dualled FROM
        DualedFrom = GemRB.GetPlayerStat (actor, IE_MC_FLAGS) & MC_WAS_ANY_CLASS
        MCColumn = CommonTables.Classes.GetColumnIndex ("MC_WAS_ID")
        FirstClassIndex = CommonTables.Classes.FindValue (MCColumn, DualedFrom)
        FirstClassName = CommonTables.Classes.GetRowName (FirstClassIndex)
        OldIsWarrior = CommonTables.ClassSkills.GetValue (FirstClassName, "NO_PROF")
        # there are no warrior to warrior dualclasses, so if the previous class was one, the current one certainly isn't
        if OldIsWarrior == -2:
            return 0
        # but there are also non-warrior to non-warrior dualclasses, so just use the new class check

    return IsWarrior
def SetupDamageInfo (pc, Button):
    """Paint the portrait button's health overlay and hp tooltip; returns the
    "current/max" hp string used in the tooltip."""
    hp = GemRB.GetPlayerStat (pc, IE_HITPOINTS)
    hp_max = GemRB.GetPlayerStat (pc, IE_MAXHITPOINTS)
    state = GemRB.GetPlayerStat (pc, IE_STATE_ID)

    # guard against divide-by-zero on a zero max-hp actor
    if hp_max < 1:
        ratio = 0.0
    else:
        ratio = (hp+0.0) / hp_max

    if hp < 1 or (state & STATE_DEAD):
        # dead: fully grey overlay
        Button.SetOverlay (0, 64,64,64,200, 64,64,64,200)
    else:
        Button.SetOverlay (ratio, 140,0,0,205, 128,0,0,200)
    ratio_str = "\n%d/%d" %(hp, hp_max)
    Button.SetTooltip (GemRB.GetPlayerName (pc, 1) + ratio_str)

    return ratio_str
def SetCurrentDateTokens (stat):
    """Set the day/hour text tokens from a party member's time-in-party and
    return the (days, hours) pair."""
    # NOTE: currentTime is in seconds, joinTime is in seconds * 15
    # (script updates). In each case, there are 60 seconds
    # in a minute, 24 hours in a day, but ONLY 5 minutes in an hour!!
    # Hence currentTime (and joinTime after div by 15) has
    # 7200 secs a day (60 * 5 * 24)
    currentTime = GemRB.GetGameTime ()
    joinTime = stat['JoinDate'] - stat['AwayTime']

    # explicit floor division: identical on python 2, correct on python 3
    party_time = currentTime - (joinTime // 15)
    days = party_time // 7200
    hours = (party_time % 7200) // 300

    # it is true, they changed the token
    if GameIsBG2():
        GemRB.SetToken ('GAMEDAY', str (days))
    else:
        GemRB.SetToken ('GAMEDAYS', str (days))
    GemRB.SetToken ('HOUR', str (hours))
    return (days, hours)
# return ceil(n/d)
#
def ceildiv (n, d):
    """Return ceil(n/d) for integers, raising ZeroDivisionError when d == 0.

    Uses the -(-n // d) identity, which is correct for any sign of n and d
    and behaves the same under Python 2 and Python 3 (the old version relied
    on Python 2's integer '/').
    """
    if d == 0:
        raise ZeroDivisionError("ceildiv by zero")
    return -(-n // d)
# a placeholder for unimplemented and hardcoded key actions
def ResolveKey():
    """Placeholder for unimplemented and hardcoded key actions; does nothing."""
    return
# persistent handles to the main game window and its control (both id 0)
GameWindow = GUIClasses.GWindow(0)
GameControl = GUIClasses.GControl(0,0)
| JeremyAgost/gemrb | gemrb/GUIScripts/GUICommon.py | Python | gpl-2.0 | 23,257 |
#-*- coding: utf-8 -*-
import os
import sys
import time
import re
from django.core.files import File as DjangoFile
from django.core.management.base import BaseCommand, NoArgsCommand
from optparse import make_option
from obp_legacy.models import *
from spf.models import Request, Match
from spf.util.lookup import MediaLookup, LegacyLookup
from spf.util.match import MediaMatch
from datetime import datetime
# default batch window used when no --swp_id filter is given
DEFAULT_LIMIT = 500
DEFAULT_OFFSET = 0
class SpfWorker(object):
def __init__(self, *args, **kwargs):
self.action = kwargs.get('action')
self.limit = DEFAULT_LIMIT
self.offset = DEFAULT_OFFSET
try:
self.swp_id = int(kwargs.get('swp_id'))
except:
self.swp_id = None
self.verbosity = int(kwargs.get('verbosity', 1))
def run(self):
print 'walker'
print 'action: %s' % self.action
print 'swp_id: %s' % self.swp_id
if self.action == 'lookup':
print 'lookup mode'
total_matches = 0
items = []
if self.swp_id:
items = Request.objects.filter(swp_id=self.swp_id)
else:
items = Request.objects.filter(status=0)[self.offset:(self.limit + self.offset)]
for item in items:
ml = MediaLookup()
try:
num_recordings, recordings, level = ml.run(item)
item.num_results = num_recordings
item.results_mb = recordings
if level > 1:
item.level = level
else:
level = None
item.status = 1
except Exception, e:
print
print
print '********* ERROR ********************'
print e
print '************************************'
item.status = 99
item.save()
if num_recordings > 0:
total_matches += 1
print '********'
print recordings
print '********'
print
print '############# SUMMARY ############'
print 'num queried: %s' % items.count()
print 'num matches: %s' % total_matches
if self.action == 'match':
print 'match mode'
total_matches = 0
items = []
mm = MediaMatch()
if self.swp_id:
items = Request.objects.filter(swp_id=self.swp_id)
else:
items = Request.objects.filter(status=1, num_results__gte=1)[self.offset:(self.limit + self.offset)]
for item in items:
mm.match(item)
print '---------------------------------------------'
print 'swp_id: %s' % item.swp_id
print 'title: %s' % item.title
print 'num_results: %s' % item.num_results
print 'level: %s' % item.level
print
print
print '############# SUMMARY ############'
print 'num queried: %s' % items.count()
print 'num matches: %s' % total_matches
if self.action == 'reset':
print 'legacy mode'
items = Request.objects.all()[self.offset:(self.limit + self.offset)]
for item in items:
item.num_results = None
item.level = None
item.status = 0
item.results_mb = None
item.save()
print
print '############# SUMMARY ############'
print 'num queried: %s' % items.count()
Match.objects.all().delete()
if self.action == 'legacy':
print 'legacy mode'
total_matches = 0
items = []
if self.swp_id:
items = Request.objects.filter(swp_id=self.swp_id)
else:
items = Request.objects.all()[self.offset:(self.limit + self.offset)]
for item in items:
ll = LegacyLookup()
num_recordings, recordings = ll.run(item)
#item.num_results = num_recordings
#item.save()
if num_recordings > 0:
total_matches += 1
print '********'
print recordings
print '********'
print
print '############# SUMMARY ############'
print 'num queried: %s' % items.count()
print 'num matches: %s' % total_matches
class Command(NoArgsCommand):
    """Run an SpfWorker pass (lookup/match/reset/legacy) from the command line.

    Example: manage.py spf_lookup --action=lookup [--swp_id=ID]
    """
    # NOTE(review): the option help texts below look copy-pasted from an
    # import_folder command; they do not describe what --action/--swp_id do
    option_list = BaseCommand.option_list + (
        make_option('--action',
            action='store',
            dest='action',
            default=None,
            help='Import files located in the path into django-filer'),
        make_option('--swp_id',
            action='store',
            dest='swp_id',
            default=None,
            help='Specify an ID to run migration on'),
        )

    def handle_noargs(self, **options):
        # delegate all work (and option interpretation) to the worker
        worker = SpfWorker(**options)
        worker.run()
| hzlf/openbroadcast | website/apps/spf/management/commands/spf_lookup.py | Python | gpl-3.0 | 5,465 |
from django.conf import settings
from django import template
from search_engine_tools.models import SearchEngineTools
register = template.Library()
def insert_bing_meta():
    """Render the Bing Webmaster Tools site-verification meta tag for the
    current site."""
    verification_code = SearchEngineTools.objects.get_code('bing', settings.SITE_ID)
    return '<meta name="msvalidate.01" content="%s" />' % verification_code
# expose the function as the {% insert_bing_meta %} template tag
register.simple_tag(insert_bing_meta)
| francisl/django-search-engine-tools | search_engine_tools/templatetags/bing_benefactor.py | Python | bsd-3-clause | 385 |
from generator_tools.picklegenerators import*
from generator_tools.copygenerators import*
import pickle
class GeneratorPickler4Test(object):
    """Round-trips generators through a pickle file for the test cases below."""

    def __init__(self, filename):
        self.filename = filename

    def pkl_device_new(self):
        """Open the pickle file for writing."""
        return open(self.filename, "wb")

    def pkl_device_load(self):
        """Open the pickle file for reading."""
        return open(self.filename, "rb")

    def pickle_generator(self, f_gen):
        """Dump the generator f_gen into the pickle file."""
        return dump(f_gen, self.pkl_device_new())

    def unpickle_generator(self):
        """Load a previously dumped generator back from the pickle file."""
        return load(self.pkl_device_load())
import unittest
import test_support
import time
def pickletest(n=2, raises = None, fun_args = (), kwd_args = {}):
    """Decorator factory for generator pickling tests.

    The decorated test method must return a generator function. The wrapper
    calls it with fun_args/kwd_args, advances the generator n steps, pickles
    and unpickles it, and asserts the remainders of the original and the
    restored generator are equal (or that pickling raises `raises`).

    NOTE(review): kwd_args is a mutable default, but it is only ever read
    here, so sharing is harmless.
    """
    gp = GeneratorPickler4Test("test.pkl")
    name = []  # unused
    def test(method):
        def test_wrap(self):
            f = method(self)
            gen_f = f(*fun_args, **kwd_args)
            # advance the generator into its body before pickling
            for i in range(n):
                gen_f.next()
            if raises:
                self.assertRaises(raises, gp.pickle_generator, gen_f)
            else:
                time.sleep(0.05)
                gp.pickle_generator(gen_f)
                time.sleep(0.05)
                gen_g = gp.unpickle_generator()
                # the restored generator must resume exactly where gen_f is
                self.assertEqual(list(gen_g),list(gen_f))
        # keep the wrapped method's name so unittest discovery still finds it
        test_wrap.__name__ = method.__name__
        return test_wrap
    return test
class TestPickleWhileLoop(unittest.TestCase):
    """Pickle/unpickle generators suspended inside (nested) while loops."""

    @pickletest(2, fun_args = (1,))
    def test_simple_while_loop(self):
        def f(x):
            i = 0
            while i<10:
                yield i
                i+=1
        return f

    @pickletest(5)
    def test_while_sequence_5(self):
        # three consecutive while loops; pickled while inside the second
        def f():
            k = 0
            while k<3:
                yield k
                k+=1
            while k<6:
                yield k
                k+=1
            while k<9:
                yield k
                k+=1
        return f

    @pickletest(7)
    def test_while_sequence_7(self):
        # same generator, pickled while inside the third loop
        def f():
            k = 0
            while k<3:
                yield k
                k+=1
            while k<6:
                yield k
                k+=1
            while k<9:
                yield k
                k+=1
        return f

    @pickletest(2)
    def test_nested_while_1(self):
        def f():
            k = 0
            while k<5:
                yield k
                j = 0
                while j<10:
                    yield (k,j)
                    j+=1
                k+=1
        return f

    @pickletest(3)
    def test_nested_while_2(self):
        def f():
            k = 0
            while k<4:
                yield k
                j = 0
                while j<2:
                    yield k,j
                    i = 0
                    while i<2:
                        yield k,j,i
                        i+=1
                    j+=1
                k+=1
        return f

    @pickletest(3)
    def test_nested_while_6(self):
        # seven nested while loops, yielding on the way back out
        def f():
            k = 0
            while k<8:
                while k<7:
                    while k<6:
                        while k<5:
                            while k<4:
                                while k<3:
                                    while k<2:
                                        k+=1
                                        yield k
                                    k+=1
                                    yield k
                                k+=1
                                yield k
                            k+=1
                            yield k
                        k+=1
                        yield k
                    k+=1
                    yield k
                k+=1
                yield k
        return f
class TestTryStmt(unittest.TestCase):
    """Pickle generators suspended inside try/except (and try/finally) blocks."""

    @pickletest(2)
    def test_simple_try_stmt(self):
        def f():
            try:
                yield 0
                yield 1
                yield 2
                1/0
            except ZeroDivisionError:
                pass
        return f

    @pickletest(3)
    def test_nested_try_stmt(self):
        def f():
            k = 0
            try:
                yield 0
                try:
                    yield 1
                    yield 2
                    k.bla
                except AttributeError:
                    pass
                yield 3
                1/0
            except ZeroDivisionError:
                yield 4
        return f

    @pickletest(3)
    def _test_try_finally(self):
        # leading underscore: disabled (not collected by unittest)
        def f():
            k = 0
            try:
                yield 0
                yield 1
                k.bla
            except AttributeError:
                yield 2
                yield 3
            finally:
                yield 5
            yield 4
        return f

    @pickletest(3)
    def test_try_while(self):
        def f():
            k = 0
            try:
                while k<2:
                    yield k
                    yield k+1
                    k+=1
                yield 2
                1/0
            except ZeroDivisionError:
                pass
        return f

    @pickletest(3)
    def test_while_try_while(self):
        def f():
            k = 0
            while k<10:
                try:
                    while k<2:
                        yield k
                        yield k+1
                        k+=1
                    k+=1
                    yield 2
                    if k == 7:
                        1/0
                except ZeroDivisionError:
                    break
        return f
class TestForStmt(unittest.TestCase):
    """Pickle generators suspended inside for loops (iterables wrapped in
    for_iter so the loop state itself is picklable)."""

    @pickletest(3)
    def test_simple_for(self):
        def f():
            r = for_iter(range(10))
            for i in r:
                yield i
        return f

    @pickletest(77)
    def test_nested_for(self):
        def f():
            r1 = for_iter(range(10))
            for i in r1:
                r2 = for_iter(range(10))
                for j in r2:
                    yield i+j
        return f

    @pickletest(15)
    def test_sequence_of_nested_for_2(self):
        # two consecutive nested-for blocks; pickled while inside the second
        def f():
            r1 = for_iter(range(3))
            for i in r1:
                r2 = for_iter(range(3))
                for j in r2:
                    yield i+j
            r3 = for_iter(range(3))
            for i in r3:
                r4 = for_iter(range(3))
                for j in r4:
                    yield i+j
        return f

    @pickletest(77)
    def test_deep_nesting(self):
        # seven nested for loops (2**7 = 128 yields in total)
        def f():
            r1 = for_iter(range(2))
            for i1 in r1:
                r2 = for_iter(range(2))
                for i2 in r2:
                    r3 = for_iter(range(2))
                    for i3 in r3:
                        r4 = for_iter(range(2))
                        for i4 in r4:
                            r5 = for_iter(range(2))
                            for i5 in r5:
                                r6 = for_iter(range(2))
                                for i6 in r6:
                                    r7 = for_iter(range(2))
                                    for i7 in r7:
                                        yield i1+i2+i3+i4+i5+i6+i7
        return f
class TestForAndWhileStmt(unittest.TestCase):
    """Pickle generators mixing for and while loops."""

    @pickletest(9)
    def test_for_while(self):
        def f():
            r = for_iter(range(7))
            for i in r:
                j = 0
                while j<5:
                    yield i+j
                    j+=1
                yield i-j
        return f

    @pickletest(9)
    def test_while_for(self):
        def f():
            j = 0
            while True:
                r = for_iter(range(7))
                for i in r:
                    yield i+j
                j+=1
                yield i-j
                if j == 5:
                    break
        return f
class TestForAndWhileStmtWithAddArgs(unittest.TestCase):
    """Same loop mixes, but the generator functions take positional and
    keyword arguments (threaded in via pickletest's fun_args/kwd_args)."""

    @pickletest(9, fun_args = (3,4))
    def test_fun_args(self):
        def f(x,y):
            r = for_iter(range(7))
            for i in r:
                j = 0
                while j<5:
                    yield i+j+x
                    j+=1
                yield i-j-y
        return f

    @pickletest(9, fun_args = (3,4), kwd_args = {"a":9})
    def test_fun_and_kwd_args(self):
        def f(x,y,a):
            r = for_iter(range(7))
            for i in r:
                j = 0
                while j<5:
                    yield i+j+x
                    j+=1
                yield i-j-y-a
        return f
class TestMultipleGenerators(unittest.TestCase):
    """Pickle generators that iterate over other (bound-method) generators.

    Since the inner generators are bound to self, the TestCase instance gets
    pickled along; __getstate__ strips the unpicklable result holder.
    """

    def __getstate__(self):
        # drop unittest's result holder, which is not picklable
        d = dict(self.__dict__)
        d["_resultForDoCleanups"] = None
        return d

    def g1(self, x,y):
        # helper generator: for+while mix
        r = for_iter(range(7))
        for i in r:
            j = 0
            while j<5:
                yield i+j+x
                j+=1
            yield i-j-y

    def g2(self, x,y):
        # helper generator: plain range pass-through
        r = for_iter(range(x,y))
        for i in r:
            yield i

    def g3(self,x,y):
        # helper generator: constant offset
        r = for_iter(range(7))
        for i in r:
            yield i+x+y

    @pickletest(20, fun_args = (3,4))
    def test_fun_args(self):
        def f(x,y):
            r = for_iter(self.g1(4,5))
            for i in r:
                j = 0
                while j<5:
                    yield i+j+x
                    j+=1
                yield i-j-y
        return f

    @pickletest(15, fun_args = (3,4))
    def test_multi_gen1(self):
        def f(x,y):
            G = for_iter([self.g2(0,7), self.g2(7,14), self.g2(14, 21)])
            for h in G:
                H = for_iter(h)
                for item in H:
                    yield item
        return f

    @pickletest(20, fun_args = (3,4))
    def test_multi_gen2(self):
        def f(x,y):
            G = for_iter(self.g3(i,j) for i in [1,2] for j in [3,4])
            for h in G:
                H = for_iter(h)
                for item in H:
                    yield item + x + y
        return f

    @pickletest(5, fun_args = (3,4))
    def test_multi_gen3(self):
        # same generator as above, pickled much earlier in its run
        def f(x,y):
            G = for_iter(self.g3(i,j) for i in [1,2] for j in [3,4])
            for h in G:
                H = for_iter(h)
                for item in H:
                    yield item + x + y
        return f
class TestPickleUnpickleSequences(unittest.TestCase):
    # Repeatedly pickle/unpickle the same generator chain and check that all
    # copies yield the same remaining values.  Python 2 code: gen.next().
    def test_pickle_of_unpickled(self):
        def f(x):
            r = for_iter(range(x))
            for i in r:
                yield i
        gp = GeneratorPickler4Test("test.pkl")
        gen_f = f(10)
        gen_f.next()
        gen_f.next()
        gp.pickle_generator(gen_f)
        gen_g = gp.unpickle_generator()
        gen_g.next()
        gen_f.next()          # keep gen_f in lockstep with the copy
        gp.pickle_generator(gen_g)
        gen_h = gp.unpickle_generator()
        l_g = list(gen_g)
        l_h = list(gen_h)
        l_f = list(gen_f)
        self.assertEqual(l_g, l_h)
        self.assertEqual(l_g, l_f)
    def test_chain_pkl_upkl(self):
        # advance one step between every pickle/unpickle round trip
        def f(x):
            r = for_iter(range(x))
            for i in r:
                yield i
        gp = GeneratorPickler4Test("test.pkl")
        gen_f = f(10)
        gen_f.next()
        gp.pickle_generator(gen_f)
        gen_g = gp.unpickle_generator()
        gen_g.next()
        gp.pickle_generator(gen_g)
        gen_h = gp.unpickle_generator()
        gen_h.next()
        gp.pickle_generator(gen_h)
        gen_k = gp.unpickle_generator()
        self.assertEqual(gen_k.next(), gen_h.next())
    def test_chain_pkl_upkl_without_next(self):
        # round-trip repeatedly without advancing in between
        def f(x):
            r = for_iter(range(x))
            for i in r:
                yield i
        gen_f = f(10)
        gen_f.next()
        gen_f.next()
        gen_f.next()
        gp = GeneratorPickler4Test("test.pkl")
        gp.pickle_generator(gen_f)
        gen_g = gp.unpickle_generator()
        gp.pickle_generator(gen_g)
        gen_h = gp.unpickle_generator()
        gp.pickle_generator(gen_h)
        gen_k = gp.unpickle_generator()
        gp.pickle_generator(gen_k)
        gen_m = gp.unpickle_generator()
        l_g = list(gen_g)
        l_h = list(gen_h)
        l_k = list(gen_k)
        l_m = list(gen_m)
        self.assertEqual(l_g, l_h)
        self.assertEqual(l_h, l_k)
        self.assertEqual(l_k, l_m)
class TestPickleClasses(unittest.TestCase):
    # Generators must round-trip through the plain Pickler/Unpickler classes,
    # not only through the GeneratorPickler convenience wrapper.
    def setUp(self):
        self.test_pkl = open("test.pkl", "wb")
    def tearDown(self):
        if not self.test_pkl.closed:
            self.test_pkl.close()
    def test_GeneratorPickler(self):
        gp = Pickler(self.test_pkl)
        def f(x):
            r = for_iter(range(x))
            for i in r:
                yield i
        gen_f = f(10)
        gen_f.next()
        gen_f.next()
        gp.dump(gen_f)
        self.test_pkl.close()
        self.test_pkl = open("test.pkl", "rb")
        up = Unpickler(self.test_pkl)
        gen_g = up.load()
        self.assertEqual(list(gen_g), list(gen_f))
class TestPicklingDependencies(unittest.TestCase):
    # Regression tests: shared references between a generator's locals and
    # other pickled objects must be preserved across the round trip.
    def test_dependent_pickles(self):
        '''
        This tests a fix of version 0.1.1
        Problem ( reported by Lorenz Quack ):
        Pickling of generators is a staged process. First gen_f is mapped onto a GeneratorSnapshot GS_f
        then GS_f is pickled. On unpickling, GS_f is reconstructed. Finally gen_f is built using
        gen_f = copy_generator(GS_f).
        Suppose you send an object X into gen_f transform gen_f into GS_f, then pickle (X, GS_f) and later
        unpickle it to reconstruct (X, GS_f). The unpickled GS_f still holds a reference on X. But this
        reference is destroyed by copy_generator(GS_f) because copy_generator makes a deepcopy.
        This behaviour can be avoided. Usually GS_f will be destroyed just after execution of copy_generator(GS_f).
        Instead of deepcopying the locals of GS_f we just move the references into gen_f.
        The following test demonstrates the behaviour.
        '''
        lst = range(10)
        def f(obj):
            while obj:
                yield obj.pop(0)
        gen_f = f(lst)
        self.assertEqual(gen_f.next(), 0)
        pickle.dump( (lst, GeneratorSnapshot(gen_f)), open("test.pkl", "wb"))
        lst_u, gen_f_u = load( open("test.pkl", "rb") )
        #gen_f_u = copy_generator(GS_f_u, copy_filter = lambda loc: True )
        self.assertEqual( gen_f_u.next(), 1)
        self.assertEqual( gen_f_u.next(), 2)
        self.assertEqual( lst_u[0], gen_f_u.next()) # lst_u is the list used in gen_f_u
    def test_generator_identity(self):
        '''
        This tests a fix of version 0.3
        Problem ( reported by Jenna Louis ):
        When a generator object G exists on different locations loc1, loc2 sameness isn't preserved when it gets
        pickled i.e. two distinct GeneratorSnapshots GS1, GS2 will be created. This glitch can be noticed
        on unpickling. The fix requires some workaround in the pickling module because load_build shall be called
        for each location of G. So we create a unique GS from G but wrap GS again into SnapshotEnvelopes
        EGS1, EGS2. They are distinct but refer to the same GS. On unpickling load_build is called for each EGSx
        and a unique generatorcopy can be reconstructed from GS.
        '''
        lst = range(10)
        def f(obj):
            while obj:
                yield obj.pop(0)
        gen_f = f(lst)
        self.assertEqual(gen_f.next(), 0)
        dump([gen_f, gen_f], open("test.pkl", "wb"))
        gen_f_u, gen_g_u = load( open("test.pkl", "rb") )
        self.assertEqual( gen_f_u, gen_g_u)
def test_main():
    """Run every test class of this module through test_support."""
    suites = (
        TestPickleWhileLoop,
        TestTryStmt,
        TestForStmt,
        TestForAndWhileStmt,
        TestForAndWhileStmtWithAddArgs,
        TestMultipleGenerators,
        TestPickleUnpickleSequences,
        TestPickleClasses,
        TestPicklingDependencies,
    )
    # run_unittest is invoked once per class, in the same order as before
    for suite in suites:
        test_support.run_unittest(suite)
if __name__ == '__main__':
    test_main()
| tonyroberts/generator_tools | generator_tools/tests/test_picklegenerators.py | Python | bsd-3-clause | 16,826 |
#!/usr/bin/env python3
"""
A script to normalize lparse programs so that completion can be computed without the introduction of auxiliary atoms.
An lparse program is read from STDIN, and the normalized lparse program is printed on STDOUT.
"""
from collections import OrderedDict
from enum import Enum
import fileinput
class RuleType(Enum):
    """Rule-type codes of the lparse (smodels) numeric format."""
    NORMAL_RULE = 1
    CARDINALITY_RULE = 2
    CHOICE_RULE = 3
    WEIGHT_RULE = 5
definition = OrderedDict()  # head atom id -> list of tagged rule bodies
minimize_rules = []         # bodies of minimize (type 6) rules
verbatim = []               # lines after the terminating '0', echoed as-is
next_var = 0                # smallest atom id not yet used by the program
def normal_rule(x):
    """Tag a normal (type 1) rule body and track the largest atom id seen.

    x holds the fields after the head: [#lits, #neg, lit...]; everything
    from index 2 on is an atom id.  Updates the global next_var so fresh
    auxiliary atoms never clash with existing ones.
    """
    global next_var
    # 'atom' instead of 'id' to avoid shadowing the builtin id()
    for atom in x[2:]:
        next_var = max(next_var, int(atom) + 1)
    return (RuleType.NORMAL_RULE, x)
def cardinality_rule(x):
    """Tag a cardinality (type 2) rule body and track the largest atom id.

    Fields from index 3 on are atom ids; the leading fields are the counters
    and lower bound of the constraint.
    """
    global next_var
    # 'atom' instead of 'id' to avoid shadowing the builtin id()
    for atom in x[3:]:
        next_var = max(next_var, int(atom) + 1)
    return (RuleType.CARDINALITY_RULE, x)
def choice_rule(x):
    """Tag a choice (type 3) rule body and track the largest atom id.

    x is the body part only (the head atoms were stripped by the caller);
    fields from index 2 on are atom ids.
    """
    global next_var
    # 'atom' instead of 'id' to avoid shadowing the builtin id()
    for atom in x[2:]:
        next_var = max(next_var, int(atom) + 1)
    return (RuleType.CHOICE_RULE, x)
def weight_rule(x):
    """Tag a weight (type 5) rule body and track the largest atom id.

    Only the x[1] literal fields starting at index 3 are atom ids; the
    trailing fields are weights and must not bump next_var.
    """
    global next_var
    # 'atom' instead of 'id' to avoid shadowing the builtin id()
    for atom in x[3:3+int(x[1])]:
        next_var = max(next_var, int(atom) + 1)
    return (RuleType.WEIGHT_RULE, x)
def minimize_rule(x):
    """Track atom ids of a minimize (type 6) rule body.

    Only the x[0] literal fields starting at index 2 are atom ids.  Returned
    untagged because minimize rules do not define any atom.
    """
    global next_var
    # 'atom' instead of 'id' to avoid shadowing the builtin id()
    for atom in x[2:2+int(x[0])]:
        next_var = max(next_var, int(atom) + 1)
    return x
def isUnit(x):
    """Return True if the tagged body x is trivial (first field '0' or '1')."""
    return x[1][0] in ('1', '0')
def print_definition(head, body):
    """Print one rule defining `head` in lparse numeric format.

    body is a (RuleType, fields) pair as produced by the *_rule helpers.
    """
    if body[0] == RuleType.NORMAL_RULE:
        print(1, head, ' '.join(body[1]))
    elif body[0] == RuleType.CARDINALITY_RULE:
        print(2, head, ' '.join(body[1]))
    elif body[0] == RuleType.CHOICE_RULE:
        # choice rules list the head size first; here always a single head
        print(3, 1, head, ' '.join(body[1]))
    elif body[0] == RuleType.WEIGHT_RULE:
        print(5, head, ' '.join(body[1]))
    else:
        # fixed typo in the original message ("Unexpcted")
        exit("Unexpected body of type {}".format(body[0]))
def readProgram(line):
    """Dispatch one tokenized rule line to the matching handler.

    line is a list of whitespace-separated fields; line[0] is the lparse
    rule-type code.  Rule bodies are accumulated per head atom in the global
    `definition` map; minimize rules go to `minimize_rules`.
    """
    kind = line[0]
    if kind == '1':
        definition.setdefault(line[1], []).append(normal_rule(line[2:]))
    elif kind == '2':
        definition.setdefault(line[1], []).append(cardinality_rule(line[2:]))
    elif kind == '3':
        # choice rule: line[1] head atoms follow, then the shared body
        hsize = int(line[1])
        body = line[2 + hsize:]
        for atom in line[2:2 + hsize]:
            definition.setdefault(atom, []).append(choice_rule(body))
    elif kind == '5':
        definition.setdefault(line[1], []).append(weight_rule(line[2:]))
    elif kind == '6':
        minimize_rules.append(minimize_rule(line[2:]))
    else:
        exit("Cannot handle rule {}".format(' '.join(line)))
def readVerbatim(line):
    # Everything after the first '0' line (symbol table, compute statements)
    # is collected and echoed unchanged by printProgram().
    verbatim.append(line)
def printProgram():
    """Emit the normalized program: each atom keeps at most one non-trivial
    defining rule; extra rules are routed through fresh auxiliary atoms."""
    global next_var
    # make sure auxiliary atoms do not clash with any defined head
    # NOTE(review): assumes at least one rule was read (max of empty fails)
    next_var = max(next_var, max([int(x) for x in definition]) + 1)
    for x in definition:
        if len(definition[x]) == 1:
            print_definition(x, definition[x][0])
        else:
            for d in definition[x]:
                if isUnit(d):
                    print_definition(x, d)
                else:
                    # define a fresh atom by d, then link it back to x
                    print_definition(str(next_var), d)
                    print(1, x, 1, 0, next_var)
                    next_var += 1
    for x in minimize_rules: print(6, 0, ' '.join(x))
    for x in verbatim: print(' '.join(x))
if __name__ == "__main__":
    # Rules are read until the terminating '0' line; the '0' line itself and
    # everything after it are echoed verbatim by printProgram().
    callback = readProgram
    for line in fileinput.input('-'):
        line = line.strip()
        if line == '0':
            callback = readVerbatim
        callback(line.split())
    printProgram()
| alviano/python | asp-proof/normalizer.py | Python | gpl-3.0 | 3,546 |
from phystricks import *
def DerivTangente():
    # Figure illustrating the difference quotient (f(x)-f(a))/(x-a): the
    # secant ("corde") through A=(a,f(a)) and X=(x,f(x)) with both legs of
    # the right triangle measured and labelled.
    pspict,fig = SinglePicture("DerivTangenteOM")
    a=3        # abscissa of the fixed point A
    xb=6.5     # abscissa of the moving point X
    mx=1       # plot range of f
    Mx=7
    x=var('x')
    f=phyFunction((x/3)**3/2+1).graph(mx,Mx)
    A=f.get_point(a)
    Ax=Point(A.x,0)    # projections of A on the axes
    Ay=Point(0,A.y)
    X=f.get_point(xb)
    Xx=Point(X.x,0)    # projections of X on the axes
    Xy=Point(0,X.y)
    Ay.put_mark(0.1,180,"$f(a)$",automatic_place=(pspict,"E"))
    Xy.put_mark(0.1,180,"$f(x)$",automatic_place=(pspict,"E"))
    Ax.put_mark(0.2,-90,"$a$",automatic_place=(pspict,"N"))
    Xx.put_mark(0.2,-90,"$x$",automatic_place=(pspict,"N"))
    v1=Segment(X,Xx)   # dashed helper lines to the axes
    v2=Segment(A,Ax)
    h1=Segment(X,Xy)
    h2=Segment(A,Ay)
    I=Intersection(v1,h2)[0]   # right-angle corner of the triangle
    h3=Segment(A,I)
    v1.parameters.color="green"
    v1.parameters.style="dashed"
    v2.parameters=v1.parameters
    h1.parameters=v1.parameters
    h2.parameters=v1.parameters
    h3.parameters=v1.parameters
    corde=Segment(A,X).dilatation(1.5)
    corde.parameters.color="cyan"
    Dx=MeasureLength(h3,0.2)
    #Dx.put_mark(0.2,-90,"$\Delta x$",automatic_place=(pspict,"N"))
    Dx.put_mark(0.2,-90,"$x-a$",automatic_place=(pspict,"N"))
    Dy=MeasureLength(Segment(X,I),-0.2)
    #Dy.put_mark(0.2,0,"$\Delta y$",automatic_place=(pspict,"W"))
    Dy.put_mark(0.2,0,"$f(x)-f(a)$",automatic_place=(pspict,"W"))
    pspict.DrawGraphs(corde,v1,v2,h1,h2,h3,f,A,Ax,Ay,X,Xx,Xy,Dx,Dy)
    pspict.axes.no_graduation()
    pspict.DrawDefaultAxes()
    pspict.dilatation(1)
    fig.conclude()
    fig.write_the_file()
| Naereen/mazhe | phystricksDerivTangente.py | Python | gpl-3.0 | 1,369 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Widens 'justificacion' to 900 chars (nullable) on both the 'espacios'
    # and 'sanitarios' models.
    dependencies = [
        ('openfonacide', '0014_auto_20141213_1812'),
    ]
    operations = [
        migrations.AlterField(
            model_name='espacios',
            name='justificacion',
            field=models.CharField(max_length=900, null=True),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='sanitarios',
            name='justificacion',
            field=models.CharField(max_length=900, null=True),
            preserve_default=True,
        ),
    ]
| nemesiscodex/openfonacide | openfonacide/migrations/0015_auto_20141213_1846.py | Python | lgpl-3.0 | 674 |
#!/usr/bin/env python
import re
from logging import warn
from itertools import chain
from common import pairwise, sentence_to_tokens
from sentencesplit import text_to_sentences
from tagsequence import is_tag, is_start_tag, is_continue_tag, OUT_TAG
# TODO: standoff.py interface should be narrower
from standoff import Textbound, parse_textbounds, eliminate_overlaps, \
verify_textbounds, filter_textbounds, retag_document
# UI identifiers for supported formats
TEXT_FORMAT = 'text'
STANDOFF_FORMAT = 'standoff'
NERSUITE_FORMAT = 'nersuite'
CONLL_FORMAT = 'conll'
BC2GM_FORMAT = 'bc2gm'
FORMATS = [
    TEXT_FORMAT,
    STANDOFF_FORMAT,
    NERSUITE_FORMAT,
    CONLL_FORMAT,
    BC2GM_FORMAT,
]
DEFAULT_FORMAT=NERSUITE_FORMAT
# TODO: remove once https://github.com/nlplab/nersuite/issues/28 is resolved
# NERsuite crashes on longer tokens; Token.to_nersuite truncates to this.
NERSUITE_TOKEN_MAX_LENGTH = 500
class Token(object):
    """Token with position in document context, tag, and optional
    features."""
    def __init__(self, text, start, tag=OUT_TAG, fvec=None):
        # start/end are absolute character offsets into the document text;
        # fvec holds extra feature columns, copied defensively.
        self.tag = tag
        self.text = text
        self.start = start
        self.end = self.start + len(self.text)
        if fvec is None:
            self.fvec = []
        else:
            self.fvec = fvec[:]
        assert self.is_valid()
    def is_valid(self):
        # Internal consistency check; raises (via assert) rather than
        # returning False on failure.
        assert self.end == self.start + len(self.text)
        assert is_tag(self.tag)
        return True
    def tagged_type(self):
        # Strip the "B-"/"I-" prefix to get the entity type.
        # TODO: DRY!
        assert self.tag and self.tag != OUT_TAG
        return self.tag[2:]
    def to_nersuite(self, exclude_tag=False):
        """Return Token in NERsuite format."""
        if len(self.text) > NERSUITE_TOKEN_MAX_LENGTH:
            # NERsuite crashes on very long tokens, this exceptional
            # processing seeks to protect against that; see
            # https://github.com/nlplab/nersuite/issues/28
            import sys
            print('Warning: truncating very long token (%d characters) for NERsuite' % len(self.text), file=sys.stderr)
            text = self.text[:NERSUITE_TOKEN_MAX_LENGTH]
        else:
            text = self.text
        fields = ([self.tag] if not exclude_tag else []) + \
            [str(self.start), str(self.end), str(text)]
        return '\t'.join(chain(fields, self.fvec))
    def to_conll(self, include_offsets=False):
        """Return Token in CoNLL-like format."""
        fields = [str(self.text), self.tag]
        if include_offsets:
            offsets = [str(self.start), str(self.end)]
            fields = fields[:1] + offsets + fields[1:]
        return '\t'.join(chain(fields, self.fvec))
    @classmethod
    def from_text(cls, text, offset=0):
        """Return Token for given text."""
        return cls(text, offset)
    @classmethod
    def from_nersuite(cls, line):
        """Return Token given NERsuite format representation."""
        line = line.rstrip('\n')
        fields = line.split('\t')
        try:
            tag, start, end, text = fields[:4]
        except ValueError:
            raise FormatError('NERsuite format: too few fields ("%s")' % line)
        try:
            start, end = int(start), int(end)
        except ValueError:
            raise FormatError('NERsuite format: non-int start/end ("%s")'% line)
        if end-start != len(text):
            raise FormatError('NERsuite format: length mismatch ("%s")'% line)
        # any remaining fields are carried along as features
        return cls(text, start, tag, fields[4:])
class Sentence(object):
    """Sentence containing zero or more Tokens."""
    def __init__(self, text, base_offset, tokens):
        # base_offset is the sentence start as an absolute document offset;
        # token offsets are absolute as well.
        self.text = text
        self.base_offset = base_offset
        self.tokens = tokens[:]
        assert self.is_valid()
    def is_valid(self):
        """Return True if the Sentence is correctly composed of Tokens,
        False otherwise."""
        for t in self.tokens:
            tstart, tend = t.start-self.base_offset, t.end-self.base_offset
            assert self.text[tstart:tend] == t.text
            assert t.is_valid()
        # TODO: check that tokens are non-overlapping and fully cover
        # the Sentence text.
        return True
    def get_tagged(self, relative_offsets=False):
        """Return list of (type, start, end) based on Token tags.
        If relative_offsets is True, start and end offsets are
        relative to sentence beginning; otherwise, they are absolute
        offsets into the document text.
        """
        tagged = []
        first = None
        # walk B-/I- runs; a run ends when the next token is not an I- tag
        for t, next_t in pairwise(self.tokens, include_last=True):
            if is_start_tag(t.tag):
                first = t
            if first and not (next_t and is_continue_tag(next_t.tag)):
                tagged.append((first.tagged_type(), first.start, t.end))
                first = None
        if relative_offsets:
            tagged = [(t[0], t[1]-self.base_offset, t[2]-self.base_offset)
                      for t in tagged]
        return tagged
    def to_nersuite(self, exclude_tag=False):
        """Return Sentence in NERsuite format."""
        # empty "sentences" map to nothing in the NERsuite format.
        if not self.tokens:
            return ''
        # tokens with empty or space-only text are ignored
        tokens = [t for t in self.tokens if t.text and not t.text.isspace()]
        # sentences terminated with empty lines in NERsuite format
        return '\n'.join(chain((t.to_nersuite(exclude_tag)
                                for t in tokens), ['\n']))
    def to_conll(self, include_offsets=False):
        """Return Sentence in CoNLL-like format."""
        # empty "sentences" map to nothing
        if not self.tokens:
            return ''
        # tokens with empty or space-only text are ignored
        tokens = [t for t in self.tokens if t.text and not t.text.isspace()]
        # sentences terminated with empty lines
        return '\n'.join(chain((t.to_conll(include_offsets) for t in tokens),
                               ['\n']))
    def standoffs(self, index):
        """Return sentence annotations as list of Standoff objects."""
        # index is the first free textbound id ("T<index>")
        textbounds = []
        for type_, start, end in self.get_tagged():
            tstart, tend = start-self.base_offset, end-self.base_offset
            textbounds.append(Textbound('T%d' % index, type_, start, end,
                                        self.text[tstart:tend]))
            index += 1
        return textbounds
    def get_tags(self):
        """Return set of all tags in Sentence."""
        tags = set()
        for t in self.tokens:
            tags.add(t.tag)
        return tags
    def __len__(self):
        """Return length of Sentence in Tokens."""
        return len(self.tokens)
    @classmethod
    def from_text(cls, text, base_offset=0, tokenization_re=None):
        # Tokenize text; space-only tokens advance the offset but produce
        # no Token objects.
        tokens = []
        offset = 0
        for t in sentence_to_tokens(text, tokenization_re):
            if not t.isspace():
                tokens.append(Token.from_text(t, offset+base_offset))
            offset += len(t)
        return cls(text, base_offset, tokens)
    @classmethod
    def from_nersuite(cls, lines, base_offset=0):
        """Return Sentence given NERsuite format lines."""
        tokens = []
        for line in lines:
            tokens.append(Token.from_nersuite(line))
        if tokens:
            base_offset = tokens[0].start
        # The NERsuite format makes no record of space, so text needs
        # to be approximated.
        texts = []
        prev_offset = base_offset
        for t in tokens:
            texts.append(' ' * (t.start-prev_offset))
            texts.append(t.text)
            prev_offset = t.end
        text = ''.join(texts)
        return cls(text, base_offset, tokens)
class Document(object):
    """Text document containing zero or more Sentences."""
    def __init__(self, text, sentences):
        self.text = text
        self.sentences = sentences[:]
        self.id = None
        assert self.is_valid()
    def is_valid(self):
        """Return True if the Document is valid (correctly composed of
        Sentences etc.), False otherwise."""
        assert ''.join(s.text for s in self.sentences) == self.text
        assert not any(not s.is_valid() for s in self.sentences)
        # TODO: check that annotations are within doc span etc.
        return True
    def standoffs(self):
        """Return document annotations as list of Standoff objects."""
        # textbound ids ("T1", "T2", ...) are numbered document-wide
        index = 1
        standoffs = []
        for s in self.sentences:
            s_standoffs = s.standoffs(index)
            standoffs.extend(s_standoffs)
            index += len(s_standoffs)
        return standoffs
    def get_tags(self):
        """Return set of all tags in Document."""
        tags = set()
        for s in self.sentences:
            tags |= s.get_tags()
        return tags
    def to_nersuite(self, exclude_tag=False):
        """Return Document in NERsuite format."""
        return ''.join((s.to_nersuite(exclude_tag) for s in self.sentences))
    def to_conll(self, include_offsets=False, include_docid=False):
        """Return Document in CoNLL-like format.

        If include_docid is True, a '# doc_id = ...' header line is
        prepended.
        """
        # Fixed naming: the original reused 's' for both the header string
        # and the generator variable, which only worked because generator
        # expressions have their own scope.
        header = '# doc_id = %s\n' % self.id if include_docid else ''
        return header + ''.join(s.to_conll(include_offsets)
                                for s in self.sentences)
    def to_standoff(self):
        """Return Document annotations in BioNLP ST/brat-flavored
        standoff format."""
        standoffs = self.standoffs()
        return '\n'.join(str(s) for s in standoffs)+'\n' if standoffs else ''
    def to_bc2gm(self):
        """Return Document annotations in BioCreative 2 Gene Mention
        format."""
        lines = []
        for s in self.sentences:
            tagged = s.get_tagged(relative_offsets=True)
            tagged = [(t[0], t[1], t[2], s.text[t[1]:t[2]]) for t in tagged]
            # The BC2GM format ignores space when counting offsets,
            # and is inclusive for the end offset. Create mapping
            # from standard to no-space offsets and remap.
            offset_map = {}
            o = 0
            for i, c in enumerate(s.text):
                if not c.isspace():
                    offset_map[i] = o
                    o += 1
            tagged = [(t[0], offset_map[t[1]], offset_map[t[2]-1], t[3])
                      for t in tagged]
            for t in tagged:
                lines.append('%s|%d %d|%s\n' % (self.sentence_id(s),
                                                t[1], t[2], t[3]))
        return ''.join(lines)
    def bc2gm_text(self):
        # One "<sentence-id> <text>" line per sentence (BC2GM input format).
        return ''.join(['%s %s\n' % (self.sentence_id(s), s.text)
                        for s in self.sentences])
    def sentence_id(self, s):
        # BC2GM-style sentence id: document id plus sentence start offset.
        return 'P%sO%d' % (self.id, s.base_offset)
    def __len__(self):
        """Return length of Document in Sentences."""
        return len(self.sentences)
    @classmethod
    def from_text(cls, text, sentence_split=True, annotations=None,
                  tokenization_re=None):
        """Return Document with given text and no annotations.
        If annotations is not None, avoid creating sentence splits
        that would split given annotations.
        """
        split = text_to_sentences(text, sentence_split)
        assert ''.join(split) == text, 'sentence split mismatch'
        if sentence_split and annotations:
            # Re-join splits that break up annotations (TODO: avoid O(nm))
            rejoined = []
            o, prev = 0, None
            for s in split:
                if any(a for a in annotations if a.start < o and a.end >= o):
                    warn('rejoin ssplit: {} /// {}'.format(
                        prev.encode('utf-8'), s.encode('utf-8')))
                    rejoined[-1] = rejoined[-1] + s
                else:
                    rejoined.append(s)
                o += len(s)
                prev = s
            split = rejoined
            assert ''.join(split) == text, 'sentence rejoin error'
        sentences = []
        offset = 0
        for s in split:
            sentences.append(Sentence.from_text(
                s, offset, tokenization_re=tokenization_re)
            )
            offset += len(s)
        return cls(text, sentences)
    @classmethod
    def from_nersuite(cls, text):
        """Return Document given NERsuite format file."""
        # split_keep_separator is defined elsewhere in this module --
        # TODO confirm it yields lines with their trailing newlines kept.
        sentences = []
        lines = []
        offset = 0
        for line in split_keep_separator(text):
            if not line:
                pass
            elif not line.isspace():
                lines.append(line)
            else:
                # blank line terminates a sentence
                sentences.append(Sentence.from_nersuite(lines, offset))
                if sentences[-1].tokens:
                    offset = sentences[-1].tokens[-1].end + 1 # guess
                lines = []
        if lines:
            sentences.append(Sentence.from_nersuite(lines, offset))
        # Add spaces for gaps implied by token positions but not
        # explitly recorded in NERsuite format
        for s, next_s in pairwise(sentences):
            if s.tokens and next_s.tokens:
                gap = next_s.tokens[0].start - s.tokens[-1].end
                s.text = s.text + ' ' * gap
        # Assure document-final newline (text file)
        if sentences and not sentences[-1].text.endswith('\n'):
            sentences[-1].text = sentences[-1].text + '\n'
        text = ''.join(s.text for s in sentences)
        return cls(text, sentences)
    @classmethod
    def from_standoff(cls, text, annotations, sentence_split=True,
                      discont_rule=None, overlap_rule=None,
                      filter_types=None, exclude_types=None,
                      tokenization_re=None, document_id=None):
        """Return Document given text and standoff annotations."""
        # first create a document from the text without annotations
        # with all "out" tags (i.e. "O"), then re-tag the tokens based
        # on the textbounds.
        textbounds = parse_textbounds(annotations, discont_rule)
        document = cls.from_text(text, sentence_split, textbounds,
                                 tokenization_re=tokenization_re)
        if document_id is not None:
            document.id = document_id
        if filter_types:
            textbounds = filter_textbounds(textbounds, filter_types)
        if exclude_types:
            textbounds = filter_textbounds(textbounds, exclude_types,
                                           exclude=True)
        verify_textbounds(textbounds, text)
        textbounds = eliminate_overlaps(textbounds, overlap_rule)
        retag_document(document, textbounds)
        return document
| spyysalo/standoff2conll | document.py | Python | mit | 14,662 |
#!/usr/bin/env python
'''
PiOS example according to
"Constructing molecular pi-orbital active spaces for multireference calculations of conjugated systems"
E. R. Sayfutyarova and S. Hammes-Schiffer, J. Chem. Theory Comput., 15, 1679 (2019).
'''
import numpy
from pyscf import gto
from pyscf.gto import mole
from pyscf.gto import moleintor
from pyscf import scf
from pyscf import ao2mo
from pyscf import mcscf
from functools import reduce
from pyscf import fci
from pyscf.tools import fcidump
from pyscf import mrpt
from pyscf.mcscf.PiOS import MakePiOS
mol=gto.Mole()
# trans-octatetraene (C8H10) geometry, Angstrom
mol.atom='''
C -0.205041040016 4.293235977701 0.000000000000
C 0.403262807645 3.101311953997 -0.000000000000
C -0.282048458140 1.831603049166 0.000000000000
C 0.338470548365 0.633304932323 0.000000000000
C -0.338470548365 -0.633304932323 0.000000000000
C 0.282048458140 -1.831603049166 0.000000000000
C -0.403262807645 -3.101311953997 0.000000000000
C 0.205041040016 -4.293235977701 0.000000000000
H 0.357619144805 5.215664988374 0.000000000000
H -1.284762686441 4.375962764712 0.000000000000
H 1.488448703032 3.064020960940 0.000000000000
H -1.367965273026 1.860302168845 0.000000000000
H 1.424632376981 0.609318533944 0.000000000000
H -1.424632376981 -0.609318533944 0.000000000000
H 1.367965273026 -1.860302168845 0.000000000000
H -1.488448703032 -3.064020960940 0.000000000000
H 1.284762686441 -4.375962764712 0.000000000000
H -0.357619144805 -5.215664988374 0.000000000000
'''
mol.basis = 'aug-cc-pvtz'
mol.verbose=5
mol.spin =0
mol.build()
# Rotate the molecule so that it's not parallel to xy-plane
numpy.random.seed(1)
u = numpy.linalg.svd(numpy.random.random((3,3)))[0]
mol.set_geom_(mol.atom_coords().dot(u), unit='Bohr')
mf=scf.RHF(mol)
mf.kernel()
PiAtoms = [1,2,3,4,5,6,7,8] #list atom numbers for your pi-system, counting from 1
# MakePiOS constructs the pi-orbital active space (PiOS procedure)
N_Core,N_Actorb, N_Virt,nelec,coeff=MakePiOS(mol,mf,PiAtoms)
#if you don't want the entire pi-space, use MakePiOS(mol,mf,PiAtomsList, nPiOcc,nPiVirt), where nPiOcc and nPiVirt determine how many HOMOs and LUMOs should be picked up
nalpha=(nelec+mol.spin)//2
nbeta=(nelec-mol.spin)//2
#=================================run CASSCF
# 5-state state-averaged CASSCF with the PiOS active space
mycas = mcscf.CASSCF(mf, N_Actorb, [nalpha,nbeta])
AS=range(N_Core,N_Core+N_Actorb)
mycas =mycas.state_average_([0.2, 0.2, 0.2,0.2,0.2])
mycas.chkfile ='cas_c8h10.chk'
mycas.fcisolver.nroots = 5
mycas.fix_spin_(ss=0)
activeMO = mcscf.sort_mo(mycas,coeff,AS,base=0)
mycas.verbose = 5
mycas.max_cycle_macro = 150
mycas.kernel(activeMO)
#==================================run CASCI followed by NEVPT2 with CASSCF orbitals
mycas = mcscf.CASCI(mf, N_Actorb, [nalpha,nbeta])
mycas.__dict__.update(scf.chkfile.load('cas_c8h10.chk', 'mcscf'))
mycas.fcisolver.nroots = 5
mycas.fix_spin_(ss=0)
mycas.verbose = 5
mycas.kernel()
# state-specific NEVPT2 corrections for the five CASCI roots
ci_nevpt_e1 = mrpt.NEVPT(mycas, root=0).kernel()
ci_nevpt_e2 = mrpt.NEVPT(mycas, root=1).kernel()
ci_nevpt_e3 = mrpt.NEVPT(mycas, root=2).kernel()
ci_nevpt_e4 = mrpt.NEVPT(mycas, root=3).kernel()
ci_nevpt_e5 = mrpt.NEVPT(mycas, root=4).kernel()
| sunqm/pyscf | examples/mcscf/43-PiOS-C8H10.py | Python | apache-2.0 | 3,112 |
from gmres import *
from poisson import *
cell = 1  # global counter handing out unique ids to otree cells (debug labels)
# Distance hierarchy-aware oct-tree suitable for calculating FMM integrals.
class otree:
    """Adaptive oct-tree over 3D points for FMM summation (Python 2 code:
    integer division in the lambdas and print statements in comments)."""
    def __init__(self, x, levels=4, Nmin=20, box=None):
        global cell
        if box == None: # new tree
            box = array([x.min(0), x.max(0)])
        self.R = sqrt(sum((box[1]-box[0])**2))/sqrt(3.0)
        #print "box = " + str(box) + " R = %f"%self.R
        self.cell = cell
        cell += 1
        self.levels = levels # Max number of levels below self.
        self.ctr = 0.5*sum(box, 0) # center
        # leaf if depth exhausted or few enough points
        if self.levels == 0 or len(x) < Nmin:
            self.x = x - self.ctr
            self.child = None
            #print "  c%d [label=\"%d\"];"%(self.cell, len(self.x))
            return
        # points are stored relative to the cell center
        self.x = x - self.ctr
        #self.x = None # may be required by nearby low density region!
        # split in 8 pieces
        self.child = []
        for i in range(2):
            if i == 0:
                li = array(where(x[:,0] < self.ctr[0]), dtype=np.int)
            else:
                li = array(where(x[:,0] >= self.ctr[0]), dtype=np.int)
            for j in range(2):
                if j == 0:
                    lj = li[where(x[li,1] < self.ctr[1])]
                else:
                    lj = li[where(x[li,1] >= self.ctr[1])]
                for k in range(2):
                    if k == 0:
                        lk = lj[where(x[lj,2] < self.ctr[2])]
                    else:
                        lk = lj[where(x[lj,2] >= self.ctr[2])]
                    cbox = box.copy()
                    cbox[1-i,0] = self.ctr[0]
                    cbox[1-j,1] = self.ctr[1]
                    cbox[1-k,2] = self.ctr[2]
                    o = otree(x[lk], levels-1, Nmin, cbox)
                    #print "  c%d -> c%d;"%(self.cell, o.cell)
                    # translation to child ctr, list of child indices, child object
                    self.child.append((self.ctr - 0.5*sum(cbox,0), lk, o))
    # Set interaction list through a recursive call.
    # s is the set of all cells neighboring the parent, (including the parent)
    # in the form: [(dist, otree)], where dist is the integer distance (-1,0,1)^3
    def set_ilist(self, crd, s):
        adj = []
        self.interact = []
        # decode child index 0..7 into integer grid coordinates
        num = lambda k: array([k/4, (k/2)%2, k%2])
        # Every child (or self) of s is classified as either adj. or interact.
        for d,p in s:
            if p.child != None:
                for k,c in enumerate(p.child):
                    dist = 2*d + num(k) - crd # simplified centroid sep.
                    if all(abs(dist) <= 1):
                        adj.append((dist, c[2]))
                    else:
                        self.interact.append((self.ctr - c[2].ctr, c[2]))
            elif p.x != None:
                # leaf neighbor one level up; compare at this level's scale
                dist = 4*d - 2*crd + np.ones(3, np.int) # centroid sep.
                if all(abs(dist) <= 3): # convex hull addition r=1 + r=2
                    adj.append((d, p)) # act as neighbor in 'd' direction
                else:
                    self.interact.append((self.ctr - p.ctr, p))
        #print "testing interact:"
        #for t,c in self.interact:
        #    if sqrt(sum(t**2)) < 1.5*self.R:
        #        print "I: %f (R = %f)"%(sqrt(sum(t**2)), self.R)
        # Recurse through child lists, which work with my adj list.
        if self.child != None:
            #print "  Node, %d interact, %d adj"%(len(self.interact), len(adj))
            for k,c in enumerate(self.child):
                #print "  Child: " + str(num(k))
                c[2].set_ilist(num(k), adj)
        else:
            # Fix up adj list to use actual translation vectors (to self)
            # and remove 'self' point.
            for i in range(len(adj)-1, -1, -1):
                if all(adj[i][0] == 0):
                    del adj[i]
                    continue
                adj[i] = (self.ctr - adj[i][1].ctr,) + adj[i][1:]
            #print "  Leaf, %d items, %d interact, %d adj"%(len(self.x), len(self.interact), len(adj))
            self.adj = adj
            #print "testing adj:"
            #for t,c in self.interact:
            #    if sqrt(sum(t**2)) < 0.5*sqrt(3.0)*self.R:
            #        print "A: %f (R = %f)"%(sqrt(sum(t**2)), self.R)
    # Do an upward sweep, collecting moments from all children.
    def gather(self, G, q):
        if self.child != None:
            self.M = 0.0
            # child moments are shifted (M -> M) to this cell's center
            for (t, l,c) in self.child:
                self.M += G.oshift(c.gather(G, q[l]), c.R, t, self.R)
                #err = abs(G.g_quad(c.M, c.R, t + self.R*G.L.x)
                #        - G.g_quad(G.oshift(c.M, c.R, t, self.R),
                #                   self.R, self.R*G.L.x)).max()
                #if err > 1e-3:
                #    print "  Large M -> M error: %g"%err
                #    print "    self = %d R = %f"%(self.cell, self.R)
                #    print "    C = %d R = %f"%(c.cell, c.R)
                #    print "    t = %f %f %f (%f)"%(t[0], t[1], t[2], sqrt(sum(t*t)))
        else:
            self.M = G.solve_moments(q, self.x, self.R)
            self.q = q
        return self.M
    # Do a downward sweep, distributing the L-expansion
    # for the far field to all children.
    def distribute(self, G, L, u, ig_quad, calc):
        # convert interaction-list moments into the local (L) expansion
        for (t,c) in self.interact:
            L += G.ishift(c.M, c.R, t, self.R) # M -> L[t]
            #print c.q.shape, c.x.shape
            #err = abs(G.calc(c.q, c.x, t + 0.5*self.R*G.L.x)
            #        - G.ig_quad(G.ishift(c.M, c.R, t, self.R),
            #                   self.R, 0.5*self.R*G.L.x)).max()
            #if err > 1e-6:
            #    print "  Large M -> L error: %g"%err
            #    print "    self = %d R = %f"%(self.cell, self.R)
            #    print "    I = %d R = %f"%(c.cell, c.R)
            #    print "    t = %f %f %f (%f)"%(t[0], t[1], t[2], sqrt(sum(t*t)))
            #L += G.solve_moments(c.M, c.R*G.L.x-t, self.R, -1) # M -> L[t]
            #L += G.solve_moments(c.q, c.x-t, self.R, -1) # M -> L[t]
        if self.child != None:
            if isinstance(L, np.ndarray):
                for (t,l,c) in self.child:
                    u[l] = c.distribute(G, G.ishift(L, self.R, -t, c.R), \
                                        u[l], ig_quad, calc) # L[t] -> L
                    #err = abs(G.ig_quad(L, self.R, 0.5*G.L.x*c.R-t)
                    #        - G.ig_quad(G.ishift(L, self.R, -t, c.R),
                    #                   c.R, 0.5*G.L.x*c.R)).max()
                    #if err > 1e-6:
                    #    print "  Large L -> L error: %g"%err
                    #    print "    self = %d R = %f"%(self.cell, self.R)
                    #    print "    C = %d R = %f"%(c.cell, c.R)
                    #    print "    t = %f %f %f (%f)"%(t[0], t[1], t[2], sqrt(sum(t*t)))
            else:
                # L is still the scalar 0.0: nothing to shift yet
                for (t,l,c) in self.child:
                    u[l] = c.distribute(G, L, u[l], ig_quad, calc)
        else:
            # leaf: far field from L, near field summed directly
            if isinstance(L, np.ndarray):
                u += ig_quad(L, self.R, self.x)
            for (t,c) in self.adj:
                if len(c.q) > 0:
                    u += calc(c.q, c.x-t, self.x)
            # self-cell pairwise sums, skipping the i == j singular term
            for i in range(len(self.x)):
                if i > 0:
                    u[i:i+1] += calc(self.q[:i], self.x[:i], self.x[i:i+1])
                if i < len(self.x)-1:
                    u[i:i+1] += calc(self.q[i+1:], self.x[i+1:], self.x[i:i+1])
        return u
    # Top-level sweep routine
    # f : source strengths
    # u : dest. strengths
    # ig_quad : moments -> R -> dests (N x 3) -> N x ...
    # calc : q -> src (M x 3) -> dests (N x 3) -> N x ...
    def sweep(self, G, f, u, ig_quad, calc):
        self.gather(G, f)
        return self.distribute(G, 0.0, u, ig_quad, calc)
    # Top-level neighbor-list routine
    def nlist(self):
        num = lambda k: array([k/4, (k/2)%2, k%2])
        self.interact = []
        if self.child != None:
            z = np.zeros(3, dtype=np.int)
            for k,c in enumerate(self.child):
                #print "  Child: " + str(num(k))
                c[2].set_ilist(num(k), [(z, self)])
        else:
            self.adj = []
# FMM kernel and solution routines for G f = u using Gmres
# G : FMM-able kernel (contains calc, solve_moments, shift, ginv)
# dof : number of degrees of freedom per src/dst point (= 0 for 1 with no extra dim.)
# ig_quad and calc: moment-based and direct calculation of kernel
# for Poisson, use G.ig_quad, G.calc or G.f_quad and G.calc2
# tree_params : passed (in grid()) as kwargs to otree(x, **tree_params)
# fix : func. to run on result of tree.sweep(G, f, u, ig_quad, calc)
class FMM:
    """Matrix-free linear operator applying the kernel G via the fast
    multipole method; usable directly as the operator in gmres()."""
    def __init__(self, G, ig_quad, calc, dof=0, fix=None, **tree_params):
        self.G = G
        self.dof = dof
        self.tree_params = tree_params
        self.tree = None
        self.shape = (0,0)
        self.N = 0
        self.ig_quad = ig_quad
        self.calc = calc
        # fix post-processes the result; NOTE(review): its return value is
        # discarded in __call__, so it is expected to mutate u in place.
        if fix == None:
            self.fix = lambda x: x
        else:
            self.fix = fix
    # initialize a new particle position oct-tree
    def grid(self, x):
        self.tree = otree(x, **self.tree_params)
        self.tree.nlist()
        self.N = len(x)
        if self.dof == 0:
            n = self.N
        else:
            n = self.N*self.dof
        self.shape = (n, n)
    # Calculate sum_j G(x_i, y_j; f_j) using FMM
    # requires self.tree (made by Solver.grid())
    def __call__(self, f):
        if self.dof > 0:
            u = zeros((self.N, self.dof))
            self.tree.sweep(self.G, reshape(f, (self.N, self.dof)), \
                            u, self.ig_quad, self.calc)
        else:
            u = zeros(self.N)
            self.tree.sweep(self.G, f, \
                            u, self.ig_quad, self.calc)
        self.fix(u)
        # flatten back to a vector matching self.shape
        return reshape(u, self.shape[0])
    # requires self.tree (made by Solver.grid())
    def solve(self, u, **kwargs):
        return gmres(self, u, **kwargs)
def test():
    """Self-test: compare FMM potentials against the direct O(N^2) sum.

    Python 2 only (print statements). Prints the relative L2 error and
    exits before the (dead) exploratory code below.
    """
    N = 10
    rand = np.random
    G = Poisson(12)
    f = FMM(G, G.ig_quad, G.calc, levels=4)
    # N^3 jittered grid points, scaled up to spread across tree levels.
    x = rand.random((N,N,N,3))
    x[...,0] += arange(N)[:,newaxis,newaxis]
    x[...,1] += arange(N)[newaxis,:,newaxis]
    x[...,2] += arange(N)[newaxis,newaxis,:]
    N = N**3
    x = reshape(x, (N,3))*10.0
    f.grid(x) # create oct-tree
    q = rand.random(N)-0.5
    # use FMM to compute potentials
    phi = f(q)
    # Reference: direct pairwise sum, skipping the self term i == j.
    phi2 = zeros(phi.shape)
    for i in range(N):
        if i > 0:
            phi2[i:i+1] += G.calc(q[:i], x[:i], x[i:i+1])
        if i < N-1:
            phi2[i:i+1] += G.calc(q[i+1:], x[i+1:], x[i:i+1])
    # Relative L2 error between FMM and direct evaluation.
    print sqrt(sum((phi - phi2)**2)/sum(phi2**2))
    exit(0)
    # --- unreachable exploratory code kept from development ---
    #d = sqrt(sum((x[:,newaxis,:]-x)**2, 2))
    #for i in range(N):
    #    d[i,i] = 10.0
    #
    #M = 0.25/pi/d
    #for i in range(N):
    #    M[i,i] = 0.0
    #print M.max()
    #print np.linalg.eigh(M)[0]
    #q3 = np.linalg.solve(M, phi)
    #print q3 - q
    #phi = rand.random(100) # 100 random potential points
    q2 = f.solve(phi, tol=1e-8, no_progress_factor=1.0) # potential that generates 'em
    print q
    print q2 # statistics of sol'n, etc.
| frobnitzem/realpole | fmm.py | Python | gpl-3.0 | 11,049 |
# coding: utf-8
"""
CERN Unified Storage API
A unified storage API for all data-storage back-ends.
OpenAPI spec version: 2.0.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import swagger_client
from swagger_client.rest import ApiException
from swagger_client.models.lock import Lock
class TestLock(unittest.TestCase):
    """ Lock unit test stubs (swagger-codegen generated). """

    def setUp(self):
        # No per-test fixtures are required for this generated stub.
        pass

    def tearDown(self):
        # Nothing to clean up.
        pass

    def testLock(self):
        """
        Test Lock
        """
        # Smoke test: constructing the generated model must not raise.
        # TODO: populate mandatory attributes and assert on them.
        model = swagger_client.models.lock.Lock()
if __name__ == '__main__':
unittest.main()
| icot/sapi_client | test/test_lock.py | Python | gpl-3.0 | 720 |
#Copyright ReportLab Europe Ltd. 2000-2008
#see license.txt for license details
__version__=''' $Id: fodyssey.py 3660 2010-02-08 18:17:33Z damian $ '''
__doc__=''
#REPORTLAB_TEST_SCRIPT
import sys, copy, os
from reportlab.platypus import *
from reportlab.lib.units import inch
from reportlab.lib.styles import getSampleStyleSheet
from reportlab.lib.enums import TA_LEFT, TA_RIGHT, TA_CENTER, TA_JUSTIFY
import reportlab.rl_config
reportlab.rl_config.invariant = 1
styles = getSampleStyleSheet()
Title = "The Odyssey"
Author = "Homer"
def myFirstPage(canvas, doc):
    """onFirstPage callback: the title page carries no decorations."""
    canvas.saveState()
    canvas.restoreState()
def myLaterPages(canvas, doc):
    """onLaterPages callback: draw the page number in the bottom margin."""
    canvas.saveState()
    canvas.setFont('Times-Roman',9)
    canvas.drawString(inch, 0.75 * inch, "Page %d" % doc.page)
    canvas.restoreState()
def go():
    """Build fodyssey.pdf from the module-level Elements list.

    Command-line keywords: 'showboundary' draws frame boundaries and
    'nosplitting' disables flowable splitting across pages.
    """
    doc = SimpleDocTemplate('fodyssey.pdf',showBoundary='showboundary' in sys.argv)
    doc.allowSplitting = not 'nosplitting' in sys.argv
    doc.build(Elements,myFirstPage,myLaterPages)
# Accumulates the flowables that go() hands to the document builder.
Elements = []

# Centered chapter heading derived from the sample Heading1 style.
ChapterStyle = copy.copy(styles["Heading1"])
ChapterStyle.alignment = TA_CENTER
# ReportLab's ParagraphStyle attribute is 'fontSize' (capital S); the
# original lowercase 'fontsize' assignment created an unused attribute
# and silently had no effect on rendering.
ChapterStyle.fontSize = 16
# Title-page style: same as chapter headings but with looser leading.
InitialStyle = copy.deepcopy(ChapterStyle)
InitialStyle.fontSize = 16
InitialStyle.leading = 20
PreStyle = styles["Code"]
def newPage():
    """Force a page break before the next flowable."""
    Elements.append(PageBreak())
def chapter(txt, style=ChapterStyle):
    """Start a new page and emit a chapter heading followed by a spacer."""
    newPage()
    Elements.append(Paragraph(txt, style))
    Elements.append(Spacer(0.2*inch, 0.3*inch))
def fTitle(txt,style=InitialStyle):
    """Emit a title-page paragraph (no page break)."""
    Elements.append(Paragraph(txt, style))
# Body-text style; alignment is selectable from the command line.
ParaStyle = copy.deepcopy(styles["Normal"])
ParaStyle.spaceBefore = 0.1*inch
# Justified is the default, so the explicit 'justify' branch is
# redundant but kept for symmetry with the other keywords.
if 'right' in sys.argv:
    ParaStyle.alignment = TA_RIGHT
elif 'left' in sys.argv:
    ParaStyle.alignment = TA_LEFT
elif 'justify' in sys.argv:
    ParaStyle.alignment = TA_JUSTIFY
elif 'center' in sys.argv or 'centre' in sys.argv:
    ParaStyle.alignment = TA_CENTER
else:
    ParaStyle.alignment = TA_JUSTIFY
def spacer(inches):
    """Append a vertical spacer of the given height in inches."""
    Elements.append(Spacer(0.1*inch, inches*inch))
def p(txt, style=ParaStyle):
    """Append a body-text paragraph."""
    Elements.append(Paragraph(txt, style))
def pre(txt, style=PreStyle):
    """Append a preformatted (code-style) block preceded by a small spacer."""
    spacer(0.1)
    # Use a distinct local name: the original 'p' shadowed the
    # module-level paragraph helper of the same name.
    block = Preformatted(txt, style)
    Elements.append(block)
def parseOdyssey(fn):
    """Parse the Gutenberg Odyssey text in fn and render it to PDF.

    Python 2 only (print statements, xrange, list-returning map). The
    text is split into preamble / books / postamble, converted into a
    list of deferred (function, args...) actions, replayed to build
    Elements, and finally handed to go(). Timing for each phase is
    printed along the way.
    """
    from time import time
    E = []
    t0=time()
    text = open(fn,'r').read()
    # The main body runs from 'Book I' to the closing sentence below;
    # everything outside that span is front/back matter.
    i0 = text.index('Book I')
    endMarker = 'covenant of peace between the two contending parties.'
    i1 = text.index(endMarker)+len(endMarker)
    # NOTE: Python 2 map() returns lists -- the pop(0)-driven generators
    # below depend on that.
    PREAMBLE=map(str.strip,text[0:i0].split('\n'))
    L=map(str.strip,text[i0:i1].split('\n'))
    POSTAMBLE=map(str.strip,text[i1:].split('\n'))

    def ambleText(L):
        # Yield successive blank-line-separated paragraphs, consuming L.
        while L and not L[0]: L.pop(0)
        while L:
            T=[]
            while L and L[0]:
                T.append(L.pop(0))
            yield T
            while L and not L[0]: L.pop(0)

    def mainText(L):
        # Yield (book title, heading lines, list of paragraphs) per book.
        # A new book starts at a line of the exact form 'Book <roman>'.
        while L:
            B = L.pop(0)
            while not L[0]: L.pop(0)
            T=[]
            while L and L[0]:
                T.append(L.pop(0))
            while not L[0]: L.pop(0)
            P = []
            while L and not (L[0].startswith('Book ') and len(L[0].split())==2):
                E=[]
                while L and L[0]:
                    E.append(L.pop(0))
                P.append(E)
                if L:
                    while not L[0]: L.pop(0)
            yield B,T,P

    t1 = time()
    print "open(%s,'r').read() took %.4f seconds" %(fn,t1-t0)
    E.append([spacer,2])
    E.append([fTitle,'<font color=red>%s</font>' % Title, InitialStyle])
    E.append([fTitle,'<font size=-4>by</font> <font color=green>%s</font>' % Author, InitialStyle])
    for T in ambleText(PREAMBLE):
        E.append([p,'\n'.join(T)])
    for (B,T,P) in mainText(L):
        E.append([chapter,B])
        E.append([p,'<font size="+1" color="Blue"><b>%s</b></font>' % '\n'.join(T),ParaStyle])
        for x in P:
            E.append([p,' '.join(x)])
    firstPre = 1  # unused leftover flag
    for T in ambleText(POSTAMBLE):
        E.append([p,'\n'.join(T)])
    t3 = time()
    print "Parsing into memory took %.4f seconds" %(t3-t1)
    del L
    t4 = time()
    print "Deleting list of lines took %.4f seconds" %(t4-t3)
    # Replay the deferred actions to populate the module-level Elements.
    for i in xrange(len(E)):
        E[i][0](*E[i][1:])
    t5 = time()
    print "Moving into platypus took %.4f seconds" %(t5-t4)
    del E
    t6 = time()
    print "Deleting list of actions took %.4f seconds" %(t6-t5)
    go()
    t7 = time()
    print "saving to PDF took %.4f seconds" %(t7-t6)
    print "Total run took %.4f seconds"%(t7-t0)
# Prefer the full text when present, fall back to the abridged copy;
# fn keeps the last candidate if neither file exists.
for fn in ('odyssey.full.txt','odyssey.txt'):
    if os.path.isfile(fn):
        break

if __name__=='__main__':
    parseOdyssey(fn)
| HM2MC/Webfront | reportlab-2.5/demos/odyssey/fodyssey.py | Python | mit | 4,608 |
#!/usr/bin/python
import time
def web_socket_do_extra_handshake(request):
    # Deliberately stall the handshake for 2 seconds so web-platform
    # tests can exercise client-side handshake timeout behavior.
    time.sleep(2)
def web_socket_transfer_data(request):
    # No data phase: this handler only tests the delayed handshake.
    pass
| nwjs/chromium.src | third_party/wpt_tools/wpt/websockets/handlers/handshake_sleep_2_wsh.py | Python | bsd-3-clause | 143 |
from __future__ import absolute_import
from .ppi import *
| tomazc/orange-bio | orangecontrib/bio/obiPPI.py | Python | gpl-3.0 | 59 |
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class gslbsite(base_resource) :
""" Configuration for GSLB site resource. """
def __init__(self) :
    # Writable configuration attributes (sent to the appliance).
    self._sitename = ""
    self._sitetype = ""
    self._siteipaddress = ""
    self._publicip = ""
    self._metricexchange = ""
    self._nwmetricexchange = ""
    self._sessionexchange = ""
    self._triggermonitor = ""
    self._parentsite = ""
    self._clip = ""
    self._publicclip = ""
    # Read-only state reported by the appliance.
    self._status = ""
    self._persistencemepstatus = ""
    self._version = 0
    # Resource count for count queries; set dynamically by the payload
    # formatter via setattr, which bypasses Python name mangling.
    self.___count = 0
@property
def sitename(self) :
"""Name for the GSLB site. Must begin with an ASCII alphanumeric or underscore (_) character, and must contain only ASCII alphanumeric, underscore, hash (#), period (.), space, colon (:), at (@), equals (=), and hyphen (-) characters. Cannot be changed after the virtual server is created.
CLI Users: If the name includes one or more spaces, enclose the name in double or single quotation marks (for example, "my gslbsite" or 'my gslbsite').<br/>Minimum length = 1.
"""
try :
return self._sitename
except Exception as e:
raise e
@sitename.setter
def sitename(self, sitename) :
"""Name for the GSLB site. Must begin with an ASCII alphanumeric or underscore (_) character, and must contain only ASCII alphanumeric, underscore, hash (#), period (.), space, colon (:), at (@), equals (=), and hyphen (-) characters. Cannot be changed after the virtual server is created.
CLI Users: If the name includes one or more spaces, enclose the name in double or single quotation marks (for example, "my gslbsite" or 'my gslbsite').<br/>Minimum length = 1
"""
try :
self._sitename = sitename
except Exception as e:
raise e
@property
def sitetype(self) :
"""Type of site to create. If the type is not specified, the appliance automatically detects and sets the type on the basis of the IP address being assigned to the site. If the specified site IP address is owned by the appliance (for example, a MIP address or SNIP address), the site is a local site. Otherwise, it is a remote site.<br/>Default value: NONE<br/>Possible values = REMOTE, LOCAL.
"""
try :
return self._sitetype
except Exception as e:
raise e
@sitetype.setter
def sitetype(self, sitetype) :
"""Type of site to create. If the type is not specified, the appliance automatically detects and sets the type on the basis of the IP address being assigned to the site. If the specified site IP address is owned by the appliance (for example, a MIP address or SNIP address), the site is a local site. Otherwise, it is a remote site.<br/>Default value: NONE<br/>Possible values = REMOTE, LOCAL
"""
try :
self._sitetype = sitetype
except Exception as e:
raise e
@property
def siteipaddress(self) :
"""IP address for the GSLB site. The GSLB site uses this IP address to communicate with other GSLB sites. For a local site, use any IP address that is owned by the appliance (for example, a SNIP or MIP address, or the IP address of the ADNS service).<br/>Minimum length = 1.
"""
try :
return self._siteipaddress
except Exception as e:
raise e
@siteipaddress.setter
def siteipaddress(self, siteipaddress) :
"""IP address for the GSLB site. The GSLB site uses this IP address to communicate with other GSLB sites. For a local site, use any IP address that is owned by the appliance (for example, a SNIP or MIP address, or the IP address of the ADNS service).<br/>Minimum length = 1
"""
try :
self._siteipaddress = siteipaddress
except Exception as e:
raise e
@property
def publicip(self) :
"""Public IP address for the local site. Required only if the appliance is deployed in a private address space and the site has a public IP address hosted on an external firewall or a NAT device.<br/>Minimum length = 1.
"""
try :
return self._publicip
except Exception as e:
raise e
@publicip.setter
def publicip(self, publicip) :
"""Public IP address for the local site. Required only if the appliance is deployed in a private address space and the site has a public IP address hosted on an external firewall or a NAT device.<br/>Minimum length = 1
"""
try :
self._publicip = publicip
except Exception as e:
raise e
@property
def metricexchange(self) :
"""Exchange metrics with other sites. Metrics are exchanged by using Metric Exchange Protocol (MEP). The appliances in the GSLB setup exchange health information once every second.
If you disable metrics exchange, you can use only static load balancing methods (such as round robin, static proximity, or the hash-based methods), and if you disable metrics exchange when a dynamic load balancing method (such as least connection) is in operation, the appliance falls back to round robin. Also, if you disable metrics exchange, you must use a monitor to determine the state of GSLB services. Otherwise, the service is marked as DOWN.<br/>Default value: ENABLED<br/>Possible values = ENABLED, DISABLED.
"""
try :
return self._metricexchange
except Exception as e:
raise e
@metricexchange.setter
def metricexchange(self, metricexchange) :
"""Exchange metrics with other sites. Metrics are exchanged by using Metric Exchange Protocol (MEP). The appliances in the GSLB setup exchange health information once every second.
If you disable metrics exchange, you can use only static load balancing methods (such as round robin, static proximity, or the hash-based methods), and if you disable metrics exchange when a dynamic load balancing method (such as least connection) is in operation, the appliance falls back to round robin. Also, if you disable metrics exchange, you must use a monitor to determine the state of GSLB services. Otherwise, the service is marked as DOWN.<br/>Default value: ENABLED<br/>Possible values = ENABLED, DISABLED
"""
try :
self._metricexchange = metricexchange
except Exception as e:
raise e
@property
def nwmetricexchange(self) :
"""Exchange, with other GSLB sites, network metrics such as round-trip time (RTT), learned from communications with various local DNS (LDNS) servers used by clients. RTT information is used in the dynamic RTT load balancing method, and is exchanged every 5 seconds.<br/>Default value: ENABLED<br/>Possible values = ENABLED, DISABLED.
"""
try :
return self._nwmetricexchange
except Exception as e:
raise e
@nwmetricexchange.setter
def nwmetricexchange(self, nwmetricexchange) :
"""Exchange, with other GSLB sites, network metrics such as round-trip time (RTT), learned from communications with various local DNS (LDNS) servers used by clients. RTT information is used in the dynamic RTT load balancing method, and is exchanged every 5 seconds.<br/>Default value: ENABLED<br/>Possible values = ENABLED, DISABLED
"""
try :
self._nwmetricexchange = nwmetricexchange
except Exception as e:
raise e
@property
def sessionexchange(self) :
"""Exchange persistent session entries with other GSLB sites every five seconds.<br/>Default value: ENABLED<br/>Possible values = ENABLED, DISABLED.
"""
try :
return self._sessionexchange
except Exception as e:
raise e
@sessionexchange.setter
def sessionexchange(self, sessionexchange) :
"""Exchange persistent session entries with other GSLB sites every five seconds.<br/>Default value: ENABLED<br/>Possible values = ENABLED, DISABLED
"""
try :
self._sessionexchange = sessionexchange
except Exception as e:
raise e
@property
def triggermonitor(self) :
"""Specify the conditions under which the GSLB service must be monitored by a monitor, if one is bound. Available settings function as follows:
* ALWAYS - Monitor the GSLB service at all times.
* MEPDOWN - Monitor the GSLB service only when the exchange of metrics through the Metrics Exchange Protocol (MEP) is disabled.
MEPDOWN_SVCDOWN - Monitor the service in either of the following situations:
* The exchange of metrics through MEP is disabled.
* The exchange of metrics through MEP is enabled but the status of the service, learned through metrics exchange, is DOWN.<br/>Default value: ALWAYS<br/>Possible values = ALWAYS, MEPDOWN, MEPDOWN_SVCDOWN.
"""
try :
return self._triggermonitor
except Exception as e:
raise e
@triggermonitor.setter
def triggermonitor(self, triggermonitor) :
"""Specify the conditions under which the GSLB service must be monitored by a monitor, if one is bound. Available settings function as follows:
* ALWAYS - Monitor the GSLB service at all times.
* MEPDOWN - Monitor the GSLB service only when the exchange of metrics through the Metrics Exchange Protocol (MEP) is disabled.
MEPDOWN_SVCDOWN - Monitor the service in either of the following situations:
* The exchange of metrics through MEP is disabled.
* The exchange of metrics through MEP is enabled but the status of the service, learned through metrics exchange, is DOWN.<br/>Default value: ALWAYS<br/>Possible values = ALWAYS, MEPDOWN, MEPDOWN_SVCDOWN
"""
try :
self._triggermonitor = triggermonitor
except Exception as e:
raise e
@property
def parentsite(self) :
"""Parent site of the GSLB site, in a parent-child topology.
"""
try :
return self._parentsite
except Exception as e:
raise e
@parentsite.setter
def parentsite(self, parentsite) :
"""Parent site of the GSLB site, in a parent-child topology.
"""
try :
self._parentsite = parentsite
except Exception as e:
raise e
@property
def clip(self) :
"""Cluster IP used to connect to remote cluster site for GSLB autosync.
"""
try :
return self._clip
except Exception as e:
raise e
@clip.setter
def clip(self, clip) :
"""Cluster IP used to connect to remote cluster site for GSLB autosync.
"""
try :
self._clip = clip
except Exception as e:
raise e
@property
def publicclip(self) :
"""Public cluster IP used to connect to remote cluster site for GSLB autosync if the remote cluster is behind a NAT.
"""
try :
return self._publicclip
except Exception as e:
raise e
@publicclip.setter
def publicclip(self, publicclip) :
"""Public cluster IP used to connect to remote cluster site for GSLB autosync if the remote cluster is behind a NAT.
"""
try :
self._publicclip = publicclip
except Exception as e:
raise e
@property
def status(self) :
"""Current metric exchange status.<br/>Possible values = ACTIVE, INACTIVE, DOWN.
"""
try :
return self._status
except Exception as e:
raise e
@property
def persistencemepstatus(self) :
"""Network metric and persistence exchange MEP connection status.<br/>Possible values = ACTIVE, INACTIVE, DOWN.
"""
try :
return self._persistencemepstatus
except Exception as e:
raise e
@property
def version(self) :
"""will be true if the remote site's version is ncore compatible with the local site.(>= 9.2).
"""
try :
return self._version
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
    """ converts nitro response into object and returns the object array in case of get request.
    Raises nitro_exception for ERROR-severity (or severity-less) error codes.
    """
    try :
        result = service.payload_formatter.string_to_resource(gslbsite_response, response, self.__class__.__name__)
        if(result.errorcode != 0) :
            # Error code 444 signals an expired session: drop it first.
            if (result.errorcode == 444) :
                service.clear_session(self)
            if result.severity :
                if (result.severity == "ERROR") :
                    raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
            else :
                raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
        return result.gslbsite
    except Exception as e :
        raise e
def _get_object_name(self) :
    """ Returns the value of object identifier argument
    (the site name), or None when no site name is set.
    """
    try :
        return str(self.sitename) if self.sitename else None
    except Exception as e :
        raise e
@classmethod
def add(cls, client, resource) :
    """ Use this API to add gslbsite.
    client   : nitro_service session to issue the request on.
    resource : a single gslbsite object or a list of them.
    """
    try :
        if type(resource) is not list :
            # Copy only the writable fields onto a fresh resource.
            addresource = gslbsite()
            addresource.sitename = resource.sitename
            addresource.sitetype = resource.sitetype
            addresource.siteipaddress = resource.siteipaddress
            addresource.publicip = resource.publicip
            addresource.metricexchange = resource.metricexchange
            addresource.nwmetricexchange = resource.nwmetricexchange
            addresource.sessionexchange = resource.sessionexchange
            addresource.triggermonitor = resource.triggermonitor
            addresource.parentsite = resource.parentsite
            addresource.clip = resource.clip
            addresource.publicclip = resource.publicclip
            return addresource.add_resource(client)
        else :
            # Bulk add: mirror the same field copy per element.
            if (resource and len(resource) > 0) :
                addresources = [ gslbsite() for _ in range(len(resource))]
                for i in range(len(resource)) :
                    addresources[i].sitename = resource[i].sitename
                    addresources[i].sitetype = resource[i].sitetype
                    addresources[i].siteipaddress = resource[i].siteipaddress
                    addresources[i].publicip = resource[i].publicip
                    addresources[i].metricexchange = resource[i].metricexchange
                    addresources[i].nwmetricexchange = resource[i].nwmetricexchange
                    addresources[i].sessionexchange = resource[i].sessionexchange
                    addresources[i].triggermonitor = resource[i].triggermonitor
                    addresources[i].parentsite = resource[i].parentsite
                    addresources[i].clip = resource[i].clip
                    addresources[i].publicclip = resource[i].publicclip
                result = cls.add_bulk_request(client, addresources)
            return result
    except Exception as e :
        raise e
@classmethod
def delete(cls, client, resource) :
    """ Use this API to delete gslbsite.
    resource may be a gslbsite object, a sitename string, or a list of either.
    """
    try :
        if type(resource) is not list :
            deleteresource = gslbsite()
            # Accept either a bare sitename or a full resource object.
            if type(resource) != type(deleteresource):
                deleteresource.sitename = resource
            else :
                deleteresource.sitename = resource.sitename
            return deleteresource.delete_resource(client)
        else :
            if type(resource[0]) != cls :
                # List of sitename strings.
                if (resource and len(resource) > 0) :
                    deleteresources = [ gslbsite() for _ in range(len(resource))]
                    for i in range(len(resource)) :
                        deleteresources[i].sitename = resource[i]
            else :
                # List of gslbsite objects.
                if (resource and len(resource) > 0) :
                    deleteresources = [ gslbsite() for _ in range(len(resource))]
                    for i in range(len(resource)) :
                        deleteresources[i].sitename = resource[i].sitename
            result = cls.delete_bulk_request(client, deleteresources)
            return result
    except Exception as e :
        raise e
@classmethod
def update(cls, client, resource) :
    """ Use this API to update gslbsite.
    Only the mutable MEP/monitoring fields are sent; sitename identifies
    the site to update.
    """
    try :
        if type(resource) is not list :
            updateresource = gslbsite()
            updateresource.sitename = resource.sitename
            updateresource.metricexchange = resource.metricexchange
            updateresource.nwmetricexchange = resource.nwmetricexchange
            updateresource.sessionexchange = resource.sessionexchange
            updateresource.triggermonitor = resource.triggermonitor
            return updateresource.update_resource(client)
        else :
            # Bulk update of the same mutable subset per element.
            if (resource and len(resource) > 0) :
                updateresources = [ gslbsite() for _ in range(len(resource))]
                for i in range(len(resource)) :
                    updateresources[i].sitename = resource[i].sitename
                    updateresources[i].metricexchange = resource[i].metricexchange
                    updateresources[i].nwmetricexchange = resource[i].nwmetricexchange
                    updateresources[i].sessionexchange = resource[i].sessionexchange
                    updateresources[i].triggermonitor = resource[i].triggermonitor
                result = cls.update_bulk_request(client, updateresources)
            return result
    except Exception as e :
        raise e
@classmethod
def unset(cls, client, resource, args) :
    """ Use this API to unset the properties of gslbsite resource.
    Properties that need to be unset are specified in args array.
    resource may be a gslbsite object, a sitename string, or a list of either.
    """
    try :
        if type(resource) is not list :
            unsetresource = gslbsite()
            # Accept either a bare sitename or a full resource object.
            if type(resource) != type(unsetresource):
                unsetresource.sitename = resource
            else :
                unsetresource.sitename = resource.sitename
            return unsetresource.unset_resource(client, args)
        else :
            if type(resource[0]) != cls :
                # List of sitename strings.
                if (resource and len(resource) > 0) :
                    unsetresources = [ gslbsite() for _ in range(len(resource))]
                    for i in range(len(resource)) :
                        unsetresources[i].sitename = resource[i]
            else :
                # List of gslbsite objects.
                if (resource and len(resource) > 0) :
                    unsetresources = [ gslbsite() for _ in range(len(resource))]
                    for i in range(len(resource)) :
                        unsetresources[i].sitename = resource[i].sitename
            result = cls.unset_bulk_request(client, unsetresources, args)
            return result
    except Exception as e :
        raise e
@classmethod
def get(cls, client, name="", option_="") :
    """ Use this API to fetch all the gslbsite resources that are configured on netscaler.
    name may be empty (fetch all), a single sitename, or a list of sitenames.
    """
    try :
        if not name :
            # No name: fetch every configured gslbsite.
            obj = gslbsite()
            response = obj.get_resources(client, option_)
        else :
            if type(name) != cls :
                if type(name) is not list :
                    # Single sitename lookup.
                    obj = gslbsite()
                    obj.sitename = name
                    response = obj.get_resource(client, option_)
                else :
                    # One request per sitename in the list.
                    if name and len(name) > 0 :
                        response = [gslbsite() for _ in range(len(name))]
                        obj = [gslbsite() for _ in range(len(name))]
                        for i in range(len(name)) :
                            obj[i] = gslbsite()
                            obj[i].sitename = name[i]
                            response[i] = obj[i].get_resource(client, option_)
        return response
    except Exception as e :
        raise e
@classmethod
def get_filtered(cls, client, filter_) :
    """ Use this API to fetch filtered set of gslbsite resources.
    filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
    """
    try :
        obj = gslbsite()
        # Pass the filter through the standard options wrapper.
        option_ = options()
        option_.filter = filter_
        response = obj.getfiltered(client, option_)
        return response
    except Exception as e :
        raise e
@classmethod
def count(cls, client) :
    """ Use this API to count the gslbsite resources configured on NetScaler.
    Returns 0 when the appliance reports no resources.
    """
    try :
        obj = gslbsite()
        option_ = options()
        option_.count = True
        response = obj.get_resources(client, option_)
        # The count rides on the first element; '___count' is set
        # dynamically by the payload formatter, hence the __dict__ access.
        if response :
            return response[0].__dict__['___count']
        return 0
    except Exception as e :
        raise e
@classmethod
def count_filtered(cls, client, filter_) :
    """ Use this API to count filtered the set of gslbsite resources.
    Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
    """
    try :
        obj = gslbsite()
        option_ = options()
        option_.count = True
        option_.filter = filter_
        response = obj.getfiltered(client, option_)
        # Same dynamic-attribute convention as count() above.
        if response :
            return response[0].__dict__['___count']
        return 0
    except Exception as e :
        raise e
class Sessionexchange:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class Nwmetricexchange:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class Status:
ACTIVE = "ACTIVE"
INACTIVE = "INACTIVE"
DOWN = "DOWN"
class Triggermonitor:
ALWAYS = "ALWAYS"
MEPDOWN = "MEPDOWN"
MEPDOWN_SVCDOWN = "MEPDOWN_SVCDOWN"
class Persistencemepstatus:
ACTIVE = "ACTIVE"
INACTIVE = "INACTIVE"
DOWN = "DOWN"
class Metricexchange:
ENABLED = "ENABLED"
DISABLED = "DISABLED"
class Sitetype:
REMOTE = "REMOTE"
LOCAL = "LOCAL"
class gslbsite_response(base_response) :
    """Envelope for NITRO responses carrying gslbsite resources."""
    def __init__(self, length=1) :
        # The empty-list assignment is immediately overwritten below;
        # kept as-is because this file is swagger/NITRO generated code.
        self.gslbsite = []
        self.errorcode = 0
        self.message = ""
        self.severity = ""
        self.sessionid = ""
        self.gslbsite = [gslbsite() for _ in range(length)]
| mahabs/nitro | nssrc/com/citrix/netscaler/nitro/resource/config/gslb/gslbsite.py | Python | apache-2.0 | 20,403 |
import abjad
def test_get_has_effective_indicator_01():
    """A str indicator attached to staff[2] is 'effective' from that
    leaf onward, but not for earlier leaves or the staff container."""
    staff = abjad.Staff("c'8 d'8 e'8 f'8")
    abjad.attach("foo", staff[2], context="Staff")

    assert not abjad.get.has_effective_indicator(staff, str)
    assert not abjad.get.has_effective_indicator(staff[0], str)
    assert not abjad.get.has_effective_indicator(staff[1], str)
    assert abjad.get.has_effective_indicator(staff[2], str)
    assert abjad.get.has_effective_indicator(staff[3], str)
| Abjad/abjad | tests/test_get_has_effective_indicator.py | Python | gpl-3.0 | 463 |
from . import globalvars
import os
import shutil
from uuid import uuid4
from sqlalchemy import (
Column,
Text,
ARRAY,
)
from sqlalchemy.orm import Query
from .func import get_validator_status
from .func import get_image_writer
from sqlalchemy.event import listen
from sqlalchemy.orm import Session
class BaseImage(object):
    """Base image class to be used as Image entity's parent.

    Attributes:
        basename(str):
            The base file name in ``temporary`` folder or the base folder name
            in the ``persistent`` folder.
        image_type(str):
            Image codec name for ``imageio``.
        extension(str):
            Image file extension in the ``persistent`` folder.

    Properties:
        available_sizes(list(tuple(int, int))):
            Available/Pending to be written image sizes/thumbnails in the
            persistent folder.
    """
    # Shared identifier between the temp file and the persistent folder.
    basename = Column(Text, nullable=False, unique=True)
    # Sizes stored as "WIDTHxHEIGHT" strings; parsed by available_sizes.
    _available_sizes = Column(ARRAY(Text), nullable=False)
    image_type = Column(Text, nullable=False)
    extension = Column(Text, nullable=False)
def from_file_obj(self,
                  file_obj,
                  validate=True):
    """Tries to download the file to ``temporary`` folder.

    Args:
        file_obj: a readable binary file-like object to copy from.
        validate: when True, probe the copy as an image and record its
            size/type/extension; on validation failure the temp file is
            removed. When False, ``basename`` is NOT set -- the caller
            is expected to validate later.

    Returns:
        the full file path of the ``temporary`` file if succeeds.
        else ``None``.
    """
    if globalvars.image_storage is None:
        return None
    # Find a suitable non-existent file name
    filename = None
    basename = None
    for cnt in range(0, 10):
        basename = uuid4().hex
        filename = os.path.join(
            globalvars.image_storage.persistent,
            basename,
        )
        # NOTE(review): existence is checked in the *persistent* folder
        # but the file is written under *temporary* -- collisions in the
        # temporary folder are not detected; confirm this is intended.
        if not os.path.exists(filename):
            filename = os.path.join(
                globalvars.image_storage.temporary,
                basename,
            )
            break
    else:
        # Ten collisions in a row: give up.
        return None
    # Try to download the file object.
    try:
        with open(filename, 'wb') as output_file:
            shutil.copyfileobj(file_obj, output_file)
    except Exception as err:
        # Best-effort cleanup of a partially written file.
        if os.path.exists(filename):
            os.remove(filename)
        return None
    if validate:
        vstatus = self.validate(temp_filename=filename)
        if vstatus is not None:
            # Only a validated image claims its basename.
            self.basename = basename
        else:
            if os.path.exists(filename):
                os.remove(filename)
            return None
    return filename
def validate(self, temp_filename=None):
    """Probe the temporary file as an image.

    On success records its size (as a "WIDTHxHEIGHT" string), codec type
    and extension on this instance and returns the validator status;
    returns None when the file is not a valid image.
    """
    vstatus = get_validator_status(temp_filename)
    if vstatus is not None:
        if self._available_sizes is None:
            self._available_sizes = list()
        self._available_sizes.append(
            '{}x{}'.format(
                vstatus.size[0],
                vstatus.size[1]
            )
        )
        self.image_type = vstatus.type
        self.extension = vstatus.extension
    return vstatus
@property
def available_sizes(self):
    """Recorded sizes parsed from "WIDTHxHEIGHT" strings into
    (width, height) integer tuples."""
    sizes = []
    for entry in self._available_sizes:
        width, height = entry.split('x')
        sizes.append((int(width), int(height)))
    return sizes
@property
def max_size(self):
    """Largest recorded size by pixel area; (0, 0) when none recorded.

    On area ties the earliest entry wins, matching the original
    strictly-greater comparison loop.
    """
    return max(self.available_sizes, key=lambda s: s[0] * s[1], default=(0, 0))
def get_url(self, size=None):
    """Return the persistent path for the given (width, height) size.

    Defaults to the first recorded size when size is falsy/omitted.
    """
    if not size:
        size = self.available_sizes[0]
    image_filename = '{}x{}.{}'.format(
        size[0],
        size[1],
        self.extension
    )
    # /persistent_path/basename/image_size.ext
    return os.path.join(
        globalvars.image_storage.persistent,
        self.basename,
        image_filename,
    )
def add_thumbnail(self, size=None):
    """Record a pending thumbnail size as a "WIDTHxHEIGHT" string.

    Silently does nothing when size is falsy; assumes _available_sizes
    has already been initialized (e.g. by validate()).
    """
    if size:
        self._available_sizes.append(
            '{}x{}'.format(
                size[0],
                size[1],
            )
        )
class BaseImageQuery(Query):
    """Query class to track all ``image`` instances.

    Image rows touched inside a transaction are queued here; their files
    are written/deleted only after the transaction commits, or the
    pending work is discarded on rollback, keeping disk and database in
    sync.
    """

    # NOTE(review): these queues are class-level, so they are shared by
    # every session in the process -- confirm single-session usage.
    _images_to_write = list()
    _images_to_delete = list()

    @classmethod
    def after_insert(cls, mapper, connection, target):
        # New row: its image files still need to be written to disk.
        cls._images_to_write.append(target)

    @classmethod
    def after_update(cls, mapper, connection, target):
        # Updated row: re-queue so new sizes/thumbnails get written.
        cls._images_to_write.append(target)

    @classmethod
    def after_delete(cls, mapper, connection, target):
        # Deleted row: its on-disk files must be removed after commit.
        cls._images_to_delete.append(target)

    @classmethod
    def after_soft_rollback(cls, session, previous_transaction):
        # Transaction aborted: discard temp artifacts and pending work.
        for img in cls._images_to_write:
            writer = get_image_writer(img)
            writer.clean_up()
        for img in cls._images_to_delete:
            writer = get_image_writer(img)
            writer.clean_up()
        cls._images_to_write.clear()
        cls._images_to_delete.clear()

    @classmethod
    def after_commit(cls, session):
        # Transaction committed: flush the queued writes and deletions.
        for img in cls._images_to_write:
            writer = get_image_writer(img)
            writer.write_all()
        for img in cls._images_to_delete:
            writer = get_image_writer(img)
            writer.delete_all()
        cls._images_to_write.clear()
        cls._images_to_delete.clear()
def register_listeners():
    """Wire the SQLAlchemy events that keep image files in sync with rows."""
    # propagate=True so subclasses of BaseImage are tracked as well.
    listen(BaseImage, 'after_insert', BaseImageQuery.after_insert, propagate=True)
    listen(BaseImage, 'after_update', BaseImageQuery.after_update, propagate=True)
    listen(BaseImage, 'after_delete', BaseImageQuery.after_delete, propagate=True)
    listen(Session, 'after_soft_rollback', BaseImageQuery.after_soft_rollback)
    listen(Session, 'after_commit', BaseImageQuery.after_commit)
| asif-mahmud/Pyramid-Apps | imageupload/imageupload/models/storage/image/base.py | Python | gpl-2.0 | 5,890 |
"""The tests for the Rfxtrx sensor platform."""
import pytest
from homeassistant.components.rfxtrx import DOMAIN
from homeassistant.components.rfxtrx.const import ATTR_EVENT
from homeassistant.core import State
from tests.common import MockConfigEntry, mock_restore_cache
from tests.components.rfxtrx.conftest import create_rfx_test_cfg
EVENT_SMOKE_DETECTOR_PANIC = "08200300a109000670"
EVENT_SMOKE_DETECTOR_NO_PANIC = "08200300a109000770"
EVENT_MOTION_DETECTOR_MOTION = "08200100a109000470"
EVENT_MOTION_DETECTOR_NO_MOTION = "08200100a109000570"
EVENT_LIGHT_DETECTOR_LIGHT = "08200100a109001570"
EVENT_LIGHT_DETECTOR_DARK = "08200100a109001470"
EVENT_AC_118CDEA_2_ON = "0b1100100118cdea02010f70"
async def test_one(hass, rfxtrx):
    """Test with 1 sensor."""
    entry_data = create_rfx_test_cfg(devices={"0b1100cd0213c7f230010f71": {}})
    mock_entry = MockConfigEntry(domain="rfxtrx", unique_id=DOMAIN, data=entry_data)

    mock_entry.add_to_hass(hass)

    await hass.config_entries.async_setup(mock_entry.entry_id)
    await hass.async_block_till_done()

    # The configured device shows up as an (initially off) binary sensor.
    state = hass.states.get("binary_sensor.ac_213c7f2_48")
    assert state
    assert state.state == "off"
    assert state.attributes.get("friendly_name") == "AC 213c7f2:48"
async def test_one_pt2262(hass, rfxtrx):
    """Test with 1 PT2262 sensor using masked data bits for on/off commands."""
    entry_data = create_rfx_test_cfg(
        devices={
            "0913000022670e013970": {
                "data_bits": 4,
                "command_on": 0xE,
                "command_off": 0x7,
            }
        }
    )
    mock_entry = MockConfigEntry(domain="rfxtrx", unique_id=DOMAIN, data=entry_data)

    mock_entry.add_to_hass(hass)

    await hass.config_entries.async_setup(mock_entry.entry_id)
    await hass.async_block_till_done()
    await hass.async_start()

    state = hass.states.get("binary_sensor.pt2262_22670e")
    assert state
    assert state.state == "off"  # probably ought to be unknown
    assert state.attributes.get("friendly_name") == "PT2262 22670e"

    # Command nibble 0xE turns the sensor on...
    await rfxtrx.signal("0913000022670e013970")
    state = hass.states.get("binary_sensor.pt2262_22670e")
    assert state.state == "on"

    # ...and nibble 0x7 turns it off again.
    await rfxtrx.signal("09130000226707013d70")
    state = hass.states.get("binary_sensor.pt2262_22670e")
    assert state.state == "off"
async def test_pt2262_unconfigured(hass, rfxtrx):
    """Two PT2262 devices without data_bits each get their own sensor."""
    config = create_rfx_test_cfg(
        devices={"0913000022670e013970": {}, "09130000226707013970": {}}
    )
    entry = MockConfigEntry(domain="rfxtrx", unique_id=DOMAIN, data=config)
    entry.add_to_hass(hass)

    await hass.config_entries.async_setup(entry.entry_id)
    await hass.async_block_till_done()
    await hass.async_start()

    # Both devices show up, off by default, with their own names.
    for entity_id, name in (
        ("binary_sensor.pt2262_22670e", "PT2262 22670e"),
        ("binary_sensor.pt2262_226707", "PT2262 226707"),
    ):
        sensor = hass.states.get(entity_id)
        assert sensor
        assert sensor.state == "off"  # probably ought to be unknown
        assert sensor.attributes.get("friendly_name") == name
@pytest.mark.parametrize(
    "state,event",
    [["on", "0b1100cd0213c7f230010f71"], ["off", "0b1100cd0213c7f230000f71"]],
)
async def test_state_restore(hass, rfxtrx, state, event):
    """State restoration."""
    entity_id = "binary_sensor.ac_213c7f2_48"

    # Seed the restore cache with a previous state before setting up.
    mock_restore_cache(hass, [State(entity_id, state, attributes={ATTR_EVENT: event})])

    config = create_rfx_test_cfg(devices={"0b1100cd0213c7f230010f71": {}})
    entry = MockConfigEntry(domain="rfxtrx", unique_id=DOMAIN, data=config)
    entry.add_to_hass(hass)

    await hass.config_entries.async_setup(entry.entry_id)
    await hass.async_block_till_done()

    # The sensor comes back with the cached state.
    assert hass.states.get(entity_id).state == state
async def test_several(hass, rfxtrx):
    """Three configured devices all appear, and group commands affect both AC units."""
    config = create_rfx_test_cfg(
        devices={
            "0b1100cd0213c7f230010f71": {},
            "0b1100100118cdea02010f70": {},
            "0b1100100118cdea03010f70": {},
        }
    )
    entry = MockConfigEntry(domain="rfxtrx", unique_id=DOMAIN, data=config)
    entry.add_to_hass(hass)

    await hass.config_entries.async_setup(entry.entry_id)
    await hass.async_block_till_done()

    # All three sensors exist, start off, and carry friendly names.
    for entity_id, name in (
        ("binary_sensor.ac_213c7f2_48", "AC 213c7f2:48"),
        ("binary_sensor.ac_118cdea_2", "AC 118cdea:2"),
        ("binary_sensor.ac_118cdea_3", "AC 118cdea:3"),
    ):
        sensor = hass.states.get(entity_id)
        assert sensor
        assert sensor.state == "off"
        assert sensor.attributes.get("friendly_name") == name

    # "2: Group on" switches both 118cdea sensors on ...
    await rfxtrx.signal("0b1100100118cdea03040f70")
    assert hass.states.get("binary_sensor.ac_118cdea_2").state == "on"
    assert hass.states.get("binary_sensor.ac_118cdea_3").state == "on"

    # ... and "2: Group off" switches them back off.
    await rfxtrx.signal("0b1100100118cdea03030f70")
    assert hass.states.get("binary_sensor.ac_118cdea_2").state == "off"
    assert hass.states.get("binary_sensor.ac_118cdea_3").state == "off"
async def test_discover(hass, rfxtrx_automatic):
    """With automatic discovery, an "on" packet creates a sensor that is on."""
    rfxtrx = rfxtrx_automatic

    for event, entity_id in (
        ("0b1100100118cdea02010f70", "binary_sensor.ac_118cdea_2"),
        ("0b1100100118cdeb02010f70", "binary_sensor.ac_118cdeb_2"),
    ):
        await rfxtrx.signal(event)
        sensor = hass.states.get(entity_id)
        assert sensor
        assert sensor.state == "on"
async def test_off_delay_restore(hass, rfxtrx):
    """Make sure binary sensor restore as off, if off delay is active."""
    # Seed the restore cache with the sensor "on", as if HA stopped while
    # the off-delay timer was still pending.
    mock_restore_cache(
        hass,
        [
            State(
                "binary_sensor.ac_118cdea_2",
                "on",
                attributes={ATTR_EVENT: EVENT_AC_118CDEA_2_ON},
            )
        ],
    )

    entry_data = create_rfx_test_cfg(devices={EVENT_AC_118CDEA_2_ON: {"off_delay": 5}})
    mock_entry = MockConfigEntry(domain="rfxtrx", unique_id=DOMAIN, data=entry_data)
    mock_entry.add_to_hass(hass)

    await hass.config_entries.async_setup(mock_entry.entry_id)
    await hass.async_block_till_done()
    await hass.async_start()

    # With an off_delay configured, the cached "on" must not be restored.
    state = hass.states.get("binary_sensor.ac_118cdea_2")
    assert state
    assert state.state == "off"
async def test_off_delay(hass, rfxtrx, timestep):
    """Test with discovery."""
    entry_data = create_rfx_test_cfg(
        devices={"0b1100100118cdea02010f70": {"off_delay": 5}}
    )
    mock_entry = MockConfigEntry(domain="rfxtrx", unique_id=DOMAIN, data=entry_data)
    mock_entry.add_to_hass(hass)

    await hass.config_entries.async_setup(mock_entry.entry_id)
    await hass.async_block_till_done()
    await hass.async_start()

    state = hass.states.get("binary_sensor.ac_118cdea_2")
    assert state
    assert state.state == "off"

    # An "on" event starts the 5 second off-delay timer.
    await rfxtrx.signal("0b1100100118cdea02010f70")
    state = hass.states.get("binary_sensor.ac_118cdea_2")
    assert state
    assert state.state == "on"

    # 4 seconds in: still within the delay, so still on.
    await timestep(4)
    state = hass.states.get("binary_sensor.ac_118cdea_2")
    assert state
    assert state.state == "on"

    # 8 seconds in: the delay has expired, so the sensor turned off.
    await timestep(4)
    state = hass.states.get("binary_sensor.ac_118cdea_2")
    assert state
    assert state.state == "off"

    # A fresh "on" event turns the sensor on again ...
    await rfxtrx.signal("0b1100100118cdea02010f70")
    state = hass.states.get("binary_sensor.ac_118cdea_2")
    assert state
    assert state.state == "on"

    # ... and a re-trigger at the 3 second mark restarts the timer, so the
    # sensor is still on 4 seconds after the re-trigger ...
    await timestep(3)
    await rfxtrx.signal("0b1100100118cdea02010f70")
    await timestep(4)
    state = hass.states.get("binary_sensor.ac_118cdea_2")
    assert state
    assert state.state == "on"

    # ... and off once the full delay elapses without another event.
    await timestep(4)
    state = hass.states.get("binary_sensor.ac_118cdea_2")
    assert state
    assert state.state == "off"
async def test_panic(hass, rfxtrx_automatic):
    """A panic packet turns the discovered smoke sensor on; no-panic turns it off."""
    rfxtrx = rfxtrx_automatic
    entity_id = "binary_sensor.kd101_smoke_detector_a10900_32"

    await rfxtrx.signal(EVENT_SMOKE_DETECTOR_PANIC)
    sensor = hass.states.get(entity_id)
    assert sensor.state == "on"
    assert sensor.attributes.get("device_class") == "smoke"

    await rfxtrx.signal(EVENT_SMOKE_DETECTOR_NO_PANIC)
    assert hass.states.get(entity_id).state == "off"
async def test_motion(hass, rfxtrx_automatic):
    """A motion packet turns the discovered motion sensor on; no-motion turns it off."""
    rfxtrx = rfxtrx_automatic
    entity_id = "binary_sensor.x10_security_motion_detector_a10900_32"

    await rfxtrx.signal(EVENT_MOTION_DETECTOR_MOTION)
    sensor = hass.states.get(entity_id)
    assert sensor.state == "on"
    assert sensor.attributes.get("device_class") == "motion"

    await rfxtrx.signal(EVENT_MOTION_DETECTOR_NO_MOTION)
    assert hass.states.get(entity_id).state == "off"
async def test_light(hass, rfxtrx_automatic):
    """Light/dark packets toggle the discovered sensor on and off."""
    rfxtrx = rfxtrx_automatic
    entity_id = "binary_sensor.x10_security_motion_detector_a10900_32"

    await rfxtrx.signal(EVENT_LIGHT_DETECTOR_LIGHT)
    assert hass.states.get(entity_id).state == "on"

    await rfxtrx.signal(EVENT_LIGHT_DETECTOR_DARK)
    assert hass.states.get(entity_id).state == "off"
async def test_pt2262_duplicate_id(hass, rfxtrx):
    """Test with 1 sensor."""
    # The two configured codes differ only in the command nibble, so with
    # data_bits=4 they presumably resolve to the same masked device id and
    # only one entity is created — the asserts below only check the first
    # id. (Inferred from the test name; confirm against the integration.)
    entry_data = create_rfx_test_cfg(
        devices={
            "0913000022670e013970": {
                "data_bits": 4,
                "command_on": 0xE,
                "command_off": 0x7,
            },
            "09130000226707013970": {
                "data_bits": 4,
                "command_on": 0xE,
                "command_off": 0x7,
            },
        }
    )
    mock_entry = MockConfigEntry(domain="rfxtrx", unique_id=DOMAIN, data=entry_data)
    mock_entry.add_to_hass(hass)

    await hass.config_entries.async_setup(mock_entry.entry_id)
    await hass.async_block_till_done()
    await hass.async_start()

    state = hass.states.get("binary_sensor.pt2262_22670e")
    assert state
    assert state.state == "off"  # probably ought to be unknown
    assert state.attributes.get("friendly_name") == "PT2262 22670e"
| jawilson/home-assistant | tests/components/rfxtrx/test_binary_sensor.py | Python | apache-2.0 | 10,280 |
# -*- coding: utf-8 -*-
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import os
from django.core.cache import caches
from django.http import HttpResponse
from django.test.client import RequestFactory
from django.test.utils import override_settings
from django_jinja.backend import Jinja2
from mock import call, Mock, patch
from pyquery import PyQuery as pq
from bedrock.base.urlresolvers import reverse
from bedrock.firefox import views as fx_views
from bedrock.firefox.firefox_details import FirefoxDesktop, FirefoxAndroid
from bedrock.mozorg.tests import TestCase
# Directory holding the product-details JSON fixtures used by these tests.
TEST_DATA_DIR = os.path.join(os.path.dirname(__file__), 'test_data')
PROD_DETAILS_DIR = os.path.join(TEST_DATA_DIR, 'product_details_json')

# Minimal platform mapping fixture (not referenced in the tests visible
# here; presumably shared with other test modules).
GOOD_PLATS = {'Windows': {}, 'OS X': {}, 'Linux': {}}

# Jinja environment whose globals get patched by TestInstallerHelp below.
jinja_env = Jinja2.get_default().env
class TestInstallerHelp(TestCase):
    """Tests for the download buttons on the firefox.installer-help view."""

    def setUp(self):
        # Replace the download_firefox helper in the Jinja globals so the
        # tests can assert on the calls the rendered template makes.
        self.button_mock = Mock()
        self.patcher = patch.dict(jinja_env.globals,
                                  download_firefox=self.button_mock)
        self.patcher.start()
        self.view_name = 'firefox.installer-help'
        with self.activate('en-US'):
            self.url = reverse(self.view_name)

    def tearDown(self):
        self.patcher.stop()

    def test_buttons_use_lang(self):
        """
        The buttons should use the lang from the query parameter.
        """
        self.client.get(self.url, {
            'installer_lang': 'fr'
        })
        # One button per channel: release, beta, alpha (dev edition).
        self.button_mock.assert_has_calls([
            call(force_direct=True, force_full_installer=True, locale='fr'),
            call('beta', force_direct=True, force_full_installer=True, locale='fr'),
            call('alpha', force_direct=True, force_full_installer=True, locale='fr', platform='desktop'),
        ])

    def test_buttons_ignore_non_lang(self):
        """
        The buttons should ignore an invalid lang.
        """
        self.client.get(self.url, {
            'installer_lang': 'not-a-locale'
        })
        # Invalid locale falls back to locale=None for every channel.
        self.button_mock.assert_has_calls([
            call(force_direct=True, force_full_installer=True, locale=None),
            call('beta', force_direct=True, force_full_installer=True, locale=None),
            call('alpha', force_direct=True, force_full_installer=True, locale=None, platform='desktop'),
        ])

    def test_invalid_channel_specified(self):
        """
        All buttons should show when channel is invalid.
        """
        self.client.get(self.url, {
            'channel': 'dude',
        })
        self.button_mock.assert_has_calls([
            call(force_direct=True, force_full_installer=True, locale=None),
            call('beta', force_direct=True, force_full_installer=True, locale=None),
            call('alpha', force_direct=True, force_full_installer=True, locale=None, platform='desktop'),
        ])

    def test_one_button_when_channel_specified(self):
        """
        There should be only one button when the channel is given.
        """
        self.client.get(self.url, {
            'channel': 'beta',
        })
        self.button_mock.assert_called_once_with('beta', force_direct=True,
                                                 force_full_installer=True,
                                                 locale=None)
class TestFirefoxAll(TestCase):
    """Tests for the unified /firefox/all/ downloads page."""

    pd_cache = caches['product-details']

    def setUp(self):
        self.pd_cache.clear()
        # Build product details from on-disk JSON fixtures and patch them
        # into the views module so the page renders fixture data.
        self.firefox_desktop = FirefoxDesktop(json_dir=PROD_DETAILS_DIR)
        self.firefox_android = FirefoxAndroid(json_dir=PROD_DETAILS_DIR)
        self.patcher = patch.object(
            fx_views, 'firefox_desktop', self.firefox_desktop)
        self.patcher.start()

    def tearDown(self):
        self.patcher.stop()

    def test_all_builds_results(self):
        """
        The unified page should display builds for all products
        """
        resp = self.client.get(reverse('firefox.all'))
        doc = pq(resp.content)
        # 5 desktop channels + 3 android channels = 8 build sections.
        assert len(doc('.c-all-downloads-build')) == 8

        # Each desktop channel lists one <li> per localized build, and the
        # en-US entry offers 7 platform download links.
        desktop_release_builds = len(self.firefox_desktop.get_filtered_full_builds('release'))
        assert len(doc('.c-locale-list[data-product="desktop_release"] > li')) == desktop_release_builds
        assert len(doc('.c-locale-list[data-product="desktop_release"] > li[data-language="en-US"] > ul > li > a')) == 7

        desktop_beta_builds = len(self.firefox_desktop.get_filtered_full_builds('beta'))
        assert len(doc('.c-locale-list[data-product="desktop_beta"] > li')) == desktop_beta_builds
        assert len(doc('.c-locale-list[data-product="desktop_beta"] > li[data-language="en-US"] > ul > li > a')) == 7

        desktop_developer_builds = len(self.firefox_desktop.get_filtered_full_builds('alpha'))
        assert len(doc('.c-locale-list[data-product="desktop_developer"] > li')) == desktop_developer_builds
        assert len(doc('.c-locale-list[data-product="desktop_developer"] > li[data-language="en-US"] > ul > li > a')) == 7

        desktop_nightly_builds = len(self.firefox_desktop.get_filtered_full_builds('nightly'))
        assert len(doc('.c-locale-list[data-product="desktop_nightly"] > li')) == desktop_nightly_builds
        assert len(doc('.c-locale-list[data-product="desktop_nightly"] > li[data-language="en-US"] > ul > li > a')) == 7

        desktop_esr_builds = len(self.firefox_desktop.get_filtered_full_builds('esr'))
        assert len(doc('.c-locale-list[data-product="desktop_esr"] > li')) == desktop_esr_builds
        assert len(doc('.c-locale-list[data-product="desktop_esr"] > li[data-language="en-US"] > ul > li > a')) == 7

        # Android channels use the "multi" language entry with 2 links.
        android_release_builds = len(self.firefox_android.get_filtered_full_builds('release'))
        assert len(doc('.c-locale-list[data-product="android_release"] > li')) == android_release_builds
        assert len(doc('.c-locale-list[data-product="android_release"] > li[data-language="multi"] > ul > li > a')) == 2

        android_beta_builds = len(self.firefox_android.get_filtered_full_builds('beta'))
        assert len(doc('.c-locale-list[data-product="android_beta"] > li')) == android_beta_builds
        assert len(doc('.c-locale-list[data-product="android_beta"] > li[data-language="multi"] > ul > li > a')) == 2

        android_nightly_builds = len(self.firefox_android.get_filtered_full_builds('nightly'))
        assert len(doc('.c-locale-list[data-product="android_nightly"] > li')) == android_nightly_builds
        assert len(doc('.c-locale-list[data-product="android_nightly"] > li[data-language="multi"] > ul > li > a')) == 2

    def test_no_locale_details(self):
        """
        When a localized build has been added to the Firefox details while the
        locale details are not updated yet, the filtered build list should not
        include the localized build.
        """
        builds = self.firefox_desktop.get_filtered_full_builds('release')
        assert 'uz' in self.firefox_desktop.firefox_primary_builds
        assert 'uz' not in self.firefox_desktop.languages
        assert len([build for build in builds if build['locale'] == 'uz']) == 0
@patch('bedrock.firefox.views.l10n_utils.render', return_value=HttpResponse())
class TestWhatsNew(TestCase):
    """Template selection and context for the /firefox/whatsnew/ view.

    The class-level patch replaces ``l10n_utils.render`` for every test
    method, so each method receives the mock as ``render_mock`` and
    inspects ``render_mock.call_args`` (positional args: request,
    template list, context) instead of a real response.
    """

    def setUp(self):
        self.view = fx_views.WhatsnewView.as_view()
        # Requests are issued with a Firefox user agent.
        self.rf = RequestFactory(HTTP_USER_AGENT='Firefox')

    # begin context variable tests
    @override_settings(DEV=True)
    def test_context_variables_whatsnew(self, render_mock):
        """Should pass the correct context variables"""
        req = self.rf.get('/en-US/firefox/whatsnew/')
        self.view(req, version='72.0')
        template = render_mock.call_args[0][1]
        ctx = render_mock.call_args[0][2]
        assert template == ['firefox/whatsnew/whatsnew-fx71.html']
        assert ctx['version'] == '72.0'
        assert ctx['analytics_version'] == '72'
        assert ctx['entrypoint'] == 'mozilla.org-whatsnew72'
        assert ctx['campaign'] == 'whatsnew72'
        assert ctx['utm_params'] == ('utm_source=mozilla.org-whatsnew72&utm_medium=referral'
                                     '&utm_campaign=whatsnew72&entrypoint=mozilla.org-whatsnew72')

    @override_settings(DEV=True)
    def test_context_variables_whatsnew_beta(self, render_mock):
        """Should pass the correct context variables for beta channel"""
        req = self.rf.get('/en-US/firefox/whatsnew/')
        self.view(req, version='72.0beta')
        template = render_mock.call_args[0][1]
        ctx = render_mock.call_args[0][2]
        assert template == ['firefox/whatsnew/whatsnew-fx70-en.html']
        assert ctx['version'] == '72.0beta'
        assert ctx['analytics_version'] == '72beta'
        assert ctx['entrypoint'] == 'mozilla.org-whatsnew72beta'
        assert ctx['campaign'] == 'whatsnew72beta'
        assert ctx['utm_params'] == ('utm_source=mozilla.org-whatsnew72beta&utm_medium=referral'
                                     '&utm_campaign=whatsnew72beta&entrypoint=mozilla.org-whatsnew72beta')

    @override_settings(DEV=True)
    def test_context_variables_whatsnew_developer(self, render_mock):
        """Should pass the correct context variables for developer channel"""
        req = self.rf.get('/en-US/firefox/whatsnew/')
        self.view(req, version='72.0a2')
        template = render_mock.call_args[0][1]
        ctx = render_mock.call_args[0][2]
        assert template == ['firefox/developer/whatsnew.html']
        assert ctx['version'] == '72.0a2'
        assert ctx['analytics_version'] == '72developer'
        assert ctx['entrypoint'] == 'mozilla.org-whatsnew72developer'
        assert ctx['campaign'] == 'whatsnew72developer'
        assert ctx['utm_params'] == ('utm_source=mozilla.org-whatsnew72developer&utm_medium=referral'
                                     '&utm_campaign=whatsnew72developer&entrypoint=mozilla.org-whatsnew72developer')

    @override_settings(DEV=True)
    def test_context_variables_whatsnew_nightly(self, render_mock):
        """Should pass the correct context variables for nightly channel"""
        req = self.rf.get('/en-US/firefox/whatsnew/')
        self.view(req, version='72.0a1')
        template = render_mock.call_args[0][1]
        ctx = render_mock.call_args[0][2]
        assert template == ['firefox/nightly_whatsnew.html']
        assert ctx['version'] == '72.0a1'
        assert ctx['analytics_version'] == '72nightly'
        assert ctx['entrypoint'] == 'mozilla.org-whatsnew72nightly'
        assert ctx['campaign'] == 'whatsnew72nightly'
        assert ctx['utm_params'] == ('utm_source=mozilla.org-whatsnew72nightly&utm_medium=referral'
                                     '&utm_campaign=whatsnew72nightly&entrypoint=mozilla.org-whatsnew72nightly')
    # end context variable tests

    # begin nightly whatsnew tests
    @override_settings(DEV=True)
    def test_fx_nightly_68_0_a1_whatsnew(self, render_mock):
        """Should show nightly whatsnew template"""
        req = self.rf.get('/en-US/firefox/whatsnew/')
        self.view(req, version='68.0a1')
        template = render_mock.call_args[0][1]
        assert template == ['firefox/nightly_whatsnew.html']
    # end nightly whatsnew tests

    # begin beta whatsnew tests
    @override_settings(DEV=True)
    def test_fx_72beta_whatsnew(self, render_mock):
        """Should show Fx70 whatsnew template"""
        req = self.rf.get('/en-US/firefox/whatsnew/')
        self.view(req, version='72.0beta')
        template = render_mock.call_args[0][1]
        assert template == ['firefox/whatsnew/whatsnew-fx70-en.html']

    @override_settings(DEV=True)
    def test_fx_oldbeta_whatsnew(self, render_mock):
        """Should show default whatsnew template"""
        req = self.rf.get('/en-US/firefox/whatsnew/')
        self.view(req, version='71.0beta')
        template = render_mock.call_args[0][1]
        assert template == ['firefox/whatsnew/index.html']
    # end beta whatsnew tests

    # begin dev edition whatsnew tests
    @override_settings(DEV=True)
    def test_fx_dev_browser_35_0_a2_whatsnew(self, render_mock):
        """Should show default whatsnew template"""
        req = self.rf.get('/en-US/firefox/whatsnew/')
        self.view(req, version='35.0a2')
        template = render_mock.call_args[0][1]
        assert template == ['firefox/whatsnew/index.html']

    @override_settings(DEV=True)
    def test_fx_dev_browser_57_0_a2_whatsnew(self, render_mock):
        """Should show dev browser 57 whatsnew template"""
        req = self.rf.get('/en-US/firefox/whatsnew/')
        self.view(req, version='57.0a2')
        template = render_mock.call_args[0][1]
        assert template == ['firefox/developer/whatsnew.html']

    @override_settings(DEV=True)
    @patch.dict(os.environ, SWITCH_DEV_WHATSNEW_68='False')
    def test_fx_dev_browser_68_0_a2_whatsnew_off(self, render_mock):
        """Should show regular dev browser whatsnew template"""
        req = self.rf.get('/en-US/firefox/whatsnew/')
        self.view(req, version='68.0a2')
        template = render_mock.call_args[0][1]
        assert template == ['firefox/developer/whatsnew.html']
    # end dev edition whatsnew tests

    @override_settings(DEV=True)
    def test_rv_prefix(self, render_mock):
        """Prefixed oldversion shouldn't impact version sniffing."""
        req = self.rf.get('/en-US/firefox/whatsnew/?oldversion=rv:10.0')
        self.view(req, version='54.0')
        template = render_mock.call_args[0][1]
        assert template == ['firefox/whatsnew/index.html']

    @override_settings(DEV=True)
    @patch.object(fx_views, 'ftl_file_is_active', lambda *x: True)
    def test_fx_default_whatsnew_sync(self, render_mock):
        """Should use sync template for 60.0"""
        req = self.rf.get('/en-US/firefox/whatsnew/')
        self.view(req, version='60.0')
        template = render_mock.call_args[0][1]
        assert template == ['firefox/whatsnew/index-account.html']

    @override_settings(DEV=True)
    @patch.object(fx_views, 'ftl_file_is_active', lambda *x: False)
    def test_fx_default_whatsnew_fallback(self, render_mock):
        """Should use standard template for 60.0 as fallback"""
        req = self.rf.get('/en-US/firefox/whatsnew/')
        self.view(req, version='60.0')
        template = render_mock.call_args[0][1]
        assert template == ['firefox/whatsnew/index.html']

    @override_settings(DEV=True)
    @patch.object(fx_views, 'ftl_file_is_active', lambda *x: True)
    def test_fx_default_whatsnew(self, render_mock):
        """Should use standard template for 59.0"""
        req = self.rf.get('/en-US/firefox/whatsnew/')
        self.view(req, version='59.0')
        template = render_mock.call_args[0][1]
        assert template == ['firefox/whatsnew/index.html']

    # begin id locale-specific tests
    @override_settings(DEV=True)
    def test_id_locale_template_lite(self, render_mock):
        """Should use id locale specific template for Firefox Lite"""
        req = self.rf.get('/firefox/whatsnew/')
        req.locale = 'id'
        self.view(req, version='63.0')
        template = render_mock.call_args[0][1]
        assert template == ['firefox/whatsnew/index-lite.id.html']
    # end id locale-specific tests

    # begin 67.0.5 whatsnew tests
    def test_fx_67_0_1(self, render_mock):
        """Should use trailhead template for 67.0.1"""
        req = self.rf.get('/firefox/whatsnew/')
        req.locale = 'en-US'
        self.view(req, version='67.0.1')
        template = render_mock.call_args[0][1]
        assert template == ['firefox/whatsnew/whatsnew-fx67.0.5.html']

    def test_fx_67_0_1_locales(self, render_mock):
        """Should use standard template for 67.0.1 for other locales"""
        req = self.rf.get('/firefox/whatsnew/')
        req.locale = 'es-ES'
        self.view(req, version='67.0.1')
        template = render_mock.call_args[0][1]
        assert template == ['firefox/whatsnew/whatsnew-fx67.html']
    # end 67.0.5 whatsnew tests

    # begin 68.0 whatsnew tests
    def test_fx_68_0(self, render_mock):
        """Should use trailhead template for 68.0"""
        req = self.rf.get('/firefox/whatsnew/')
        req.locale = 'en-US'
        self.view(req, version='68.0')
        template = render_mock.call_args[0][1]
        assert template == ['firefox/whatsnew/whatsnew-fx68-trailhead.html']

    def test_fx_68_0_locales(self, render_mock):
        """Should use standard template for 68.0 for other locales"""
        req = self.rf.get('/firefox/whatsnew/')
        req.locale = 'es-ES'
        self.view(req, version='68.0')
        template = render_mock.call_args[0][1]
        assert template == ['firefox/whatsnew/whatsnew-fx68.html']
    # end 68.0 whatsnew tests

    # begin 69.0 whatsnew tests
    def test_fx_69_0(self, render_mock):
        """Should use whatsnew-69 template for 69.0"""
        req = self.rf.get('/firefox/whatsnew/')
        req.locale = 'en-US'
        self.view(req, version='69.0')
        template = render_mock.call_args[0][1]
        assert template == ['firefox/whatsnew/whatsnew-fx69.html']
    # end 69.0 whatsnew tests

    # begin 70.0 whatsnew tests
    def test_fx_70_0_en(self, render_mock):
        """Should use whatsnew-70-en template for 70.0 for en- locales"""
        req = self.rf.get('/firefox/whatsnew/')
        req.locale = 'en-US'
        self.view(req, version='70.0')
        template = render_mock.call_args[0][1]
        assert template == ['firefox/whatsnew/whatsnew-fx70-en.html']

    def test_fx_70_0_de(self, render_mock):
        """Should use whatsnew-70-de template for 70.0 in de locale"""
        req = self.rf.get('/firefox/whatsnew/')
        req.locale = 'de'
        self.view(req, version='70.0')
        template = render_mock.call_args[0][1]
        assert template == ['firefox/whatsnew/whatsnew-fx70-de.html']

    def test_fx_70_0_fr(self, render_mock):
        """Should use whatsnew-70-fr template for 70.0 in fr locale"""
        req = self.rf.get('/firefox/whatsnew/')
        req.locale = 'fr'
        self.view(req, version='70.0')
        template = render_mock.call_args[0][1]
        assert template == ['firefox/whatsnew/whatsnew-fx70-fr.html']

    def test_fx_70_0(self, render_mock):
        """Should use default whatsnew template for 70.0 for other locales"""
        req = self.rf.get('/firefox/whatsnew/')
        req.locale = 'es-ES'
        self.view(req, version='70.0')
        template = render_mock.call_args[0][1]
        assert template == ['firefox/whatsnew/index.html']
    # end 70.0 whatsnew tests

    # begin 71.0 whatsnew tests
    @patch.object(fx_views, 'lang_file_is_active', lambda *x: True)
    def test_fx_71_0_0(self, render_mock):
        """Should use whatsnew-fx71 template for 71.0"""
        req = self.rf.get('/firefox/whatsnew/')
        req.locale = 'en-US'
        self.view(req, version='71.0')
        template = render_mock.call_args[0][1]
        assert template == ['firefox/whatsnew/whatsnew-fx71.html']

    @patch.object(fx_views, 'lang_file_is_active', lambda *x: False)
    def test_fx_71_0_0_fallback(self, render_mock):
        """Should use default template for 71.0 as fallback"""
        req = self.rf.get('/firefox/whatsnew/')
        req.locale = 'es-ES'
        self.view(req, version='71.0')
        template = render_mock.call_args[0][1]
        assert template == ['firefox/whatsnew/index-account.html']
    # end 71.0 whatsnew tests

    # begin 72.0 whatsnew tests
    @patch.object(fx_views, 'lang_file_is_active', lambda *x: True)
    def test_fx_72_0_0(self, render_mock):
        """Should use whatsnew-fx71 template for 72.0"""
        req = self.rf.get('/firefox/whatsnew/')
        req.locale = 'en-US'
        self.view(req, version='72.0')
        template = render_mock.call_args[0][1]
        assert template == ['firefox/whatsnew/whatsnew-fx71.html']

    @patch.object(fx_views, 'lang_file_is_active', lambda *x: False)
    def test_fx_72_0_0_fallback(self, render_mock):
        """Should use default template for 72.0 as fallback"""
        req = self.rf.get('/firefox/whatsnew/')
        req.locale = 'es-ES'
        self.view(req, version='72.0')
        template = render_mock.call_args[0][1]
        assert template == ['firefox/whatsnew/index-account.html']
    # end 72.0 whatsnew tests

    # begin 73.0 whatsnew tests
    @patch.object(fx_views, 'lang_file_is_active', lambda *x: True)
    def test_fx_73_0_0(self, render_mock):
        """Should use whatsnew-fx73 template for 73.0"""
        req = self.rf.get('/firefox/whatsnew/')
        req.locale = 'en-US'
        self.view(req, version='73.0')
        template = render_mock.call_args[0][1]
        assert template == ['firefox/whatsnew/whatsnew-fx73.html']
    # end 73.0 whatsnew tests
@patch('bedrock.firefox.views.l10n_utils.render', return_value=HttpResponse())
class TestWhatsNewIndia(TestCase):
    """Template selection for the India-specific whatsnew view."""

    def setUp(self):
        self.view = fx_views.WhatsNewIndiaView.as_view()
        self.rf = RequestFactory(HTTP_USER_AGENT='Firefox')

    def test_fx_india(self, render_mock):
        """Should use whatsnew-india template for india for en-* locales"""
        request = self.rf.get('/firefox/whatsnew/india/')
        request.locale = 'en-GB'
        self.view(request, version='70.0')
        assert render_mock.call_args[0][1] == ['firefox/whatsnew/index-lite.html']
@patch('bedrock.firefox.views.l10n_utils.render', return_value=HttpResponse())
class TestFirstRun(TestCase):
    """Template selection and redirects for the /firefox/firstrun/ view.

    The class-level patch provides ``render_mock`` to every test method,
    which inspects the template list in ``render_mock.call_args``.
    """

    def setUp(self):
        self.view = fx_views.FirstrunView.as_view()
        self.rf = RequestFactory()

    @override_settings(DEV=True)
    def test_fx_firstrun_40_0(self, render_mock):
        """Should use default firstrun template"""
        req = self.rf.get('/en-US/firefox/firstrun/')
        self.view(req, version='40.0')
        template = render_mock.call_args[0][1]
        assert template == ['firefox/firstrun/firstrun.html']

    @override_settings(DEV=True)
    def test_fx_firstrun_56_0(self, render_mock):
        """Should use the default firstrun template"""
        req = self.rf.get('/en-US/firefox/firstrun/')
        self.view(req, version='56.0a2')
        template = render_mock.call_args[0][1]
        assert template == ['firefox/firstrun/firstrun.html']

    @override_settings(DEV=True)
    def test_fxdev_firstrun_57_0(self, render_mock):
        """Should use 57 quantum dev edition firstrun template"""
        req = self.rf.get('/en-US/firefox/firstrun/')
        self.view(req, version='57.0a2')
        template = render_mock.call_args[0][1]
        assert template == ['firefox/developer/firstrun.html']

    @override_settings(DEV=True)
    def test_fx_firstrun_57_0(self, render_mock):
        """Should use 57 quantum firstrun template"""
        req = self.rf.get('/en-US/firefox/firstrun/')
        self.view(req, version='57.0')
        template = render_mock.call_args[0][1]
        assert template == ['firefox/firstrun/firstrun.html']

    # test redirect to /firefox/new/ for legacy /firstrun URLs - Bug 1343823
    @override_settings(DEV=True)
    def test_fx_firstrun_legacy_redirect(self, render_mock):
        req = self.rf.get('/firefox/firstrun/')
        req.locale = 'en-US'
        resp = self.view(req, version='39.0')
        assert resp.status_code == 301
        assert resp['location'].endswith('/firefox/new/')

    def test_fx_firstrun_dev_edition_legacy_redirect(self, render_mock):
        req = self.rf.get('/firefox/firstrun/')
        req.locale = 'en-US'
        resp = self.view(req, version='39.0a2')
        assert resp.status_code == 301
        assert resp['location'].endswith('/firefox/new/')
| sgarrity/bedrock | bedrock/firefox/tests/test_base.py | Python | mpl-2.0 | 23,818 |
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Test for the wordcount example."""
# pytype: skip-file
import collections
import logging
import os
import re
import tempfile
import unittest

from apache_beam.examples.dataframe import wordcount
from apache_beam.testing.util import open_shards
class WordCountTest(unittest.TestCase):
SAMPLE_TEXT = """
a
a b
a b c
loooooonger words
"""
def create_temp_file(self, contents):
with tempfile.NamedTemporaryFile(delete=False) as f:
f.write(contents.encode('utf-8'))
return f.name
def test_basics(self):
temp_path = self.create_temp_file(self.SAMPLE_TEXT)
expected_words = collections.defaultdict(int)
for word in re.findall(r'[\w]+', self.SAMPLE_TEXT):
expected_words[word] += 1
wordcount.run(['--input=%s*' % temp_path, '--output=%s.result' % temp_path])
# Parse result file and compare.
results = []
with open_shards(temp_path + '.result-*') as result_file:
for line in result_file:
match = re.search(r'(\S+),([0-9]+)', line)
if match is not None:
results.append((match.group(1), int(match.group(2))))
elif line.strip():
self.assertEqual(line.strip(), 'word,count')
self.assertEqual(sorted(results), sorted(expected_words.items()))
if __name__ == '__main__':
  # Enable info-level logging so pipeline progress is visible, then run
  # the module's tests under unittest's CLI runner.
  logging.getLogger().setLevel(logging.INFO)
  unittest.main()
| lukecwik/incubator-beam | sdks/python/apache_beam/examples/dataframe/wordcount_test.py | Python | apache-2.0 | 2,150 |
#!/usr/bin/env python
# Author:
# Rudiger Birkner (Networked Systems Group ETH Zurich)
import argparse
import json
from multiprocessing import Queue
from Queue import Empty
from threading import Thread
from time import sleep, time
import socket
import os
import sys
# Make the repository root importable (two levels up from this file) so
# that ``util.log`` resolves when the script is run directly.
np = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
if np not in sys.path:
    sys.path.append(np)
import util.log

''' LogClient for Reference Monitor '''
class LogClient(object):
    """Replays logged flow-mod bursts to a reference monitor over TCP.

    A file-processor thread parses bursts out of the input file into a
    queue; a sender thread drains the queue and ships each burst as one
    JSON blob per TCP connection, optionally pacing transmissions to
    match the timestamps recorded in the log.
    """

    def __init__(self, address, port, authkey, input_file, debug = False, timing = False):
        self.logger = util.log.getLogger('log_client')
        self.logger.info('server: start')

        # When True, replay preserves the inter-burst delays recorded in
        # the log instead of sending as fast as possible.
        self.timing = timing

        self.address = address
        self.port = int(port)
        self.authkey = authkey
        self.input_file = input_file

        # Wall-clock anchor paired with the first burst's log timestamp;
        # both are (re)set when the first burst is sent (see flow_mod_sender).
        self.real_start_time = time()
        self.simulation_start_time = 0

        self.fp_thread = None
        self.fs_thread = None
        self.flow_mod_queue = Queue()

    def start(self):
        """Launch the file-processor and flow-mod sender daemon threads."""
        # self.run is the shared shutdown flag both threads poll.
        self.run = True
        self.fp_thread = Thread(target=self.file_processor)
        self.fp_thread.setDaemon(True)
        self.fp_thread.start()
        self.logger.debug('file processor started')
        self.fs_thread = Thread(target=self.flow_mod_sender)
        self.fs_thread.setDaemon(True)
        self.fs_thread.start()
        self.logger.debug('flow mod sender started')

    def stop(self):
        """Signal both threads to exit, wait for them, and close the queue."""
        self.run = False
        self.fs_thread.join()
        self.logger.debug('flow mod sender terminated')
        self.fp_thread.join()
        self.logger.debug('file processor terminated')
        self.flow_mod_queue.close()

    ''' receiver '''
    def file_processor(self):
        """Parse the input log into burst dicts and enqueue them.

        Expected input format (as consumed below): a ``BURST: <time>``
        line, then a ``PARTICIPANT: <id>`` line, then one JSON flow mod
        per line, terminated by a blank line.
        """
        with open(self.input_file) as infile:
            # flag: 0 = outside a burst, 1 = burst header seen,
            # 2 = collecting flow mods until a blank line ends the burst.
            flag = 0
            tmp = {}
            for line in infile:
                if line.startswith("BURST"):
                    flag = 1
                    tmp = {"flow_mods": []}
                    x = line.split("\n")[0].split(": ")[1]
                    tmp["time"] = float(x)
                elif line.startswith("PARTICIPANT") and flag == 1:
                    flag = 2
                    x = line.split("\n")[0].split(": ")[1]
                    # NOTE(review): "secrect" looks like a typo for "secret",
                    # but it must match whatever key the refmon checks —
                    # confirm before changing the literal.
                    tmp["auth_info"] = {"participant": int(x), "auth_key": "secrect"}
                elif flag == 2:
                    if line.startswith("\n"):
                        # Blank line: burst complete; enqueue it.
                        if not self.run:
                            break
                        self.logger.debug('processed one burst')
                        self.flow_mod_queue.put(tmp)
                        # Back-pressure: pause parsing while the sender is
                        # far behind (queue above 32000 bursts).
                        while self.flow_mod_queue.qsize() > 32000:
                            self.logger.debug('queue is full - taking a break')
                            sleep(self.sleep_time(tmp["time"])/2)
                            if not self.run:
                                break
                        flag = 0
                    else:
                        tmp["flow_mods"].append(json.loads(line))
        self.logger.debug('finished processing the log')

    def flow_mod_sender(self):
        """Drain the queue and send each burst, pacing when timing is on."""
        while self.run:
            try:
                # Short timeout so the loop re-checks self.run for shutdown.
                flow_mod = self.flow_mod_queue.get(True, 0.5)
            except Empty:
                continue
            if self.timing:
                if self.simulation_start_time == 0:
                    # Anchor the replay clock on the first burst sent.
                    self.real_start_time = time()
                    self.simulation_start_time = flow_mod["time"]
                sleep_time = self.sleep_time(flow_mod["time"])
                self.logger.debug('sleep for ' + str(sleep_time) + ' seconds')
                sleep(sleep_time)
            self.send(flow_mod)

    def send(self, flow_mod):
        """Open a TCP connection to the refmon and send one burst as JSON."""
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.connect((self.address, self.port))
        sock.sendall(json.dumps(flow_mod))
        sock.close()

    def sleep_time(self, flow_mod_time):
        """Return seconds to wait so replay matches the logged burst spacing.

        Maps the burst's log timestamp onto the wall clock via the anchor
        pair set in flow_mod_sender; never returns a negative value.
        """
        time_diff = flow_mod_time - self.simulation_start_time
        wake_up_time = self.real_start_time + time_diff
        sleep_time = wake_up_time - time()
        if sleep_time < 0:
            sleep_time = 0
        return sleep_time
def main(argv):
    """Run a LogClient built from parsed CLI arguments until interrupted.

    argv: argparse.Namespace with ip, port, key, input and timing
        attributes.

    Fix: the original body ignored ``argv`` and read the module-global
    ``args`` set by the __main__ guard, which made the function unusable
    from any other caller. Use the parameter instead.
    """
    log_client_instance = LogClient(argv.ip, argv.port, argv.key, argv.input, True, argv.timing)
    log_client_instance.start()
    # Idle until the client stops; Ctrl-C triggers an orderly shutdown
    # (stop() clears .run, ending this loop).
    while log_client_instance.run:
        try:
            sleep(0.5)
        except KeyboardInterrupt:
            log_client_instance.stop()
''' main '''
if __name__ == '__main__':
    # Command-line entry point: parse the refmon endpoint, auth key and
    # replay file, then hand the parsed namespace to main().
    parser = argparse.ArgumentParser()
    parser.add_argument('ip', help='ip address of the refmon')
    parser.add_argument('port', help='port of the refmon')
    parser.add_argument('key', help='authkey of the refmon')
    parser.add_argument('input', help='flow mod input file')
    parser.add_argument('-t', '--timing', help='enable timed replay of flow mods', action='store_true')
    args = parser.parse_args()
    main(args)
| sdn-ixp/iSDX | flanc/log_client.py | Python | apache-2.0 | 4,946 |
#!/usr/bin/python
# This file is part of Morse.
#
# Morse is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Morse is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Morse. If not, see <http://www.gnu.org/licenses/>.
class PluginError (StandardError):
    """Base exception raised by Morse plugins.

    Subclasses StandardError, so this module targets Python 2
    (StandardError was removed in Python 3).
    """
    pass
| retooth/morse | morse/api/exceptions.py | Python | gpl-3.0 | 742 |
# Copyright 2020 The AutoKeras Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pandas as pd
import pytest
import tensorflow as tf
from autokeras import test_utils
from autokeras.adapters import input_adapters
from autokeras.utils import data_utils
def test_structured_data_input_unsupported_type_error():
    """A plain string is rejected with a TypeError mentioning the type."""
    adapter = input_adapters.StructuredDataAdapter()
    with pytest.raises(TypeError) as excinfo:
        adapter.adapt("unknown", batch_size=32)
    assert "Unsupported type" in str(excinfo.value)
def test_structured_data_input_transform_to_dataset():
    """A string-typed tf.data.Dataset adapts to a tf.data.Dataset."""
    # Fix: np.unicode was deprecated in NumPy 1.20 and removed in 2.0;
    # the builtin str is the supported alias and yields the same dtype.
    x = tf.data.Dataset.from_tensor_slices(
        pd.read_csv(test_utils.TRAIN_CSV_PATH).to_numpy().astype(str)
    )
    adapter = input_adapters.StructuredDataAdapter()
    x = adapter.adapt(x, batch_size=32)
    assert isinstance(x, tf.data.Dataset)
def test_image_input_adapter_transform_to_dataset():
    """ImageAdapter turns generated image data into a tf.data.Dataset."""
    adapter = input_adapters.ImageAdapter()
    dataset = adapter.adapt(test_utils.generate_data(), batch_size=32)
    assert isinstance(dataset, tf.data.Dataset)
def test_image_input_unsupported_type():
    """Non-array input to ImageAdapter raises a descriptive TypeError."""
    adapter = input_adapters.ImageAdapter()
    with pytest.raises(TypeError) as excinfo:
        adapter.adapt("unknown", batch_size=32)
    assert "Expect the data to ImageInput to be numpy" in str(excinfo.value)
def test_image_input_numerical():
    """String-valued arrays are rejected: image data must be numerical."""
    adapter = input_adapters.ImageAdapter()
    with pytest.raises(TypeError) as excinfo:
        adapter.adapt(np.array([[["unknown"]]]), batch_size=32)
    assert "Expect the data to ImageInput to be numerical" in str(excinfo.value)
def test_input_type_error():
    """Non-array input to InputAdapter raises a descriptive TypeError."""
    adapter = input_adapters.InputAdapter()
    with pytest.raises(TypeError) as excinfo:
        adapter.adapt("unknown", batch_size=32)
    assert "Expect the data to Input to be numpy" in str(excinfo.value)
def test_input_numerical():
    """String-valued arrays are rejected: Input data must be numerical."""
    adapter = input_adapters.InputAdapter()
    with pytest.raises(TypeError) as excinfo:
        adapter.adapt(np.array([[["unknown"]]]), batch_size=32)
    assert "Expect the data to Input to be numerical" in str(excinfo.value)
def test_text_adapt_unbatched_dataset():
    """An unbatched string dataset adapts to a dataset with shape [None]."""
    dataset = tf.data.Dataset.from_tensor_slices(np.array(["a b c", "b b c"]))
    adapter = input_adapters.TextAdapter()
    dataset = adapter.adapt(dataset, batch_size=32)
    assert isinstance(dataset, tf.data.Dataset)
    assert data_utils.dataset_shape(dataset).as_list() == [None]
def test_text_adapt_batched_dataset():
    """An already-batched string dataset keeps shape [None] after adapt."""
    dataset = tf.data.Dataset.from_tensor_slices(np.array(["a b c", "b b c"])).batch(32)
    adapter = input_adapters.TextAdapter()
    dataset = adapter.adapt(dataset, batch_size=32)
    assert isinstance(dataset, tf.data.Dataset)
    assert data_utils.dataset_shape(dataset).as_list() == [None]
def test_text_adapt_np():
    """A plain string ndarray adapts to a dataset with shape [None]."""
    adapter = input_adapters.TextAdapter()
    dataset = adapter.adapt(np.array(["a b c", "b b c"]), batch_size=32)
    assert isinstance(dataset, tf.data.Dataset)
    assert data_utils.dataset_shape(dataset).as_list() == [None]
def test_text_input_type_error():
    """Non-array input to TextAdapter raises a descriptive TypeError."""
    adapter = input_adapters.TextAdapter()
    with pytest.raises(TypeError) as excinfo:
        adapter.adapt("unknown", batch_size=32)
    assert "Expect the data to TextInput to be numpy" in str(excinfo.value)
def test_time_series_input_type_error():
    """Non-array input to TimeseriesAdapter raises a descriptive TypeError."""
    adapter = input_adapters.TimeseriesAdapter()
    with pytest.raises(TypeError) as excinfo:
        adapter.adapt("unknown", batch_size=32)
    assert "Expect the data in TimeseriesInput to be numpy" in str(excinfo.value)
def test_time_series_input_transform_df_to_dataset():
    """A pandas DataFrame adapts to a tf.data.Dataset."""
    frame = pd.DataFrame(np.random.rand(100, 32))
    adapter = input_adapters.TimeseriesAdapter()
    assert isinstance(adapter.adapt(frame, batch_size=32), tf.data.Dataset)
| keras-team/autokeras | autokeras/adapters/input_adapters_test.py | Python | apache-2.0 | 4,248 |
#!/usr/bin/env python2
from sqlalchemy import *
def main():
    """Entry point; no behaviour implemented yet.

    Fix: as written, ``def main():`` had an empty body immediately
    followed by the __main__ guard, which is a SyntaxError. A ``pass``
    body is the minimal repair that keeps the script importable/runnable.
    """
    pass


if __name__ == '__main__':
    main()
| opentokix/yolo-dangerzone | sqlite/main.py | Python | unlicense | 104 |
#!/usr/bin/python
# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: os_nova_flavor
short_description: Manage OpenStack compute flavors
extends_documentation_fragment: openstack
version_added: "2.0"
author: "David Shrewsbury (@Shrews)"
description:
- Add or remove flavors from OpenStack.
options:
state:
description:
- Indicate desired state of the resource. When I(state) is 'present',
then I(ram), I(vcpus), and I(disk) are all required. There are no
default values for those parameters.
choices: ['present', 'absent']
required: false
default: present
name:
description:
- Flavor name.
required: true
ram:
description:
- Amount of memory, in MB.
required: false
default: null
vcpus:
description:
- Number of virtual CPUs.
required: false
default: null
disk:
description:
- Size of local disk, in GB.
required: false
default: null
ephemeral:
description:
- Ephemeral space size, in GB.
required: false
default: 0
swap:
description:
- Swap space size, in MB.
required: false
default: 0
rxtx_factor:
description:
- RX/TX factor.
required: false
default: 1.0
is_public:
description:
- Make flavor accessible to the public.
required: false
default: true
flavorid:
description:
- ID for the flavor. This is optional as a unique UUID will be
assigned if a value is not specified.
required: false
default: "auto"
availability_zone:
description:
- Ignored. Present for backwards compatability
required: false
extra_specs:
description:
- Metadata dictionary
required: false
default: None
version_added: "2.3"
requirements: ["shade"]
'''
EXAMPLES = '''
- name: "Create 'tiny' flavor with 1024MB of RAM, 1 virtual CPU, and 10GB of local disk, and 10GB of ephemeral."
os_nova_flavor:
cloud: mycloud
state: present
name: tiny
ram: 1024
vcpus: 1
disk: 10
ephemeral: 10
- name: "Delete 'tiny' flavor"
os_nova_flavor:
cloud: mycloud
state: absent
name: tiny
- name: Create flavor with metadata
os_nova_flavor:
cloud: mycloud
state: present
name: tiny
ram: 1024
vcpus: 1
disk: 10
extra_specs:
"quota:disk_read_iops_sec": 5000
"aggregate_instance_extra_specs:pinned": false
'''
RETURN = '''
flavor:
description: Dictionary describing the flavor.
returned: On success when I(state) is 'present'
type: complex
contains:
id:
description: Flavor ID.
returned: success
type: string
sample: "515256b8-7027-4d73-aa54-4e30a4a4a339"
name:
description: Flavor name.
returned: success
type: string
sample: "tiny"
disk:
description: Size of local disk, in GB.
returned: success
type: int
sample: 10
ephemeral:
description: Ephemeral space size, in GB.
returned: success
type: int
sample: 10
ram:
description: Amount of memory, in MB.
returned: success
type: int
sample: 1024
swap:
description: Swap space size, in MB.
returned: success
type: int
sample: 100
vcpus:
description: Number of virtual CPUs.
returned: success
type: int
sample: 2
is_public:
description: Make flavor accessible to the public.
returned: success
type: bool
sample: true
extra_specs:
description: Flavor metadata
returned: success
type: dict
sample:
"quota:disk_read_iops_sec": 5000
"aggregate_instance_extra_specs:pinned": false
'''
try:
import shade
HAS_SHADE = True
except ImportError:
HAS_SHADE = False
def _system_state_change(module, flavor):
state = module.params['state']
if state == 'present' and not flavor:
return True
if state == 'absent' and flavor:
return True
return False
def main():
    """Ansible entry point: create, update metadata of, or delete an
    OpenStack flavor via shade, honouring check mode."""
    argument_spec = openstack_full_argument_spec(
        state = dict(required=False, default='present',
                     choices=['absent', 'present']),
        name = dict(required=False),
        # required when state is 'present'
        ram = dict(required=False, type='int'),
        vcpus = dict(required=False, type='int'),
        disk = dict(required=False, type='int'),
        ephemeral = dict(required=False, default=0, type='int'),
        swap = dict(required=False, default=0, type='int'),
        rxtx_factor = dict(required=False, default=1.0, type='float'),
        is_public = dict(required=False, default=True, type='bool'),
        flavorid = dict(required=False, default="auto"),
        extra_specs = dict(required=False, default=None, type='dict'),
    )
    module_kwargs = openstack_module_kwargs()
    module = AnsibleModule(
        argument_spec,
        supports_check_mode=True,
        required_if=[
            ('state', 'present', ['ram', 'vcpus', 'disk'])
        ],
        **module_kwargs)
    if not HAS_SHADE:
        module.fail_json(msg='shade is required for this module')
    state = module.params['state']
    name = module.params['name']
    extra_specs = module.params['extra_specs'] or {}
    try:
        cloud = shade.operator_cloud(**module.params)
        flavor = cloud.get_flavor(name)
        # In check mode, only report whether a change would occur.
        if module.check_mode:
            module.exit_json(changed=_system_state_change(module, flavor))
        if state == 'present':
            if not flavor:
                flavor = cloud.create_flavor(
                    name=name,
                    ram=module.params['ram'],
                    vcpus=module.params['vcpus'],
                    disk=module.params['disk'],
                    flavorid=module.params['flavorid'],
                    ephemeral=module.params['ephemeral'],
                    swap=module.params['swap'],
                    rxtx_factor=module.params['rxtx_factor'],
                    is_public=module.params['is_public']
                )
                changed=True
            else:
                changed=False
            # Reconcile extra_specs: values are compared as strings because
            # the cloud stores spec values as strings.
            old_extra_specs = flavor['extra_specs']
            new_extra_specs = dict([(k, str(v)) for k, v in extra_specs.items()])
            # Specs present on the flavor but absent from the request are removed.
            unset_keys = set(flavor['extra_specs'].keys()) - set(extra_specs.keys())
            if unset_keys:
                cloud.unset_flavor_specs(flavor['id'], unset_keys)
            if old_extra_specs != new_extra_specs:
                cloud.set_flavor_specs(flavor['id'], extra_specs)
            changed = (changed or old_extra_specs != new_extra_specs)
            module.exit_json(changed=changed,
                             flavor=flavor,
                             id=flavor['id'])
        elif state == 'absent':
            if flavor:
                cloud.delete_flavor(name)
                module.exit_json(changed=True)
            module.exit_json(changed=False)
    except shade.OpenStackCloudException as e:
        module.fail_json(msg=str(e))
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
if __name__ == '__main__':
main()
| andreaso/ansible | lib/ansible/modules/cloud/openstack/os_nova_flavor.py | Python | gpl-3.0 | 8,474 |
import ocl
import pyocl
import camvtk
import time
import vtk
import datetime
import math
if __name__ == "__main__":
    # Python 2 demo script: drop a compound cutter against a single
    # triangle and render the resulting cutter-location points with VTK.
    print ocl.revision()
    myscreen = camvtk.VTKScreen()
    # Triangle corners, also drawn as points and edges for reference.
    a=ocl.Point(1,0.6,0.1)
    myscreen.addActor(camvtk.Point(center=(a.x,a.y,a.z), color=(1,0,1)))
    b=ocl.Point(0,1,0)
    myscreen.addActor(camvtk.Point(center=(b.x,b.y,b.z), color=(1,0,1)))
    c=ocl.Point(0,0,0.0)
    myscreen.addActor(camvtk.Point(center=(c.x,c.y,c.z), color=(1,0,1)))
    myscreen.addActor( camvtk.Line(p1=(a.x,a.y,a.z),p2=(c.x,c.y,c.z)) )
    myscreen.addActor( camvtk.Line(p1=(c.x,c.y,c.z),p2=(b.x,b.y,b.z)) )
    myscreen.addActor( camvtk.Line(p1=(a.x,a.y,a.z),p2=(b.x,b.y,b.z)) )
    t = ocl.Triangle(b,c,a)
    radius1=1
    angle = math.pi/4
    # Alternative cutter shapes kept for experimentation.
    #cutter = ocl.ConeCutter(0.37, angle)
    #cutter = ocl.BallCutter(0.532)
    #cutter = ocl.CylCutter(0.3)
    #cutter = ocl.BullCutter(1,0.2)
    #cutter = ocl.CylConeCutter(0.2,0.5,math.pi/9)
    #cutter = ocl.BallConeCutter(0.4,0.6,math.pi/9)
    cutter = ocl.BullConeCutter(0.4,0.1,0.7,math.pi/6)
    #cutter = ocl.ConeConeCutter(0.4,math.pi/3,0.7,math.pi/6)
    #cutter = ocl.ConeCutter(0.4, math.pi/3)
    print cutter
    #print cc.type
    # Grid of cutter-location sample points below the triangle.
    minx=-0.5
    dx=0.0051
    maxx=1.5
    miny=-0.7
    dy=dx
    maxy=1.5
    z=-1.8
    clpoints = pyocl.CLPointGrid(minx,dx,maxx,miny,dy,maxy,z)
    nv=0
    nn=0
    ne=0
    nf=0
    print len(clpoints), "cl-points to evaluate"
    n=0
    # Drop the cutter against each triangle feature; prints ~10 progress ticks.
    for cl in clpoints:
        cutter.vertexDrop(cl,t)
        cutter.edgeDrop(cl,t)
        cutter.facetDrop(cl,t)
        #cutter.dropCutter(cl,t)
        n=n+1
        if (n % int(len(clpoints)/10)) == 0:
            print n/int(len(clpoints)/10), " ",
    print "done."
    print "rendering..."
    print " len(clpoints)=", len(clpoints)
    camvtk.drawCLPointCloud(myscreen, clpoints)
    print "done."
    # Semi-transparent sphere marking the origin, then start the interactor.
    origo = camvtk.Sphere(center=(0,0,0) , radius=0.1, color=camvtk.blue)
    origo.SetOpacity(0.2)
    myscreen.addActor( origo )
    myscreen.camera.SetPosition(0.5, 3, 2)
    myscreen.camera.SetFocalPoint(0.5, 0.5, 0)
    myscreen.render()
    myscreen.iren.Start()
    #raw_input("Press Enter to terminate")
| AlanZatarain/opencamlib | scripts/drop-cutter/drop_cutter_one-triangle_3_compoundcutter.py | Python | gpl-3.0 | 2,219 |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for CancelBuild
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-build
# [START cloudbuild_v1_generated_CloudBuild_CancelBuild_sync]
from google.cloud.devtools import cloudbuild_v1
def sample_cancel_build():
    """Cancel an in-progress Cloud Build and print the server response.

    Generated sample: replace the placeholder project_id/id values with
    real ones before running.
    """
    # Create a client
    client = cloudbuild_v1.CloudBuildClient()
    # Initialize request argument(s)
    request = cloudbuild_v1.CancelBuildRequest(
        project_id="project_id_value",
        id="id_value",
    )
    # Make the request
    response = client.cancel_build(request=request)
    # Handle the response
    print(response)
# [END cloudbuild_v1_generated_CloudBuild_CancelBuild_sync]
| googleapis/python-cloudbuild | samples/generated_samples/cloudbuild_v1_generated_cloud_build_cancel_build_sync.py | Python | apache-2.0 | 1,476 |
#!/usr/bin/env python
"""
Driver for Vokaturi-based voice tone analyzer
"""
import sys
import time
import config
import loader
import audio_processor
import pika
from rabbit_client import RabbitConnection
from shared_config import RABBIT_HOST, RABBIT_PORT
DECISION_THRESHOLD = .65
def _prediction_to_word(prediction):
    """Map a (class, [boring, engaging]) prediction to a readable label.

    Confident predictions (probability above DECISION_THRESHOLD) become
    'Engaging'/'Boring'; anything else falls back to 'Meh' with both scores.
    """
    predicted_class, probabilities = prediction
    boring_confidence, engaging_confidence = probabilities
    is_engaging = (predicted_class == config.ENTHUSIASTIC_CLASS
                   and engaging_confidence > DECISION_THRESHOLD)
    if is_engaging:
        return "Engaging {0}".format(engaging_confidence)
    is_boring = (predicted_class == config.MONOTONE_CLASS
                 and boring_confidence > DECISION_THRESHOLD)
    if is_boring:
        return "Boring {0}".format(boring_confidence)
    return "Meh (boring: {0}, engaging: {1})".format(boring_confidence, engaging_confidence)
def predict(trained_model, features):
    """Classify a single feature vector and report the tone on stderr.

    trained_model: fitted classifier exposing predict()/predict_proba().
    features: 1-D feature vector; reshaped to a single-sample 2-D array.
    """
    features = features.reshape(1, -1)
    # Fix: the original indexed the result of map(), which is a lazy,
    # non-subscriptable iterator on Python 3. Materialise a list instead
    # (identical behaviour on Python 2).
    predictions = [
        _prediction_to_word(pair)
        for pair in zip(trained_model.predict(features),
                        trained_model.predict_proba(features))
    ]
    sys.stderr.write("You are being " + str(predictions[0]) + "\n")
def main():
    """
    Program entrypoint: load the trained model, then consume audio from
    RabbitMQ forever, predicting tone for each incoming buffer.
    """
    sys.stdout.write("Starting Vokaturi\n")
    trained_model = loader.load_model()
    if trained_model is None:
        raise Exception("No trained model available")
    sys.stdout.write("loaded model\n")
    sys.stdout.write("Connecting to rabbit\n")
    # Closure over trained_model: extract features from a raw audio
    # buffer and run a prediction on them.
    def _handle_audio(audio_data):
        features = audio_processor.extract_features_from_string(audio_data)
        predict(trained_model, features)
    # Outer loop: reconnect after transient broker failures.
    while True:
        # Retry the connection every 5s until the broker is reachable.
        while True:
            try:
                client = RabbitConnection(RABBIT_HOST, RABBIT_PORT)
                break
            except pika.exceptions.AMQPConnectionError:
                time.sleep(5)
        client.add_audio_incoming_callback(_handle_audio)
        try:
            client.start()
        except KeyboardInterrupt:
            # Ctrl-C: leave the outer loop without restarting the client.
            break
        except pika.exceptions.IncompatibleProtocolError:
            sys.stderr.write("Pika connection error\n")
        # Stop the current client before reconnecting on the next pass.
        client.stop()
if __name__ == "__main__":
main()
| wkronmiller/Cognitive-Teaching-Assistant | VokaturiAudio/lib/main.py | Python | gpl-3.0 | 2,243 |
from fftwrapper import FFT
import numpy as np
# Test fixtures: nsamp seconds of signal sampled at 2**8 = 256 samples/s.
nsamp = 8
samprate = np.power(2, 8)
# Pure 1 Hz and 2 Hz sine waves spanning the whole nsamp-second window.
d1hz = [np.sin(x) for x in np.linspace(0, 2 * np.pi * nsamp, num=samprate * nsamp)]
d2hz = [np.sin(2 * x) for x in np.linspace(0, 2 * np.pi * nsamp, num=samprate * nsamp)]
def test_basic_d1hz():
    """One-second window over the 1 Hz signal: energy peaks in bin 1."""
    window_size = np.power(2, 8)
    expected_bins = window_size // 2
    majfreqbin, majfreqbini = 1, 1
    analyzer = FFT(d1hz, samprate, window_size)
    assert analyzer.freqbins[majfreqbini] == majfreqbin, "i=[%s], freqbin=[%s] expected=[%s]" % (majfreqbini, analyzer.freqbins, majfreqbin)
    for i, (ampl, phase) in enumerate(analyzer.run()):
        assert len(ampl) == len(phase) == expected_bins == len(analyzer.freqbins), "[%s] == [%s] == [%s] == [%s]" % (len(ampl), len(phase), expected_bins, len(analyzer.freqbins))
        assert ampl.argmax() == majfreqbini
    assert nsamp == i + 1, "nsamp [%s] != iterations [%s]" % (nsamp, i + 1)
def test_basic_d2hz():
    """One-second window over the 2 Hz signal: energy peaks in bin 2."""
    window_size = np.power(2, 8)
    expected_bins = window_size // 2
    majfreqbin, majfreqbini = 2, 2
    analyzer = FFT(d2hz, samprate, window_size)
    assert analyzer.freqbins[majfreqbini] == majfreqbin, "i=[%s], freqbin=[%s] expected=[%s]" % (majfreqbini, analyzer.freqbins, majfreqbin)
    for i, (ampl, phase) in enumerate(analyzer.run()):
        assert len(ampl) == len(phase) == expected_bins == len(analyzer.freqbins), "[%s] == [%s] == [%s] == [%s]" % (len(ampl), len(phase), expected_bins, len(analyzer.freqbins))
        assert ampl.argmax() == majfreqbini
    assert nsamp == i + 1, "nsamp [%s] != iterations [%s]" % (nsamp, i + 1)
def test_smallwin_d1hz():
    """Half-second window over 1 Hz: twice the windows, peak in bin 0."""
    window_size = np.power(2, 7)
    expected_bins = window_size // 2
    majfreqbin, majfreqbini = 0, 0
    analyzer = FFT(d1hz, samprate, window_size)
    assert analyzer.freqbins[majfreqbini] == majfreqbin, "i=[%s], freqbin=[%s] expected=[%s]" % (majfreqbini, analyzer.freqbins, majfreqbin)
    for i, (ampl, phase) in enumerate(analyzer.run()):
        assert len(ampl) == len(phase) == expected_bins == len(analyzer.freqbins), "[%s] == [%s] == [%s] == [%s]" % (len(ampl), len(phase), expected_bins, len(analyzer.freqbins))
        assert ampl.argmax() == majfreqbini
    assert nsamp * 2 == i + 1, "nsamp [%s] != iterations [%s]" % (nsamp, i + 1)
def test_smallwin_d2hz():
    """Half-second window over 2 Hz: twice the windows, peak in bin 1."""
    window_size = np.power(2, 7)
    expected_bins = window_size // 2
    majfreqbin, majfreqbini = 2, 1
    analyzer = FFT(d2hz, samprate, window_size)
    assert analyzer.freqbins[majfreqbini] == majfreqbin, "i=[%s], freqbin=[%s] expected=[%s]" % (majfreqbini, analyzer.freqbins, majfreqbin)
    for i, (ampl, phase) in enumerate(analyzer.run()):
        assert len(ampl) == len(phase) == expected_bins == len(analyzer.freqbins), "[%s] == [%s] == [%s] == [%s]" % (len(ampl), len(phase), expected_bins, len(analyzer.freqbins))
        assert ampl.argmax() == majfreqbini
    assert nsamp * 2 == i + 1, "nsamp [%s] != iterations [%s]" % (nsamp, i + 1)
def test_largewin_d1hz():
    """Two-second window over 1 Hz: half the windows, peak in bin 2."""
    window_size = np.power(2, 9)
    expected_bins = window_size // 2
    majfreqbin, majfreqbini = 1, 2
    analyzer = FFT(d1hz, samprate, window_size)
    assert analyzer.freqbins[majfreqbini] == majfreqbin, "i=[%s], freqbin=[%s] expected=[%s]" % (majfreqbini, analyzer.freqbins, majfreqbin)
    for i, (ampl, phase) in enumerate(analyzer.run()):
        assert len(ampl) == len(phase) == expected_bins == len(analyzer.freqbins), "[%s] == [%s] == [%s] == [%s]" % (len(ampl), len(phase), expected_bins, len(analyzer.freqbins))
        assert ampl.argmax() == majfreqbini
    assert nsamp / 2 == i + 1, "nsamp [%s] != iterations [%s]" % (nsamp, i + 1)
def test_largewin_d2hz():
    """Two-second window over 2 Hz: half the windows, peak in bin 4."""
    window_size = np.power(2, 9)
    expected_bins = window_size // 2
    majfreqbin, majfreqbini = 2, 4
    analyzer = FFT(d2hz, samprate, window_size)
    assert analyzer.freqbins[majfreqbini] == majfreqbin, "i=[%s], freqbin=[%s] expected=[%s]" % (majfreqbini, analyzer.freqbins, majfreqbin)
    for i, (ampl, phase) in enumerate(analyzer.run()):
        assert len(ampl) == len(phase) == expected_bins == len(analyzer.freqbins), "[%s] == [%s] == [%s] == [%s]" % (len(ampl), len(phase), expected_bins, len(analyzer.freqbins))
        assert ampl.argmax() == majfreqbini
    assert nsamp / 2 == i + 1, "nsamp [%s] != iterations [%s]" % (nsamp, i + 1)
def test_basic_highcut_d1hz():
    """One-second window with a 10 Hz high cut: 11 bins, peak in bin 1."""
    window_size = np.power(2, 8)
    majfreqbin, majfreqbini = 1, 1
    highcut = 10
    expected_bins = 11
    analyzer = FFT(d1hz, samprate, window_size, highcut=highcut)
    assert analyzer.freqbins[majfreqbini] == majfreqbin, "i=[%s], freqbin=[%s] expected=[%s]" % (majfreqbini, analyzer.freqbins, majfreqbin)
    for i, (ampl, phase) in enumerate(analyzer.run()):
        assert len(ampl) == len(phase) == expected_bins == len(analyzer.freqbins), "[%s] == [%s] == [%s] == [%s]" % (len(ampl), len(phase), expected_bins, len(analyzer.freqbins))
        assert ampl.argmax() == majfreqbini
    assert nsamp == i + 1, "nsamp [%s] != iterations [%s]" % (nsamp, i + 1)
def test_smallwin_highcut_d1hz():
    """Half-second window with a 10 Hz high cut: 6 bins, peak in bin 0."""
    window_size = np.power(2, 7)
    majfreqbin, majfreqbini = 0, 0
    highcut = 10
    expected_bins = 6
    analyzer = FFT(d1hz, samprate, window_size, highcut=highcut)
    assert analyzer.freqbins[majfreqbini] == majfreqbin, "i=[%s], freqbin=[%s] expected=[%s]" % (majfreqbini, analyzer.freqbins, majfreqbin)
    for i, (ampl, phase) in enumerate(analyzer.run()):
        assert len(ampl) == len(phase) == expected_bins == len(analyzer.freqbins), "[%s] == [%s] == [%s] == [%s]" % (len(ampl), len(phase), expected_bins, len(analyzer.freqbins))
        assert ampl.argmax() == majfreqbini
    assert nsamp * 2 == i + 1, "nsamp [%s] != iterations [%s]" % (nsamp, i + 1)
def test_largewin_highcut_d1hz():
    """Two-second window with a 10 Hz high cut: 21 bins, peak in bin 4.

    NOTE(review): despite the name this test feeds the 2 Hz fixture
    (d2hz), mirroring test_largewin_d2hz -- confirm the name is intended.
    """
    winsize = np.power(2, 9)
    majfreqbin, majfreqbini = 2, 4
    highcut = 10
    nout = 21
    f = FFT(d2hz, samprate, winsize, highcut=highcut)
    assert f.freqbins[majfreqbini] == majfreqbin, "i=[%s], freqbin=[%s] expected=[%s]" % (majfreqbini, f.freqbins, majfreqbin)
    for i, (ampl, phase) in enumerate(f.run()):
        assert len(ampl) == len(phase) == nout == len(f.freqbins), "[%s] == [%s] == [%s] == [%s]" % (len(ampl), len(phase), nout, len(f.freqbins))
        # Fix: the failure message was the bare template "[%s] != [%s]"
        # with no values interpolated; fill in actual and expected bins.
        assert ampl.argmax() == majfreqbini, "[%s] != [%s]" % (ampl.argmax(), majfreqbini)
    assert nsamp / 2 == i + 1, "nsamp [%s] != iterations [%s]" % (nsamp, i + 1)
| kevinjos/whalesong | fftwrapper_test.py | Python | agpl-3.0 | 5,737 |
import opcsim, seaborn as sns
# Load the bundled 'Urban' aerosol size distribution and plot its
# probability density function (docs example snippet).
d = opcsim.load_distribution("Urban")
ax = opcsim.plots.pdfplot(d)
ax.set_title("Urban Aerosol Distribution", fontsize=16)
# Drop the top/right spines for a cleaner figure.
sns.despine()
'''
Created on Jun 29, 2012
@author: ajju
'''
from common import Logger, HttpUtils, AddonUtils
from common.DataObjects import VideoHostingInfo, VideoInfo, VIDEO_QUAL_SD, \
XBMC_EXECUTE_PLUGIN
import urllib
import xbmcaddon # @UnresolvedImport
import xbmcgui # @UnresolvedImport
try:
import json
except ImportError:
import simplejson as json
def getVideoHostingInfo():
    """Describe the Vevo hosting provider handled by this delegator."""
    hosting = VideoHostingInfo()
    hosting.set_video_hosting_image('')
    hosting.set_video_hosting_name('Vevo add-on by BlueCop')
    return hosting
def retrieveVideoInfo(videoUrl):
    """Build a VideoInfo that plays the given Vevo video or playlist
    through the third-party 'plugin.video.vevo' XBMC add-on.

    videoUrl: either 'playlist/<id>' or a path ending in the Vevo isrc id.
    Raises if the VEVO add-on is not installed (after showing a dialog).
    """
    # Verify the required external add-on exists; otherwise tell the user
    # where to get it and re-raise the lookup error.
    try:
        xbmcaddon.Addon('plugin.video.vevo')
    except:
        dialog = xbmcgui.Dialog()
        dialog.ok('[B][COLOR red]MISSING: [/COLOR][/B] VEVO add-on', '', 'Please install VEVO add-on created by BlueCop!', 'Available at http://code.google.com/p/bluecop-xbmc-repo/')
        raise
    video_info = VideoInfo()
    video_info.set_video_hosting_info(getVideoHostingInfo())
    video_info.set_video_id(videoUrl)
    addon_url = 'plugin://plugin.video.vevo/?'
    vevo_id = videoUrl.split('/')[-1]
    if videoUrl.startswith('playlist'):
        # Playlist: delegate entirely to the add-on's playPlaylist mode.
        url = urllib.quote_plus('http://api.vevo.com/mobile/v2/playlist/%s.json?' % vevo_id)
        addon_url += 'url=%s' % url
        addon_url += '&mode=playPlaylist'
        addon_url += '&duration=210'
        addon_url += '&page=1'
        video_info.add_video_link(XBMC_EXECUTE_PLUGIN, addon_url, addUserAgent=False, addReferer=False)
        video_info.set_video_image('')
        video_info.set_video_name(' ')
    else:
        # Single video: fetch metadata (title, image, featured artists)
        # from the Vevo authentication service.
        url = 'http://videoplayer.vevo.com/VideoService/AuthenticateVideo?isrc=%s&extended=true' % vevo_id
        video = json.loads(HttpUtils.HttpClient().getHtmlContent(url=url))['video']
        title = ''
        # Best-effort title extraction; fall back to empty on any failure.
        try:title = video['title'].encode('utf-8')
        except: title = ''
        video_image = video['imageUrl']
        if len(video['featuredArtists']) > 0:
            # Append "(ft. A, B)" built from the featured artist names.
            feats = ''
            for featuredartist in video['featuredArtists']:
                # featuredartist_image = featuredartist['image_url']
                featuredartist_name = featuredartist['artistName'].encode('utf-8')
                feats += featuredartist_name + ', '
            # Trim the trailing ", " separator.
            feats = feats[:-2]
            title += ' (ft. ' + feats + ')'
        addon_url += 'url=%s' % vevo_id
        addon_url += '&mode=playVideo'
        addon_url += '&duration=210'
        video_info.add_video_link(VIDEO_QUAL_SD, addon_url, addUserAgent=False, addReferer=False)
        video_info.set_video_image(video_image)
        video_info.set_video_name(title)
    Logger.logDebug(addon_url)
    video_info.set_video_stopped(False)
    return video_info
| noba3/KoTos | addons/script.module.turtlex/lib/snapvideo/VevoDelegator.py | Python | gpl-2.0 | 2,773 |
# -*- coding: utf-8 -*-
# Copyright (c) 2013-2014, GEM Foundation.
#
# OpenQuake Risklib is free software: you can redistribute it and/or
# modify it under the terms of the GNU Affero General Public License
# as published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
#
# OpenQuake Risklib is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with OpenQuake Risklib. If not, see
# <http://www.gnu.org/licenses/>.
import unittest
import mock
import numpy
from openquake.risklib import workflows
aaae = numpy.testing.assert_array_almost_equal
def asset(values, deductibles=None,
          insurance_limits=None,
          retrofitting_values=None):
    """Build a test Asset with fixed id/taxonomy/location and the given
    values, deductibles, insurance limits and retrofitting values."""
    return workflows.Asset(
        'a1', 'taxonomy', 1, (0, 0),
        values, 1, deductibles, insurance_limits, retrofitting_values)
class ScenarioTestCase(unittest.TestCase):
    """Shape checks for the Scenario workflow output, with the
    vulnerability function and hazard mocked out."""
    # loss type exercised by every test in this case
    loss_type = 'structural'
    def test_call(self):
        """With insured losses enabled the output carries insured arrays."""
        vf = mock.MagicMock()
        calc = workflows.Scenario('PGA', 'TAXO', vf, True)
        assets = [asset(
            dict(structural=10),
            deductibles=dict(structural=0.1),
            insurance_limits=dict(structural=0.8))] * 4
        # Force a 4 assets x 2 realizations loss matrix.
        calc.risk_functions[self.loss_type].apply_to = mock.Mock(
            return_value=numpy.empty((4, 2)))
        out = calc(self.loss_type, assets, mock.Mock(), mock.Mock())
        self.assertEqual((4, 2), out.loss_matrix.shape)
        self.assertEqual((2,), out.aggregate_losses.shape)
        self.assertEqual((4, 2), out.insured_loss_matrix.shape)
        self.assertEqual((2,), out.insured_losses.shape)
    def test_call_no_insured(self):
        """With insured losses disabled the insured outputs are None."""
        vf = mock.MagicMock()
        calc = workflows.Scenario('PGA', 'TAXO', vf, False)
        assets = [asset(dict(structural=10))] * 4
        vf = calc.risk_functions[self.loss_type]
        vf.apply_to = mock.Mock(return_value=numpy.empty((4, 2)))
        out = calc(self.loss_type, assets, mock.Mock(), mock.Mock())
        self.assertEqual((4, 2), out.loss_matrix.shape)
        self.assertEqual((2,), out.aggregate_losses.shape)
        self.assertIsNone(out.insured_loss_matrix)
        self.assertIsNone(out.insured_losses)
class DamageTest(unittest.TestCase):
    """Call-count check for the Damage workflow."""
    def test_generator(self):
        """The patched scientific.scenario_damage is invoked 6 times."""
        with mock.patch('openquake.risklib.scientific.scenario_damage') as m:
            fragility_functions = mock.Mock()
            calc = workflows.Damage(
                'PGA', 'TAXO', dict(damage=fragility_functions))
            calc('damage', 'assets', 'hazard', None)
            self.assertEqual(m.call_count, 6)  # called 3 x 2 times
# -*- coding: utf-8 -*-
import datetime
import json
import time
from oauth2client.client import AccessTokenRefreshError
from django.core.cache import cache
from django.utils.translation import get_language
from django.utils.encoding import force_str
from yawdadmin.resources import admin_site
from conf import settings as ls
from models import AppOption
from django.db.utils import OperationalError
def get_option_cache_key(optionset_label):
    """Return the cache key under which an optionset's options are stored."""
    key_template = 'yawdadmin_options_%s'
    return key_template % optionset_label
def get_option(optionset_label, name, current_only=True, as_stored=False):
    """
    Return the value of the option named ``name``, or None when no such
    option exists for this optionset. The ``current_only`` kwarg affects
    only language-dependant options and decides whether the value is
    returned for the current language only or for all languages.
    """
    options = get_options(optionset_label, current_only, as_stored)
    return options.get(name)
def get_options(optionset_label, current_only=True, as_stored=False):
    """
    Return all options for this app_label as dictionary with the option name
    being the key.

    current_only/as_stored are forwarded to get_option_value and control
    language selection and raw-vs-converted values respectively.
    """
    # Hit the cache
    cached = {}
    if ls.ADMIN_CACHE_DB_OPTIONS:
        cached = cache.get(get_option_cache_key(optionset_label), {})
    if cached:
        options = cached
    else:
        # Cache miss: load from the database and repopulate the cache.
        options = AppOption.objects.filter(optionset_label=optionset_label)
        if ls.ADMIN_CACHE_DB_OPTIONS:
            try:
                cache.set(get_option_cache_key(optionset_label), options,
                          ls.ADMIN_CACHE_DB_OPTIONS)
            except OperationalError:
                # Not exists the cache
                return {}
    optionset_admin = admin_site.get_optionset_admin(optionset_label)
    option_dict = {}
    # Convert each stored option through its admin-declared form field.
    for option in options:
        option_dict[force_str(option.name)] = get_option_value(optionset_admin,
                                                               option,
                                                               current_only,
                                                               as_stored)
    return option_dict
def get_option_value(optionset_admin, db_option, current_only, as_stored):
    """
    Given an AppOption object, return its value for the current language.

    Returns None when db_option is falsy or no longer registered with the
    optionset admin. For language-dependant options the stored value is a
    JSON dict keyed by language code; current_only selects just the active
    language, otherwise a dict for all languages is returned.
    """
    if not db_option:
        return None
    name = force_str(db_option.name)
    if not name in optionset_admin.options:
        return None
    # The admin-declared form field knows how to convert the raw value.
    field = optionset_admin.options[name]
    if not db_option.lang_dependant:
        if as_stored:
            return db_option.value
        return field.to_python(db_option.value) if db_option.value else ''
    value_dict = {}
    for key, value in json.loads(db_option.value).items():
        value_dict[force_str(key)] = value
    if current_only:
        curr_lang = get_language()
        if curr_lang in value_dict:
            if as_stored:
                return value_dict[curr_lang]
            return field.to_python(value_dict[curr_lang]) if value_dict[curr_lang] else ''
        # NOTE(review): when the active language has no stored value this
        # falls through and returns None implicitly — confirm intended.
    else:
        # NOTE(review): the condition below looks inverted relative to the
        # non-language branch (to_python is applied when as_stored is True,
        # raw value kept otherwise) — confirm against callers.
        for key in value_dict:
            value_dict[key] = field.to_python(value_dict[key]) if as_stored else \
                value_dict[key]
        return value_dict
def get_analytics_data(http):
    """Fetch Google Analytics visit stats for the configured profile.

    http: an authorized httplib2-style transport for the GA v3 API.
    Returns a dict with 'summed' totals and 'chart' series, cached for an
    hour; on failure returns {'error': <reason>} instead.
    """
    #try to get cached data
    data = cache.get('yawdadmin_ga', None)
    if data:
        return data
    from apiclient.discovery import build
    from apiclient.errors import HttpError
    # Query window: the last ADMIN_GOOGLE_ANALYTICS['interval'] days.
    end_date = datetime.datetime.now()
    start_date = end_date + datetime.timedelta(-ls.ADMIN_GOOGLE_ANALYTICS['interval'])
    try:
        service = build('analytics', 'v3', http=http)
        # Daily visits broken down by new vs. returning visitors.
        pie_data = service.data().ga()\
            .get(ids = 'ga:' + ls.ADMIN_GOOGLE_ANALYTICS['profile_id'],
                 start_date = start_date.strftime('%Y-%m-%d'),
                 end_date = end_date.strftime('%Y-%m-%d'),
                 metrics='ga:visits',
                 dimensions='ga:date,ga:visitorType').execute()
        # Aggregate totals for the same window.
        summed_data = service.data().ga()\
            .get(ids = 'ga:' + ls.ADMIN_GOOGLE_ANALYTICS['profile_id'],
                 start_date = start_date.strftime('%Y-%m-%d'), end_date = end_date.strftime('%Y-%m-%d'),
                 metrics='ga:pageviews, ga:visitors, ga:avgTimeOnSite, '
                         'ga:entranceBounceRate, ga:percentNewVisits').execute()
    except HttpError as e:
        return {'error': e.resp.reason}
    except AccessTokenRefreshError:
        # Credentials expired: signal the caller to re-authorize.
        return {'error': 'refresh'}
    if not 'rows' in summed_data:
        return {'error': 'empty'}
    data = {
        'summed': {
            'visits': pie_data['totalsForAllResults']['ga:visits'],
            'pageviews': summed_data['rows'][0][0],
            'visitors': summed_data['rows'][0][1],
            # Average time on site is returned in seconds; render as H:M:S.
            'avg_time': time.strftime('%H:%M:%S', time.gmtime(float(summed_data['rows'][0][2]))),
            'bounce_rate': round(float(summed_data['rows'][0][3]), 2),
            'new_visits': round(float(summed_data['rows'][0][4]), 2),
        },
        'chart': _extract_chart_data(pie_data, start_date, end_date)
    }
    # Cache the assembled payload for an hour.
    cache.set('yawdadmin_ga', data, 3600)
    return data
def _extract_chart_data(pie_data, start_date, end_date):
"""
Format the Google Analytics data for use within an Area Chart.
"""
def update_record(record):
visits = 0
for key in ('new','returning'):
if not key in record:
record[key] = 0
visits += record[key]
record['total'] = visits
date = start_date
current_row = date.strftime('%Y%m%d')
data = [{ 'date': date.strftime('%A, %B %d, %Y')}]
#calculate chart-ready data
for row in pie_data['rows']:
while row[0] != current_row:
update_record(data[len(data)-1])
date = date + datetime.timedelta(1)
data.append({'date': date.strftime('%A, %B %d, %Y')})
current_row = date.strftime('%Y%m%d')
if row[1] == 'New Visitor':
data[len(data)-1]['new'] = int(row[2])
else:
data[len(data)-1]['returning'] = int(row[2])
update_record(data[len(data)-1])
return data
| mwolff44/yawd-admin | yawdadmin/utils.py | Python | bsd-3-clause | 6,198 |
from django.shortcuts import render
from .d2lstat import process_file, calculateVirtualClassroomStats, facultyNotUsingD2LCalculation
from .forms import UploadFileForm, VirtualClassroomUsageForm, FacultyNotUsingD2LForm
def index(request):
    """Show the upload form on GET; on POST generate the usage report."""
    if request.method != 'POST':
        # First visit: render the empty upload form.
        return render(request, 'd2lstat/index.html', {'form': UploadFileForm()})
    uploads = request.FILES
    process_file(uploads['usage'].temporary_file_path(),
                 uploads['full'].temporary_file_path(),
                 uploads['part'].temporary_file_path(),
                 request.POST['semester'],
                 request.POST['total_courses'])
    return render(request, 'd2lstat/report.html')
def virtualClassroomStats(request):
    """Show the virtual-classroom upload form, or compute stats on POST."""
    if request.method == 'POST':
        uploads = request.FILES
        stats = calculateVirtualClassroomStats(
            uploads['usage'].temporary_file_path(),
            uploads['full'].temporary_file_path(),
            uploads['part'].temporary_file_path(),
            uploads['virtualClassroomData'].temporary_file_path())
        return render(request, 'd2lstat/virtualClassroomStatsResults.html',
                      {'statsList': stats})
    return render(request, 'd2lstat/virtualClassroomStats.html',
                  {'form': VirtualClassroomUsageForm()})
def facultyNotUsingD2L(request):
    """List faculty with no D2L usage for a semester (form on GET)."""
    if request.method == 'POST':
        uploads = request.FILES
        stats = facultyNotUsingD2LCalculation(
            uploads['usage'].temporary_file_path(),
            uploads['full'].temporary_file_path(),
            uploads['part'].temporary_file_path(),
            request.POST['semester'])
        return render(request, 'd2lstat/FacultyNotUsingD2LResults.html',
                      {'statsList': stats})
    return render(request, 'd2lstat/FacultyNotUsingD2L.html',
                  {'form': FacultyNotUsingD2LForm()})
| UofS-CTLE/Projtrack3 | ctleweb/d2lstat/views.py | Python | gpl-3.0 | 1,924 |
import csv
import re
import dedupe.core
def preProcess(column):
    """Normalise a raw CSV field: collapse runs of spaces, turn newlines
    into spaces, trim surrounding quotes and lower-case the result."""
    column = re.sub(r' +', ' ', column)
    column = re.sub(r'\n', ' ', column)
    return column.strip().strip('"').strip("'").lower()
def readData(filename) :
    """Read a CSV file into {row_index: frozendict} plus the header names.

    NOTE: Python 2 only -- relies on ``dict.iteritems``.
    """
    data_d = {}
    with open(filename) as f :
        reader = csv.DictReader(f, delimiter=',', quotechar='"')
        for i, row in enumerate(reader) :
            # Clean every field before freezing the row.
            clean_row = [(k, preProcess(v)) for k,v in row.iteritems()]
            data_d[i] = dedupe.core.frozendict(clean_row)
    return(data_d, reader.fieldnames)
def print_csv(input_file, output_file, header, clustered_dupes) :
    """Write ``input_file`` back out with a leading Group_ID column.

    Rows belonging to a cluster get their 1-based cluster number; rows
    not matched into any cluster get the literal 'x'.

    NOTE: Python 2 only -- uses ``reader.next()``.
    NOTE(review): ``header.insert(0, ...)`` mutates the caller's list.
    """
    orig_data = {}
    with open(input_file) as f :
        reader = csv.reader(f)
        # Skip the header line; data rows are keyed by position.
        reader.next()
        for row_id, row in enumerate(reader) :
            orig_data[row_id] = row
    #with open("examples/output/ECP_dupes_list_" + str(time.time()) + ".csv","w") as f :
    with open(output_file,"w") as f :
        writer = csv.writer(f)
        heading_row = header
        heading_row.insert(0, "Group_ID")
        writer.writerow(heading_row)
        dupe_id_list = []
        for group_id, cluster in enumerate(clustered_dupes, 1) :
            for candidate in sorted(cluster) :
                dupe_id_list.append(candidate)
                row = orig_data[candidate]
                row.insert(0, group_id)
                writer.writerow(row)
        # Everything that never appeared in a cluster is written last.
        for id in orig_data :
            if id not in set(dupe_id_list) :
                row = orig_data[id]
                row.insert(0, 'x')
                writer.writerow(row)
| neozhangthe1/dedupe | tests/exampleIO.py | Python | mit | 1,455 |
# Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests the Security Center API client."""
import json
import unittest
import unittest.mock as mock
import google.auth
from google.oauth2 import credentials
from google.cloud.forseti.common.gcp_api import securitycenter
from google.cloud.forseti.common.gcp_api import errors as api_errors
from tests import unittest_utils
from tests.common.gcp_api.test_data import fake_securitycenter_responses as fake_cscc
from tests.common.gcp_api.test_data import http_mocks
class SecurityCenterTest(unittest_utils.ForsetiTestCase):
    """Test the Security Center Client."""
    # google.auth.default is patched so no real GCP credentials are needed.
    @classmethod
    @mock.patch.object(
        google.auth, 'default',
        return_value=(mock.Mock(spec_set=credentials.Credentials),
                      'test-project'))
    def setUpClass(cls, mock_google_credential):
        """Set up."""
        # Rate-limit config required by the client constructor.
        fake_global_configs = {
            'securitycenter': {'max_calls': 1, 'period': 1.1}}
        cls.securitycenter_client = securitycenter.SecurityCenterClient(fake_global_configs)
        cls.project_id = 111111
        cls.source_id = 'organizations/111/sources/222'
    def test_create_findings(self):
        """Test create cscc findings."""
        # Mocked HTTP layer returns the canned successful response.
        http_mocks.mock_http_response(
            json.dumps(fake_cscc.EXPECTED_CREATE_FINDING_RESULT))
        result = self.securitycenter_client.create_finding(
            'fake finding',
            source_id=self.source_id
        )
        self.assertEqual(fake_cscc.EXPECTED_CREATE_FINDING_RESULT, result)
    def test_create_findings_raises(self):
        """Test create cscc finding raises exception."""
        # A 403 from the API must surface as ApiExecutionError.
        http_mocks.mock_http_response(fake_cscc.PERMISSION_DENIED, '403')
        fake_finding = {'source_properties': {'violation_data': 'foo'}}
        with self.assertRaises(api_errors.ApiExecutionError):
            self.securitycenter_client.create_finding(
                fake_finding,
                source_id=self.source_id)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| forseti-security/forseti-security | tests/common/gcp_api/securitycenter_test.py | Python | apache-2.0 | 2,601 |
# -*- coding: utf-8 -*-
# Time-stamp: <2011-12-30 12:16:47 armin>
import unittest
from memacs.lib.reader import CommonReader
class TestReader(unittest.TestCase):
    """Tests for memacs' CommonReader helper."""
    def test_file_no_path(self):
        """An empty path must make the reader exit with SystemExit."""
        # assertRaises replaces the manual try/except +
        # assertTrue(False, ...) anti-pattern and gives a clearer
        # failure message when no exception is raised.
        with self.assertRaises(SystemExit):
            CommonReader.get_data_from_file("")
| dwinters42/Memacs | memacs/lib/tests/reader_test.py | Python | gpl-3.0 | 360 |
#!/usr/bin/env python2.7
__author__ = ['[Brandon Amos](http://bamos.github.io)']
__date__ = '2014.04.19'
"""
This script (music-organizer.py) organizes my music collection for
iTunes and [mpv](http://mpv.io) using tag information.
The directory structure is `<artist>/<track>`, where `<artist>` and `<track>`
are lower case strings separated by dashes.
See my blog post
[Using Python to organize a music directory](http://bamos.github.io/2014/07/05/music-organizer/)
for a more detailed overview of this script.
"""
import argparse
import glob
import os
import re
import shutil
import sys
from mutagen.easyid3 import EasyID3
parser = argparse.ArgumentParser(
description='''Organizes a music collection using tag information.
The directory format is that the music collection consists of
artist subdirectories, and there are 2 modes to operate on
the entire collection or a single artist.
All names are made lowercase and separated by dashes for easier
navigation in a Linux filesystem.'''
)
parser.add_argument('--delete-conflicts', action='store_true',
dest='delete_conflicts',
help='''If an artist has duplicate tracks with the same name,
delete them. Note this might always be best in case an
artist has multiple versions. To keep multiple versions,
fix the tag information.''')
parser.add_argument('--ignore-multiple-artists', action='store_true',
dest='ignore_multiple_artists',
help='''This script will prompt for confirmation if an artist
directory has songs with more than 2 different tags.
This flag disables the confirmation and won't perform
this check.''')
parser.add_argument('--collection', action='store_true',
help='''Operate in 'collection' mode and run 'artist' mode
on every subdirectory.''')
parser.add_argument('--artist', action='store_true',
help='''Operate in 'artist' mode and copy all songs to the
root of the directory and cleanly format the names to
be easily typed and navigated in a shell.''')
parser.add_argument('--delete-unrecognized-extensions', action='store_true',
dest='delete_unrecognized')
args = parser.parse_args()
# Exactly one of the two mode flags must be supplied.
if args.collection and args.artist:
    print("Error: Only provide 1 of '--collection' or '--artist'.")
    sys.exit(-1)
elif not (args.collection or args.artist):
    print("Error: Mode '--collection' or '--artist' not provided.")
    sys.exit(-1)
# Maps a string such as 'The Beatles' to 'the-beatles'.
def toNeat(s):
    """Map a tag string such as 'The Beatles' to 'the-beatles'."""
    s = s.lower().replace("&", "and")
    # Characters that become separators vs. ones that simply vanish.
    padded = r"()\[\],.\\\?\#/\!\$\:\;"
    unpadded = r"'\""
    # Force a space after any padded character, then drop them all.
    s = re.sub(r"([" + padded + r"])([^ ])", "\\1 \\2", s)
    s = re.sub("[" + padded + unpadded + "]", "", s)
    # Collapse spaces/asterisks/underscores into single dashes and trim
    # dashes from both ends.
    for pattern, repl in ((r"[ \*\_]+", "-"), ("-+", "-"),
                          ("^-*", ""), ("-*$", "")):
        s = re.sub(pattern, repl, s)
    # Anything outside [0-9a-z-+=] means the tag was not cleanable.
    if re.search("[^0-9a-z\-\+\=]", s):
        print("Error: Unrecognized character in '" + s + "'")
        sys.exit(-42)
    return s
def artist(artistDir):
    """Flatten one artist directory: every mp3 anywhere below it is
    renamed to '<neat-title>.mp3' in the directory root, then all
    subdirectories are deleted."""
    print("Organizing artist '" + artistDir + "'.")
    if not args.ignore_multiple_artists:
        artists = set()
        for dirname, dirnames, filenames in os.walk(artistDir):
            # Make sure there aren't a lot of different artists
            # in case this was called from the wrong directory.
            for filename in filenames:
                try:
                    audio = EasyID3(os.path.join(dirname, filename))
                    artist = audio['artist'][0].decode()
                    artists.add(artist)
                except:
                    pass
        if len(artists) > 2:
            # Interactive confirmation before merging several artists.
            # NOTE(review): raw_input/decode are Python 2 constructs --
            # this script targets 2.7 (see the shebang).
            while True:
                print("Warning: More than 2 artists found in '{}'.".format(
                    artistDir))
                print("This will move all songs to the '{}' directory.".format(
                    artistDir))
                print("Continue? yes/no")
                choice = raw_input().lower()
                valid = {"yes": True, "y": True, "no": False, "n": False}
                if choice in valid:
                    if valid[choice]:
                        break
                    else:
                        print("Exiting.")
                        sys.exit(-1)
    delete_dirs = []
    for dirname, dirnames, filenames in os.walk(artistDir):
        # Move all the files to the root directory.
        for filename in filenames:
            ext = os.path.splitext(filename)[1]
            if ext == ".mp3":
                fullPath = os.path.join(dirname, filename)
                print("file: " + str(fullPath))
                try:
                    audio = EasyID3(fullPath)
                    title = audio['title'][0].encode('ascii', 'ignore')
                    print("  title: " + title)
                except:
                    title = None
                if not title:
                    print("Error: title not found for '" + filename + "'")
                    sys.exit(-42)
                neatTitle = toNeat(title)
                print("  neatTitle: " + neatTitle)
                newFullPath = os.path.join(artistDir, neatTitle + ext)
                print("  newFullPath: " + newFullPath)
                if newFullPath != fullPath:
                    if os.path.isfile(newFullPath):
                        # Duplicate track name: delete or abort.
                        if args.delete_conflicts:
                            os.remove(fullPath)
                            print("File exists: '" + newFullPath + "'")
                            print("Deleted: '" + fullPath + "'")
                        else:
                            print("Error: File exists: '" + newFullPath + "'")
                            sys.exit(-42)
                    else:
                        os.rename(fullPath, newFullPath)
            elif ext == ".pdf":
                pass
            else:
                if not args.delete_unrecognized:
                    print("Error: Unrecognized file extension in '{}'.".format(
                        filename))
                    sys.exit(-42)
        # Delete all subdirectories.
        for subdirname in dirnames:
            delete_dirs.append(subdirname)
    for d in delete_dirs:
        shutil.rmtree(os.path.join(artistDir, d), ignore_errors=True)
def song(filename):
    """Move a single track into '<artist>/<title><ext>' based on its
    ID3 tags, creating the artist directory if needed."""
    if filename[0] == '.':
        print("Ignoring dotfile: '{}'".format(filename))
        return
    print("Organizing song '" + filename + "'.")
    ext = os.path.splitext(filename)[1]
    try:
        audio = EasyID3(filename)
        artist = audio['artist'][0].encode('ascii', 'ignore')
        title = audio['title'][0].encode('ascii', 'ignore')
        print("  artist: " + artist)
        print("  title: " + title)
    except Exception:
        artist = None
        title = None
    # BUG FIX: with missing/unreadable tags this used to fall through to
    # toNeat(None) and crash with an AttributeError; exit with a clear
    # message instead, consistent with artist()'s missing-title handling.
    if not (artist and title):
        print("Error: artist/title tags not found for '" + filename + "'")
        sys.exit(-42)
    neatArtist = toNeat(artist)
    neatTitle = toNeat(title)
    print("  neatArtist: " + neatArtist)
    print("  neatTitle: " + neatTitle)
    if not os.path.isdir(neatArtist):
        os.mkdir(neatArtist)
    newFullPath = os.path.join(neatArtist, neatTitle + ext)
    os.rename(filename, newFullPath)
def collection():
    """Organize every artist directory and loose track in the cwd."""
    skipped = ('iTunes', 'playlists')
    for entry in glob.glob('*'):
        if os.path.isdir(entry):
            # Library-management folders are left alone.
            if entry not in skipped:
                artist(entry)
        elif os.path.isfile(entry):
            song(entry)
# Entry point: '--artist' treats the current directory as one artist;
# otherwise the whole collection is processed.
if args.artist:
    artist('.')
else:
    collection()
print("\nComplete!")
| bamos/python-scripts | python2.7/music-organizer.py | Python | mit | 7,790 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import re
from io import open
import ruamel.yaml as yaml
from shellfoundry.exceptions import YmlFieldMissingException
from shellfoundry.utilities.constants import (
TEMPLATE_PROPERTY,
TEMPLATE_VERSION,
TOSCA_META_LOCATION,
)
class DefinitionModification(object):
    """Edits a packaged shell's TOSCA.meta and shell-definition.yaml
    files in place (field updates, property templates, artifact
    lookups)."""
    def __init__(self, shell_path):
        self.shell_path = shell_path
        # TOSCA.meta's Entry-Definitions names the shell definition yaml.
        self.entry_definition = os.path.join(
            self.shell_path, self._find_entry_definition()
        )
    def edit_definition(self, field, value):
        """Modify shell-definition.yaml.
        :params field str: '/'-separated field path to modify
        :params value str: new value to update
        """
        self._edit_yaml(self.entry_definition, field, value)
    def edit_tosca_meta(self, field, value):
        """Set ``field: value`` in TOSCA.meta, appending the line when the
        field is not already present."""
        with open(
            os.path.join(self.shell_path, TOSCA_META_LOCATION), "r", encoding="utf8"
        ) as tosca_file:
            is_changed = False
            tosca_data = []
            for line in tosca_file:
                if field in line:
                    # Replace everything after the colon on the matching line.
                    line = re.sub(r":\s+.*", ": {}".format(value), line)
                    is_changed = True
                tosca_data.append(line)
            if not is_changed:
                tosca_data.append("\n{field}: {value}".format(field=field, value=value))
        with open(
            os.path.join(self.shell_path, TOSCA_META_LOCATION), "w", encoding="utf8"
        ) as tosca_file:
            tosca_file.writelines(tosca_data)
    def add_field_to_definition(self, field, value=None, overwrite=False):
        """Add new field to shell-definition.yaml.
        :params field str: '/'-separated field path to add
        :params value str: value to add (defaults to the template version)
        :params overwrite bool: overwrite value if it already exists
        """
        try:
            if overwrite:
                self.edit_definition(field, value)
        except YmlFieldMissingException:
            # Field is absent: insert it under its parent section.
            value = value or self._get_value_from_definition(TEMPLATE_VERSION)
            yaml_parser = yaml.YAML()
            loaded = self._load_yaml(
                yaml_parser=yaml_parser, yaml_file=self.entry_definition
            )
            section, field_name = field.split("/", 1)
            loaded[section].update({field_name: value})
            self._edit_file(
                yaml_file=self.entry_definition, yaml_parser=yaml_parser, data=loaded
            )
    def add_properties(self, attribute_names):
        """Add property to shell-definition.yaml file.
        :params fields tuple/list: sequence of properties name that will be added
        """
        results = list(map(self._add_property, attribute_names))
        # Each inserted template property is then written out commented.
        for item in zip(attribute_names, results):
            self._comment_attribute(*item)
    def get_artifacts_files(self, artifact_name_list):
        """Return {artifact_name: file} for the requested artifact names.

        NOTE(review): returns after the first node type that has an
        'artifacts' section -- confirm multi-node shells are intended to
        be ignored."""
        yaml_parser = yaml.YAML()
        shell_definition = self._load_yaml(yaml_parser, self.entry_definition)
        for node_type in list(shell_definition["node_types"].values()):
            if "artifacts" not in node_type:
                continue
            result = {}
            for artifact_name, artifact in node_type["artifacts"].items():
                if artifact_name in artifact_name_list:
                    result.update({artifact_name: artifact["file"]})
            return result
    def _find_entry_definition(self):
        """Read the Entry-Definitions path out of TOSCA.meta."""
        with open(
            os.path.join(self.shell_path, TOSCA_META_LOCATION), "r"
        ) as tosca_file:
            entry_definition = dict(
                list(map(str.strip, str(line).split(":", 1))) for line in tosca_file
            )["Entry-Definitions"]
            return entry_definition
    def _load_yaml(self, yaml_parser, yaml_file):
        """Parse ``yaml_file``; prints and returns None on parse errors."""
        with open(yaml_file, encoding="utf8") as stream:
            try:
                yaml_parser.indent(offset=2)
                return yaml_parser.load(stream=stream)
            except yaml.YAMLError as exc:
                print(exc)  # noqa: T001
    def _edit_yaml(self, yaml_file, field, value):
        """Set a '/'-separated ``field`` path in ``yaml_file`` to ``value``."""
        yaml_parser = yaml.YAML()
        loaded = self._load_yaml(yaml_parser=yaml_parser, yaml_file=yaml_file)
        field_name = field.split("/")[-1]
        self._get_inner_dict_recursively(loaded, field)[field_name] = value
        self._edit_file(yaml_file=yaml_file, yaml_parser=yaml_parser, data=loaded)
    def _edit_file(self, yaml_file, yaml_parser, data):
        """Serialize ``data`` back to ``yaml_file``."""
        with open(yaml_file, "wb") as f:
            yaml_parser.dump(data, stream=f)
    def _get_inner_dict_recursively(self, dic, field):
        """Walk a '/'-separated path and return the dict holding its
        last segment; raises YmlFieldMissingException when absent."""
        split = field.split("/", 1)
        i = dic.get(split[0])
        if not i:
            raise YmlFieldMissingException("Field does not exists")
        if not isinstance(i, dict) and len(split) == 1:
            return dic
        return self._get_inner_dict_recursively(i, split[1])
    def _get_value_from_definition(self, field):
        """Return the value currently stored at ``field`` in the
        entry definition."""
        yaml_parser = yaml.YAML()
        loaded = self._load_yaml(yaml_parser, self.entry_definition)
        field_name = field.split("/")[-1]
        value = self._get_inner_dict_recursively(loaded, field)[field_name]
        return value
    def _add_property(self, attribute_name):
        """Add property to shell-definition.yaml file.
        :params fields list: list of properties name that will be added
        Returns True when a brand-new 'properties' section was created.
        """
        yaml_parser = yaml.YAML()
        loaded = self._load_yaml(yaml_parser, self.entry_definition)
        nodes = loaded.get("node_types")
        is_last = False
        if nodes:
            for key, value in nodes.items():
                # Only the vendor node type receives template properties.
                if key.startswith("vendor."):
                    properties_data = value.get("properties", {})
                    if properties_data:
                        properties_data.update({attribute_name: TEMPLATE_PROPERTY})
                        is_last = False
                    else:
                        value.insert(
                            1, "properties", {attribute_name: TEMPLATE_PROPERTY}
                        )
                        is_last = True
                    break
        self._edit_file(
            yaml_file=self.entry_definition, yaml_parser=yaml_parser, data=loaded
        )
        return is_last
    def _comment_attribute(self, attribute_name, is_last=False):
        """Comment attribute in shell-definition.yaml file."""
        spaces = None
        need_comment = False
        lines = []
        with open(self.entry_definition, "r", encoding="utf8") as f:
            for line in f:
                stripped = line.lstrip(" ")
                if stripped.startswith("{}:".format(attribute_name)):
                    if is_last:
                        # Also comment the 'properties:' header just above.
                        lines[-1] = "# {}".format(lines[-1])
                    spaces = len(line) - len(stripped)
                    need_comment = True
                    lines.append("# {}".format(line))
                    continue
                # Keep commenting more-indented lines nested under the
                # attribute being commented out.
                if need_comment and spaces and (len(line) - len(stripped)) > spaces:
                    lines.append("# {}".format(line))
                    continue
                need_comment = False
                lines.append(line)
        with open(self.entry_definition, "w", encoding="utf8") as f:
            f.writelines(lines)
| QualiSystems/shellfoundry | shellfoundry/utilities/modifiers/definition/definition_modification.py | Python | apache-2.0 | 7,265 |
# coding=utf-8
from ..packet import Packet
from ..proto import Proto
from ..flags import Flags
class ChangeUser(Packet):
    """MySQL COM_CHANGE_USER command packet: re-authenticates the
    current connection as another user/schema."""
    __slots__ = ('user', 'authResponse', 'schema', 'characterSet',
                 'capabilityFlags') + Packet.__slots__
    def __init__(self):
        super(ChangeUser, self).__init__()
        self.user = ''
        self.authResponse = ''
        self.schema = ''
        self.characterSet = 0
        self.capabilityFlags = 0
    def setCapabilityFlag(self, flag):
        self.capabilityFlags |= flag
    def removeCapabilityFlag(self, flag):
        self.capabilityFlags &= ~flag
    def toggleCapabilityFlag(self, flag):
        self.capabilityFlags ^= flag
    def hasCapabilityFlag(self, flag):
        # True only when every bit of ``flag`` is set.
        return ((self.capabilityFlags & flag) == flag)
    def getPayload(self):
        """Serialize the packet body (command byte + fields)."""
        payload = bytearray()
        payload.extend(Proto.build_byte(Flags.COM_CHANGE_USER))
        payload.extend(Proto.build_null_str(self.user))
        # Auth-response encoding depends on CLIENT_SECURE_CONNECTION.
        if not self.hasCapabilityFlag(Flags.CLIENT_SECURE_CONNECTION):
            payload.extend(Proto.build_lenenc_str(self.authResponse))
        else:
            payload.extend(Proto.build_null_str(self.authResponse))
        payload.extend(Proto.build_null_str(self.schema))
        payload.extend(Proto.build_fixed_int(2, self.characterSet))
        return payload
    @staticmethod
    def loadFromPacket(packet):
        """Parse a raw COM_CHANGE_USER packet into a ChangeUser object."""
        obj = ChangeUser()
        proto = Proto(packet, 3)
        obj.sequenceId = proto.get_fixed_int(1)
        proto.get_filler(1)
        obj.user = proto.get_null_str()
        # NOTE(review): obj.capabilityFlags is still 0 here, so this
        # check always takes the lenenc branch -- confirm intended.
        if not obj.hasCapabilityFlag(Flags.CLIENT_SECURE_CONNECTION):
            obj.authResponse = proto.get_lenenc_str()
        else:
            obj.authResponse = proto.get_null_str()
        obj.schema = proto.get_null_str()
        obj.characterSet = proto.get_fixed_int(2)
        return obj
| MPjct/PyMP | mysql_proto/com/changeuser.py | Python | mit | 1,870 |
from course_modes.models import CourseMode
from factory import DjangoModelFactory
# Factories don't have __init__ methods, and are self documenting
# pylint: disable=W0232
class CourseModeFactory(DjangoModelFactory):
    """Factory producing CourseMode rows; defaults to a free 'audit'
    mode priced in USD for a fixed test course."""
    FACTORY_FOR = CourseMode
    course_id = u'MITx/999/Robot_Super_Course'
    mode_slug = 'audit'
    mode_display_name = 'audit course'
    min_price = 0
    currency = 'usd'
| morpheby/levelup-by | common/djangoapps/course_modes/tests/factories.py | Python | agpl-3.0 | 397 |
#!/usr/bin/env python
# csv_writer_a.py
# Copyright (C) ContinuumBridge Limited, 2014 - All Rights Reserved
# Written by Peter Claydon
#
ModuleName = "csv_writer"
import sys
import os.path
import time
import logging
from cbcommslib import CbApp
from cbconfig import *
# Output CSV lives in the bridge's config directory.
FILENAME = CB_CONFIG_DIR + "csv_writer.csv"
# Default app configuration, optionally overridden by csv_writer.config.
# NOTE(review): boolean options are the *strings* "True"/"False" and are
# compared as strings elsewhere (e.g. config["temperature"] == 'True').
config = {
    "temperature": "True",
    "temp_min_change": 0.2,
    "irtemperature": "True",
    "irtemp_min_change": 0.5,
    "humidity": "True",
    "humidity_min_change": 0.2,
    "buttons": "True",
    "accel": "False",
    "accel_min_change": 0.02,
    "accel_polling_interval": 3.0,
    "gyro": "False",
    "gyro_min_change": 0.5,
    "gyro_polling_interval": 3.0,
    "magnet": "True",
    "magnet_min_change": 1.5,
    "magnet_polling_interval": 3.0,
    "binary": "True",
    "luminance": "True",
    "luminance_min_change": 1.0,
    "slow_polling_interval": 300
}
class DataManager:
    """ Managers data storage for all sensors """
    # Buffers the latest value per device/service column and flushes one
    # timestamped CSV line to FILENAME whenever the timestamp changes.
    def __init__(self, sendMessage):
        self.sendMessage = sendMessage
        self.now = self.niceTime(time.time())
        self.cvsList = []   # CSV column headings, one per device/service
        self.cvsLine = []   # pending value for each column
        self.index = []     # device friendly-names, in column-group order
    def niceTime(self, timeStamp):
        """Format an epoch timestamp as 'YYYY:MM:DD, HH:MM:SS:mmm'."""
        localtime = time.localtime(timeStamp)
        milliseconds = '%03d' % int((timeStamp - int(timeStamp)) * 1000)
        now = time.strftime('%Y:%m:%d, %H:%M:%S:', localtime) + milliseconds
        return now
    def writeCVS(self, timeStamp):
        """Flush the buffered line when the formatted timestamp advances."""
        self.then = self.now
        self.now = self.niceTime(timeStamp)
        if self.now != self.then:
            self.f.write(self.then + ",")
            for i in range(len(self.cvsLine)):
                self.f.write(self.cvsLine[i] + ",")
                self.cvsLine[i] = ""
            self.f.write("\n")
    def initFile(self, idToName):
        """Allocate one column group per device and open the CSV file,
        writing the header only when the file is newly created."""
        self.idToName = idToName
        for i in self.idToName:
            self.index.append(self.idToName[i])
        # Column layout inside each device's group; offsets below
        # (storeTemp=0, storeIrTemp=1, ...) must match this order.
        # NOTE(review): "rel humidily" typo is baked into existing CSV
        # headers; fixing it would mismatch previously written files.
        services = ["temperature",
                    "ir_temperature",
                    "accel x", "accel y", "accel z",
                    "buttons l", "buttons r",
                    "rel humidily",
                    "gyro x", "gyro y", "gyro z"]
        self.numberServices = len(services)
        for i in self.idToName:
            for s in services:
                self.cvsList.append(s)
                self.cvsLine.append("")
        # Third open() arg 0 = unbuffered (Python 2 style open).
        if os.path.isfile(FILENAME):
            self.f = open(FILENAME, "a+", 0)
        else:
            self.f = open(FILENAME, "a+", 0)
            for d in self.idToName:
                self.f.write(d + ", " + self.idToName[d] + "\n")
            self.f.write("date, time, ")
            for i in self.cvsList:
                self.f.write(i + ", ")
            self.f.write("\n")
    def storeAccel(self, deviceID, timeStamp, a):
        # Columns 2-4 of the device's group.
        self.writeCVS(timeStamp)
        index = self.index.index(deviceID)
        for i in range(3):
            self.cvsLine[index*self.numberServices + 2 + i] = str("%2.3f" %a[i])
    def storeTemp(self, deviceID, timeStamp, temp):
        # Column 0 of the device's group.
        self.writeCVS(timeStamp)
        index = self.index.index(deviceID)
        self.cvsLine[index*self.numberServices + 0] = str("%2.1f" %temp)
    def storeIrTemp(self, deviceID, timeStamp, temp):
        # Column 1 of the device's group.
        self.writeCVS(timeStamp)
        index = self.index.index(deviceID)
        self.cvsLine[index*self.numberServices + 1] = str("%2.1f" %temp)
    def storeHumidity(self, deviceID, timeStamp, h):
        # Column 7 of the device's group.
        self.writeCVS(timeStamp)
        index = self.index.index(deviceID)
        self.cvsLine[index*self.numberServices + 7] = str("%2.1f" %h)
    def storeButtons(self, deviceID, timeStamp, buttons):
        # Columns 5-6 of the device's group.
        self.writeCVS(timeStamp)
        index = self.index.index(deviceID)
        self.cvsLine[index*self.numberServices + 5] = str(buttons["leftButton"])
        self.cvsLine[index*self.numberServices + 6] = str(buttons["rightButton"])
    def storeGyro(self, deviceID, timeStamp, gyro):
        # Not written to the CSV.
        pass
    def storeMagnet(self, deviceID, timeStamp, magnet):
        # Columns 8-10 of the device's group.
        self.writeCVS(timeStamp)
        index = self.index.index(deviceID)
        for i in range(3):
            self.cvsLine[index*self.numberServices + 8 + i] = str("%2.3f" %magnet[i])
    def storeBinary(self, deviceID, timeStamp, b):
        # Not written to the CSV (no column allocated).
        pass
    def storeLuminance(self, deviceID, timeStamp, v):
        # Not written to the CSV (no column allocated).
        pass
class Accelerometer:
    """Forwards an accelerometer reading when any axis moves by more
    than the configured threshold since the last stored reading."""
    def __init__(self, id):
        self.previous = [0.0, 0.0, 0.0]
        self.id = id
    def processAccel(self, resp):
        reading = [resp["data"][axis] for axis in ("x", "y", "z")]
        threshold = config["accel_min_change"]
        changed = any(abs(new - old) > threshold
                      for new, old in zip(reading, self.previous))
        if changed:
            self.dm.storeAccel(self.id, resp["timeStamp"], reading)
            self.previous = reading
class TemperatureMeasure():
    """Reports temperature once per minute ('regular' mode) or whenever
    it changes by the configured amount ('on_change' mode)."""
    def __init__(self, id):
        self.mode = "on_change"  # "regular" or "on_change"
        self.minChange = 0.2
        self.id = id
        now = time.time()
        self.prevEpochMin = int(now - now % 60)
        self.currentTemp = 0.0
    def processTemp(self, resp):
        temp = resp["data"]
        timeStamp = resp["timeStamp"]
        if self.mode == "regular":
            # Emit one sample per minute boundary, stamped with the
            # minute that just finished.
            minute = int(timeStamp - timeStamp % 60)
            if minute == self.prevEpochMin:
                return
            self.dm.storeTemp(self.id, self.prevEpochMin, temp)
            self.prevEpochMin = minute
        elif abs(temp - self.currentTemp) >= config["temp_min_change"]:
            self.dm.storeTemp(self.id, timeStamp, temp)
            self.currentTemp = temp
class IrTemperatureMeasure():
    """Reports IR temperature once per minute ('regular' mode) or
    whenever it changes by the configured amount ('on_change' mode)."""
    def __init__(self, id):
        # self.mode is either regular or on_change
        self.mode = "on_change"
        self.minChange = 0.2
        self.id = id
        epochTime = time.time()
        self.prevEpochMin = int(epochTime - epochTime%60)
        self.currentTemp = 0.0
    def processIrTemp(self, resp):
        timeStamp = resp["timeStamp"]
        temp = resp["data"]
        if self.mode == "regular":
            epochMin = int(timeStamp - timeStamp%60)
            if epochMin != self.prevEpochMin:
                # BUG FIX: this branch used to call storeTemp, writing IR
                # samples into the ambient-temperature CSV column; it now
                # matches the on_change branch below.
                self.dm.storeIrTemp(self.id, self.prevEpochMin, temp)
                self.prevEpochMin = epochMin
        else:
            if abs(temp-self.currentTemp) >= config["irtemp_min_change"]:
                self.dm.storeIrTemp(self.id, timeStamp, temp)
                self.currentTemp = temp
class Buttons():
    """Passes every button event straight through to the data manager."""
    def __init__(self, id):
        self.id = id
    def processButtons(self, resp):
        # Button presses are always significant; no change filtering.
        self.dm.storeButtons(self.id, resp["timeStamp"], resp["data"])
class Gyro():
    """Forwards a gyroscope reading when any axis changes by more than
    the configured threshold since the last stored reading."""
    def __init__(self, id):
        self.id = id
        self.previous = [0.0, 0.0, 0.0]
    def processGyro(self, resp):
        reading = [resp["data"][axis] for axis in ("x", "y", "z")]
        threshold = config["gyro_min_change"]
        if any(abs(new - old) > threshold
               for new, old in zip(reading, self.previous)):
            self.dm.storeGyro(self.id, resp["timeStamp"], reading)
            self.previous = reading
class Magnet():
    """Forwards a magnetometer reading when any axis changes by more
    than the configured threshold since the last stored reading."""
    def __init__(self, id):
        self.id = id
        self.previous = [0.0, 0.0, 0.0]
    def processMagnet(self, resp):
        reading = [resp["data"][axis] for axis in ("x", "y", "z")]
        threshold = config["magnet_min_change"]
        if any(abs(new - old) > threshold
               for new, old in zip(reading, self.previous)):
            self.dm.storeMagnet(self.id, resp["timeStamp"], reading)
            self.previous = reading
class Humid():
    """Reports relative humidity whenever it moves past the configured
    minimum change."""
    def __init__(self, id):
        self.id = id
        self.previous = 0.0
    def processHumidity(self, resp):
        reading = resp["data"]
        if abs(reading - self.previous) >= config["humidity_min_change"]:
            self.dm.storeHumidity(self.id, resp["timeStamp"], reading)
            self.previous = reading
class Binary():
    """Records on/off transitions, writing the old state one second
    before the new one so plotted data shows a clean step."""
    def __init__(self, id):
        self.id = id
        self.previous = 0
    def processBinary(self, resp):
        state = 1 if resp["data"] == "on" else 0
        if state == self.previous:
            return
        when = resp["timeStamp"]
        self.dm.storeBinary(self.id, when - 1.0, self.previous)
        self.dm.storeBinary(self.id, when, state)
        self.previous = state
class Luminance():
    """Reports luminance whenever it changes by at least the configured
    minimum amount."""
    def __init__(self, id):
        self.id = id
        self.previous = 0
    def processLuminance(self, resp):
        reading = resp["data"]
        if abs(reading - self.previous) >= config["luminance_min_change"]:
            self.dm.storeLuminance(self.id, resp["timeStamp"], reading)
            self.previous = reading
class App(CbApp):
def __init__(self, argv):
logging.basicConfig(filename=CB_LOGFILE,level=CB_LOGGING_LEVEL,format='%(asctime)s %(message)s')
self.appClass = "monitor"
self.state = "stopped"
self.status = "ok"
configFile = CB_CONFIG_DIR + "csv_writer.config"
global config
try:
with open(configFile, 'r') as configFile:
newConfig = json.load(configFile)
logging.info('%s Read eew_app.config', ModuleName)
config.update(newConfig)
except:
logging.warning('%s eew_app.config does not exist or file is corrupt', ModuleName)
for c in config:
if c.lower in ("true", "t", "1"):
config[c] = True
elif c.lower in ("false", "f", "0"):
config[c] = False
logging.debug('%s Config: %s', ModuleName, config)
self.accel = []
self.gyro = []
self.magnet = []
self.temp = []
self.irTemp = []
self.buttons = []
self.humidity = []
self.binary = []
self.luminance = []
self.power = []
self.devices = []
self.devServices = []
self.idToName = {}
#CbApp.__init__ MUST be called
CbApp.__init__(self, argv)
def setState(self, action):
if action == "clear_error":
self.state = "running"
else:
self.state = action
logging.debug("%s state: %s", ModuleName, self.state)
msg = {"id": self.id,
"status": "state",
"state": self.state}
self.sendManagerMessage(msg)
def onConcMessage(self, resp):
#logging.debug("%s resp from conc: %s", ModuleName, resp)
if resp["resp"] == "config":
msg = {
"msg": "req",
"verb": "post",
"channel": int(self.id[3:]),
"body": {
"msg": "services",
"appID": self.id,
"idToName": self.idToName,
"services": self.devServices
}
}
self.sendMessage(msg, "conc")
else:
msg = {"appID": self.id,
"msg": "error",
"message": "unrecognised response from concentrator"}
self.sendMessage(msg, "conc")
def onAdaptorData(self, message):
"""
This method is called in a thread by cbcommslib so it will not cause
problems if it takes some time to complete (other than to itself).
"""
#logging.debug("%s onadaptorData, message: %s", ModuleName, message)
if message["characteristic"] == "acceleration":
for a in self.accel:
if a.id == self.idToName[message["id"]]:
a.processAccel(message)
break
elif message["characteristic"] == "temperature":
for t in self.temp:
if t.id == self.idToName[message["id"]]:
t.processTemp(message)
break
elif message["characteristic"] == "ir_temperature":
for t in self.irTemp:
if t.id == self.idToName[message["id"]]:
t.processIrTemp(message)
break
elif message["characteristic"] == "gyro":
for g in self.gyro:
if g.id == self.idToName[message["id"]]:
g.processGyro(message)
break
elif message["characteristic"] == "magnetometer":
for g in self.magnet:
if g.id == self.idToName[message["id"]]:
g.processMagnet(message)
break
elif message["characteristic"] == "buttons":
for b in self.buttons:
if b.id == self.idToName[message["id"]]:
b.processButtons(message)
break
elif message["characteristic"] == "humidity":
for b in self.humidity:
if b.id == self.idToName[message["id"]]:
b.processHumidity(message)
break
elif message["characteristic"] == "binary_sensor":
for b in self.binary:
if b.id == self.idToName[message["id"]]:
b.processBinary(message)
break
elif message["characteristic"] == "power":
for b in self.power:
if b.id == self.idToName[message["id"]]:
b.processPower(message)
break
elif message["characteristic"] == "luminance":
for b in self.luminance:
if b.id == self.idToName[message["id"]]:
b.processLuminance(message)
break
def onAdaptorService(self, message):
#logging.debug("%s onAdaptorService, message: %s", ModuleName, message)
self.devServices.append(message)
serviceReq = []
for p in message["service"]:
# Based on services offered & whether we want to enable them
if p["characteristic"] == "temperature":
if config["temperature"] == 'True':
self.temp.append(TemperatureMeasure((self.idToName[message["id"]])))
self.temp[-1].dm = self.dm
serviceReq.append({"characteristic": "temperature",
"interval": config["slow_polling_interval"]})
elif p["characteristic"] == "ir_temperature":
if config["irtemperature"] == 'True':
self.irTemp.append(IrTemperatureMeasure(self.idToName[message["id"]]))
self.irTemp[-1].dm = self.dm
serviceReq.append({"characteristic": "ir_temperature",
"interval": config["slow_polling_interval"]})
elif p["characteristic"] == "acceleration":
if config["accel"] == 'True':
self.accel.append(Accelerometer((self.idToName[message["id"]])))
serviceReq.append({"characteristic": "acceleration",
"interval": config["accel_polling_interval"]})
self.accel[-1].dm = self.dm
elif p["characteristic"] == "gyro":
if config["gyro"] == 'True':
self.gyro.append(Gyro(self.idToName[message["id"]]))
self.gyro[-1].dm = self.dm
serviceReq.append({"characteristic": "gyro",
"interval": config["gyro_polling_interval"]})
elif p["characteristic"] == "magnetometer":
if config["magnet"] == 'True':
self.magnet.append(Magnet(self.idToName[message["id"]]))
self.magnet[-1].dm = self.dm
serviceReq.append({"characteristic": "magnetometer",
"interval": config["magnet_polling_interval"]})
elif p["characteristic"] == "buttons":
if config["buttons"] == 'True':
self.buttons.append(Buttons(self.idToName[message["id"]]))
self.buttons[-1].dm = self.dm
serviceReq.append({"characteristic": "buttons",
"interval": 0})
elif p["characteristic"] == "humidity":
if config["humidity"] == 'True':
self.humidity.append(Humid(self.idToName[message["id"]]))
self.humidity[-1].dm = self.dm
serviceReq.append({"characteristic": "humidity",
"interval": config["slow_polling_interval"]})
elif p["characteristic"] == "binary_sensor":
if config["binary"] == 'True':
self.binary.append(Binary(self.idToName[message["id"]]))
self.binary[-1].dm = self.dm
serviceReq.append({"characteristic": "binary_sensor",
"interval": 0})
elif p["characteristic"] == "luminance":
if config["luminance"] == 'True':
self.luminance.append(Luminance(self.idToName[message["id"]]))
self.luminance[-1].dm = self.dm
serviceReq.append({"characteristic": "luminance",
"interval": 0})
msg = {"id": self.id,
"request": "service",
"service": serviceReq}
self.sendMessage(msg, message["id"])
self.setState("running")
def onConfigureMessage(self, config):
""" Config is based on what sensors are available """
for adaptor in config["adaptors"]:
adtID = adaptor["id"]
if adtID not in self.devices:
# Because configure may be re-called if devices are added
name = adaptor["name"]
friendly_name = adaptor["friendly_name"]
logging.debug("%s Configure app. Adaptor name: %s", ModuleName, name)
self.idToName[adtID] = friendly_name.replace(" ", "_")
self.devices.append(adtID)
self.dm = DataManager(self.bridge_id)
self.dm.initFile(self.idToName)
self.setState("starting")
if __name__ == '__main__':
App(sys.argv)
| ContinuumBridge/csv_writer_app | csv_writer_a.py | Python | mit | 18,851 |
########################################################################################
## This file is a part of YAP package of scripts. https://github.com/shpakoo/YAP
## Distributed under the MIT license: http://www.opensource.org/licenses/mit-license.php
## Copyright (c) 2011-2013 Sebastian Szpakowski
########################################################################################
#################################################
## Given fasta and groups, create individual multifasta files.
#################################################
import sys
from optparse import OptionParser
_author="Sebastian Szpakowski"
_date="2011/01/01"
_version="Version 1"
#################################################
## Classes
##
#################################################
### Iterator over input fata file.
### Only reading when requested
### Useful for very large FASTA files
### with many sequences
class FastaParser:
    """
    Iterator over a FASTA file that reads one record at a time, so very
    large files with many sequences never have to fit in memory.

    Yields ``(header, sequence)`` tuples.  When *quals* is True the lines of
    a record are joined with spaces (quality files); otherwise they are
    concatenated directly.
    """
    def __init__(self, x, quals=False):
        self.filename = x
        self.fp = open(x, "r")
        self.currline = ""
        self.currentFastaName = ""
        self.currentFastaSequence = ""
        self.lastitem = False
        # file offset of the most recently returned record (was previously
        # only created on first use inside next())
        self.previoustell = 0
        if quals:
            self.linesep = " "
        else:
            self.linesep = ""

    def __iter__(self):
        return self

    #####
    def next(self):
        """Return the next (header, sequence) tuple; raise StopIteration at EOF."""
        for self.currline in self.fp:
            if self.currline.startswith(">"):
                # new header: emit the previously accumulated record, if any
                self.currline = self.currline[1:]
                if self.currentFastaName == "":
                    self.currentFastaName = self.currline
                else:
                    otpt = (self.currentFastaName.strip(), self.currentFastaSequence.strip())
                    self.currentFastaName = self.currline
                    self.currentFastaSequence = ""
                    # NOTE(review): under Python 3, tell() during file
                    # iteration raises OSError; this code targets Python 2.
                    self.previoustell = self.fp.tell()
                    return (otpt)
            else:
                self.addSequence(self.currline)

        # end of file: emit the final record exactly once
        if not self.lastitem:
            self.lastitem = True
            return (self.currentFastaName.strip(), self.currentFastaSequence.strip())
        else:
            raise StopIteration

    def addSequence(self, x):
        """Append one line of sequence data to the record being accumulated."""
        self.currentFastaSequence = "%s%s%s" % (self.currentFastaSequence, self.linesep, x.strip())

    def __str__(self):
        # BUG FIX: the original definition omitted *self*, so calling
        # str() on a FastaParser raised a TypeError.
        return ("reading file: %s" % self.filename)
#################################################
### Iterator over input file.
### every line is split on the separator and returned as a list of fields
### (no header-keyed dictionary is built, despite the name)
class GeneralPurposeParser:
    """
    Iterator over a delimited text file.  Each line is stripped, split on
    *sep* and returned as a list of fields.

    NOTE(review): lines are returned as lists, not dictionaries keyed by
    header name, and the *skip* argument is accepted but never used; both
    are kept unchanged for interface compatibility.
    """
    def __init__(self, file, skip=0, sep="\t"):
        self.filename = file
        self.fp = open(self.filename, "r")
        self.sep = sep
        self.linecounter = 0
        self.currline = ""

    def __iter__(self):
        return (self)

    def next(self):
        """Return the fields of the next line; raise StopIteration at EOF."""
        # BUG FIX (minor): removed the dead "otpt = dict()" local that was
        # created on every call and never used.
        for currline in self.fp:
            currline = currline.strip().split(self.sep)
            self.currline = currline
            self.linecounter = self.linecounter + 1
            return (currline)
        raise StopIteration

    def __str__(self):
        return "%s [%s]\n\t%s" % (self.filename, self.linecounter, self.currline)
#################################################
## Functions
##
#################################################
## Arguments
##
# Command-line options: the fasta file to split and the id -> group mapping.
parser = OptionParser()
#parser.add_option("-f", "--file", dest="filename",
# help="write report to FILE", metavar="FILE")
#parser.add_option("-q", "--quiet",
# action="store_false", dest="verbose", default=True,
# help="don't print status messages to stdout")
parser.add_option("-f", "--fasta", dest="fn_fasta", default="",
help="load fasta FILE", metavar="FILE")
parser.add_option("-g", "--groups", dest="fn_groups",
help="load group FILE", metavar="FILE")
(options, args) = parser.parse_args()
#################################################
## Begin
##
id2group = dict()
group2file = dict()
# Map every sequence id to its group, opening one output file per group.
for id, group in GeneralPurposeParser(options.fn_groups, sep="\t"):
    id2group[id] = group
    # BUG FIX: dict.has_key() was removed in Python 3; "key in dict" is
    # equivalent and works on both Python 2 and 3.
    if group not in group2file:
        tmp = open("%s_16S.fasta" % group, "w")
        group2file[group] = tmp

# Stream the fasta file, writing each record to its group's output file.
for head, seq in FastaParser(options.fn_fasta):
    group = id2group[head]
    otpt = group2file[group]
    otpt.write(">%s\n%s\n" % (head, seq))
#################################################
## Finish
#################################################
| shpakoo/YAP | UngroupFasta.py | Python | mit | 4,184 |
# -*- coding: utf-8 -*-
import os
import json
from django.urls import reverse
from seahub.test_utils import BaseTestCase
from seaserv import seafile_api
try:
from seahub.settings import LOCAL_PRO_DEV_ENV
except ImportError:
LOCAL_PRO_DEV_ENV = False
class ZipTaskViewTest(BaseTestCase):
    """Tests for the api-v2.1 zip-task endpoint (pro-only feature)."""

    def setUp(self):
        self.repo_id = self.repo.id
        self.folder_path = self.folder
        self.folder_name = os.path.basename(self.folder_path)
        self.url = reverse('api-v2.1-zip-task', args=[self.repo_id])

    def tearDown(self):
        self.remove_repo()

    def _request_zip_token(self, dirents):
        """GET the zip-task endpoint for *dirents* under the repo root."""
        query = '?parent_dir=/' + ''.join('&dirents=%s' % name for name in dirents)
        return self.client.get(self.url + query)

    def _assert_token_ok(self, resp):
        """The endpoint must answer 200 with a 36-character zip token."""
        self.assertEqual(200, resp.status_code)
        json_resp = json.loads(resp.content)
        assert len(json_resp['zip_token']) == 36

    def test_can_get_download_dir_zip_token(self):
        if not LOCAL_PRO_DEV_ENV:
            return
        self.login_as(self.user)
        self._assert_token_ok(self._request_zip_token([self.folder_name]))

    def test_can_get_download_multi_zip_token(self):
        if not LOCAL_PRO_DEV_ENV:
            return
        # create another folder for download multi
        another_folder_name = 'another_folder_name'
        seafile_api.post_dir(repo_id=self.repo.id,
                             parent_dir='/', dirname=another_folder_name,
                             username=self.user.username)
        self.login_as(self.user)
        self._assert_token_ok(
            self._request_zip_token([self.folder_name, another_folder_name]))

    def test_can_get_zip_token_with_invalid_repo_permission(self):
        if not LOCAL_PRO_DEV_ENV:
            return
        # admin has no permission on this user-owned repo
        self.login_as(self.admin)
        resp = self._request_zip_token([self.folder_name])
        self.assertEqual(403, resp.status_code)

    def test_can_get_zip_token_for_r_permission_folder(self):
        if not LOCAL_PRO_DEV_ENV:
            return
        self.set_user_folder_r_permission_to_admin()
        self.login_as(self.admin)
        self._assert_token_ok(self._request_zip_token([self.folder_name]))

    def test_can_get_zip_token_for_rw_permission_folder(self):
        if not LOCAL_PRO_DEV_ENV:
            return
        self.set_user_folder_rw_permission_to_admin()
        self.login_as(self.admin)
        self._assert_token_ok(self._request_zip_token([self.folder_name]))
| miurahr/seahub | tests/api/endpoints/test_zip_task.py | Python | apache-2.0 | 3,279 |
# ******************************************************************************
# Copyright 2017-2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
"""
Contains various functions and wrappers to make code Python 2 and Python 3
compatible.
"""
from future import standard_library
standard_library.install_aliases() # triggers E402, hence noqa below
import sys # noqa
import logging # noqa
logger = logging.getLogger(__name__)
PY3 = (sys.version_info[0] >= 3)
# On Python 3 xrange no longer exists; alias it to range so the rest of the
# codebase can use xrange unconditionally.
if PY3:
    xrange = range
else:
    xrange = xrange # pylint: disable=xrange-builtin
# cPickle was folded into the stdlib pickle module in Python 3.
if not PY3:
    import cPickle as the_pickle # noqa
else:
    import pickle as the_pickle # noqa
pickle = the_pickle
def pickle_load(filepath):
    """
    Py2Py3 compatible Pickle load.

    Arguments:
        filepath: an open (binary) file object containing the pickle data
                  stream.  NOTE(review): despite the name, ``pickle.load``
                  takes a file object, not a path string.

    Returns:
        Unpickled object
    """
    if not PY3:
        return pickle.load(filepath)
    # latin1 lets Python 3 read pickles produced by Python 2
    return pickle.load(filepath, encoding='latin1')
| NervanaSystems/neon | neon/util/compat.py | Python | apache-2.0 | 1,588 |
from typing import List
import numpy as np
from scipy.optimize import minimize
from src.interpolation.curve import Curve
from src.interpolation.interpolator import Interpolator
from src.utils import Math
class Energy:
    """Static energy terms (and their analytic gradients) used by the
    optimisation-based curve fitter below.

    Solution layout: each curve contributes four scalars (bx, by, cx, cy) --
    its two interior control points -- so the gradient vectors returned by
    the *_d methods have 4 * len(curves) entries.
    """
    @staticmethod
    def accuracy(original_curve, curves: List[Curve]):
        # Sum of squared vertical distances between points sampled uniformly
        # on the fitted curves and the original polyline.
        total_e = 0
        for curve in curves:
            n = 10
            interps = curve.sample_uniform(n)
            for interp in interps:
                org_y = Math.sample_y_at_x_in_polyline(original_curve, interp[0])
                e = np.power(org_y - interp[1], 2)
                total_e += e
        return total_e
    @staticmethod
    def accuracy_d(original_curve, curves: List[Curve]):
        # Analytic gradient of accuracy() w.r.t. the (bx, by, cx, cy)
        # coordinates of every curve: chain rule through the curve's basis
        # coefficients and the polyline's local slope org_y_d.
        jacobian = [0.0 for _ in range(4 * len(curves))]
        for i, curve in enumerate(curves):
            n = 10
            interps = curve.sample_uniform(n)
            us = Curve.uniform_sample_set(n)
            for u, interp in zip(us, interps):
                org_y = Math.sample_y_at_x_in_polyline(original_curve, interp[0])
                org_y_d = Math.sample_y_at_x_in_polyline_d(original_curve, interp[0])
                jacobian[i * 4 + 0] += 2 * (interp[1] - org_y) * (-org_y_d * Curve.coefficient_b(u))
                jacobian[i * 4 + 1] += 2 * (interp[1] - org_y) * Curve.coefficient_b(u)
                jacobian[i * 4 + 2] += 2 * (interp[1] - org_y) * (-org_y_d * Curve.coefficient_c(u))
                jacobian[i * 4 + 3] += 2 * (interp[1] - org_y) * Curve.coefficient_c(u)
        return jacobian
    @staticmethod
    def colinear(_, curves):
        # Penalises kinks between consecutive curves: the angle at the shared
        # endpoint d between the vectors d->c (curve i) and d->b (curve i+1)
        # should be pi, i.e. the three points colinear.
        total_e = 0
        n = len(curves) - 1
        for i in range(n):
            x1, y1 = curves[i].get_c()
            x2, y2 = curves[i + 1].get_b()
            dx, dy = curves[i].get_d()
            v1 = np.arctan2((dy - y1), (dx - x1))
            v2 = np.arctan2((dy - y2), (dx - x2))
            # Shift both angles onto a common branch so the difference is
            # continuous across the arctan2 discontinuity at +-pi.
            if v1 >= 0 and v2 >= 0:
                pass
            elif v2 <= 0 <= v1:
                v2 += 2 * np.pi
            elif v1 <= 0 <= v2:
                v1 += 2 * np.pi
            else:
                v1 += 2 * np.pi
                v2 += 2 * np.pi
            angle = v1 - v2
            abs_angle = np.sqrt(np.power(angle, 2))
            e = np.power(abs_angle - np.pi, 2)
            total_e += e
        return total_e
    @staticmethod
    def colinear_d(_, curves):
        # Analytic gradient of colinear().  The first curve's b and the last
        # curve's c never appear in the energy, hence the two leading and two
        # trailing zeros.  Per joint the four appended entries are the
        # derivatives w.r.t. (c_i.x, c_i.y, b_{i+1}.x, b_{i+1}.y); sq_xy and
        # sq_zw are the squared lengths of the two endpoint vectors
        # (denominators from differentiating arctan2).
        j = []
        j.append(0)
        j.append(0)
        n = len(curves) - 1
        for i in range(n):
            x1, y1 = curves[i].get_c()
            x2, y2 = curves[i + 1].get_b()
            dx, dy = curves[i].get_d()
            v1 = np.arctan2((dy - y1), (dx - x1))
            v2 = np.arctan2((dy - y2), (dx - x2))
            sq_xy = np.power(dy - y1, 2) + np.power(dx - x1, 2)
            sq_zw = np.power(dy - y2, 2) + np.power(dx - x2, 2)
            # same branch-selection logic as colinear(), folded into angle
            if v1 >= 0 and v2 >= 0:
                angle = v1 - v2
            elif v1 >= 0 >= v2:
                angle = v1 - v2 - 2 * np.pi
            elif v1 <= 0 <= v2:
                angle = v1 - v2 + 2 * np.pi
            else:
                angle = v1 - v2
            abs_angle = np.sqrt(np.power(angle, 2))
            j.append((2 * (dy - y1) * angle * (abs_angle - np.pi)) / (abs_angle * sq_xy))
            j.append(-(2 * (dx - x1) * angle * (abs_angle - np.pi)) / (abs_angle * sq_xy))
            j.append(-(2 * (dy - y2) * angle * (abs_angle - np.pi)) / (abs_angle * sq_zw))
            j.append((2 * (dx - x2) * angle * (abs_angle - np.pi)) / (abs_angle * sq_zw))
        j.append(0)
        j.append(0)
        return j
class OptimizationBasedCurveFitting(Interpolator):
    """Fits a chain of curves to the target polyline by minimising a weighted
    sum of an accuracy term and a colinearity (smooth-joint) term."""

    weight_distance = 1.0
    weight_colinear = 1000.0

    def set_current_solution(self, solution):
        """Unpack the flat [bx, by, cx, cy, ...] vector into the curves."""
        for index, curve in enumerate(self.curves):
            bx, by, cx, cy = (solution[index * 4 + 0], solution[index * 4 + 1],
                              solution[index * 4 + 2], solution[index * 4 + 3])
            curve.set_b(bx, by)
            curve.set_c(cx, cy)
            curve.update()

    def get_current_solution(self):
        """Pack the b/c control points of every curve into one flat vector."""
        solution = []
        for curve in self.curves:
            bx, by = curve.get_b()
            cx, cy = curve.get_c()
            solution.extend([bx, by, cx, cy])
        return solution

    def evaluate(self):
        """Weighted total energy of the current solution."""
        return (Energy.accuracy(self._curve, self.curves) * OptimizationBasedCurveFitting.weight_distance
                + Energy.colinear(self._curve, self.curves) * OptimizationBasedCurveFitting.weight_colinear)

    def jacobian(self):
        """Weighted sum of the two energy gradients as a numpy vector."""
        total = [0 for _ in range(4 * len(self.curves))]
        weighted_grads = [
            (Energy.accuracy_d(self._curve, self.curves), OptimizationBasedCurveFitting.weight_distance),
            (Energy.colinear_d(self._curve, self.curves), OptimizationBasedCurveFitting.weight_colinear),
        ]
        for grad, weight in weighted_grads:
            for index, value in enumerate(grad):
                total[index] += value * weight
        return np.array(total)

    def set_and_evaluate(self, solution):
        """Objective callback for scipy: applies *solution*, returns (f, grad)."""
        self.set_current_solution(solution)
        return self.evaluate(), self.jacobian()

    def execute(self):
        """Run BFGS from the current control points and keep the optimum."""
        result = minimize(self.set_and_evaluate, self.get_current_solution(), method="BFGS", jac=True)
        self.set_current_solution(result.x)
| richard-roberts/Mocappie | src/interpolation/obcf.py | Python | gpl-3.0 | 5,323 |
import json, io, math
import urllib2
unsupported_ISO4217_currency_code = ['GGP', 'CNH', 'CNT', 'JEP', 'IMP', 'KID', 'SLS', 'SLSH', 'PRB', 'TVD', 'BTC', 'XBT'];
def update():
    """Fetch the latest exchange rates from openexchangerates.org, store the
    raw JSON in exchange_rates.txt and write INSERT statements (rates stored
    as integer hundredths) to exchange_rates.sql."""
    data = json.load(urllib2.urlopen('https://openexchangerates.org/api/latest.json?app_id=756bb10cd2ca41e8a80f40a60e425864'))
    with io.open('exchange_rates.txt', 'w', encoding='utf-8') as f:
        f.write(unicode(json.dumps(data, ensure_ascii=False)))
    print(str(data['rates']))
    updated_exchange_rate_count = 0
    with io.open('exchange_rates.sql', 'w', encoding='utf-8') as f:
        for code, rate in data['rates'].iteritems():
            # rates are persisted as integer hundredths; skip non-positive
            # values and codes the application does not support
            cents = math.trunc(rate * 100)
            if cents <= 0 or (code in unsupported_ISO4217_currency_code):
                continue
            sql = 'INSERT INTO exchange_rate(currency_code, exchange_rate) VALUES("{0}", {1})'.format(code, str(cents))
            f.write(unicode(sql + ';\r\n'))
            updated_exchange_rate_count += 1
            print('currency code=' + code + ', exchange rate=' + str(cents) + '\n')
    print('\nupdated exchange rate count: ' + str(updated_exchange_rate_count))
if __name__ == "__main__":
update()
| xiaolei/transaction | tools/update_exchange_rates.py | Python | apache-2.0 | 1,185 |
#-----------------------------------------------------------------------------
# Copyright (c) 2013-2016, PyInstaller Development Team.
#
# Distributed under the terms of the GNU General Public License with exception
# for distributing bootloader.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
# PyQt5.QtCore needs the sip bindings at runtime even though nothing
# imports them in a way PyInstaller's analysis can see.
hiddenimports = ['sip']
from PyInstaller.utils.hooks import qt_plugins_binaries
# Bundle the Qt text-codec plugins alongside QtCore.
binaries = qt_plugins_binaries('codecs', namespace='PyQt5')
| ijat/Hotspot-PUTRA-Auto-login | PyInstaller-3.2/PyInstaller/hooks/hook-PyQt5.QtCore.py | Python | gpl-3.0 | 552 |
#!/usr/bin/env python3
import PISM
from PISM.util import convert
from math import cos, pi
# Simple testing program for Lingle & Clark bed deformation model.
# Runs go for 150,000 years on 63.5km grid with 100a time steps and Z=2 in L&C model.
# SCENARIOS: run 'python bed_deformation.py -scenario N' where N=1,2,3,4 as follows
# (1) dump ice disc on initially level, non-uplifting land, use only viscous
# half-space model:
# include_elastic = FALSE, do_uplift = FALSE, H0 = 1000.0
# center depth b(0,0) should eventually equilibriate to near
# -1000 * (910/3300) = -275.76 m
# (2) dump ice disc on initially level, non-uplifting land, use both viscous
# half-space model and elastic model
# include_elastic = TRUE, do_uplift = FALSE, H0 = 1000.0
# (3) never loaded, initially level, uplifting land, use only viscous
# half-space model (because elastic model gives no additional when no load):
# include_elastic = FALSE, do_uplift = TRUE, H0 = 0.0
# (4) dump ice disc on initially level, uplifting land, use both viscous
# half-space model and elastic model:
# include_elastic = TRUE, do_uplift = TRUE, H0 = 1000.0;
ctx = PISM.Context()
config = ctx.config
R0 = 1000e3
def initialize_uplift(uplift):
    """Initialize the uplift field: a raised-cosine bump peaking at
    10 mm/year in the grid center and falling to zero at r = 1.5 * R0."""
    grid = uplift.grid()
    peak_uplift = convert(10, "mm/year", "m/second")
    cutoff = 1.5 * R0
    with PISM.vec.Access(nocomm=[uplift]):
        for (i, j) in grid.points():
            r = PISM.radius(grid, i, j)
            if r >= cutoff:
                uplift[i, j] = 0.0
            else:
                # half-cosine taper from the peak down to zero at the cutoff
                uplift[i, j] = peak_uplift * (cos(pi * (r / cutoff)) + 1.0) / 2.0
def initialize_thickness(thickness, H0):
    """Set the ice thickness to H0 inside a disc of radius R0, zero elsewhere."""
    grid = thickness.grid()
    with PISM.vec.Access(nocomm=[thickness]):
        for (i, j) in grid.points():
            inside_disc = PISM.radius(grid, i, j) < R0
            thickness[i, j] = H0 if inside_disc else 0.0
def allocate(grid):
    # Allocate the model fields on *grid*: ice thickness, bed elevation,
    # bed uplift rate (m/s) and sea level.
    H = PISM.model.createIceThicknessVec(grid)
    bed = PISM.model.createBedrockElevationVec(grid)
    uplift = PISM.IceModelVec2S()
    uplift.create(grid, "uplift", PISM.WITHOUT_GHOSTS)
    uplift.set_attrs("internal", "bed uplift", "m / second", "m / second", "", 0)
    sea_level = PISM.IceModelVec2S(grid, "sea_level", PISM.WITHOUT_GHOSTS)
    return H, bed, uplift, sea_level
def create_grid():
    # Build an IceGrid from command-line options (horizontal size/extent and
    # vertical grid), distributing ownership over the MPI ranks in ctx.size.
    P = PISM.GridParameters(config)
    P.horizontal_size_from_options()
    P.horizontal_extent_from_options()
    P.vertical_grid_from_options(config)
    P.ownership_ranges_from_options(ctx.size)
    return PISM.IceGrid(ctx.ctx, P)
def run(scenario, plot, pause, save):
    """Run one Lingle & Clark bed-deformation scenario (see the module
    comment above for the meaning of scenarios 1-4) to the end of the
    model time, optionally plotting/saving bed elevation and uplift."""
    # set grid defaults
    config.set_number("grid.Mx", 193)
    config.set_number("grid.My", 129)
    config.set_number("grid.Lx", 3000e3)
    config.set_number("grid.Ly", 2000e3)
    config.set_number("grid.Mz", 2)
    config.set_number("grid.Lz", 1000)
    # scenario -> (include elastic response, apply initial uplift, disc thickness H0)
    scenarios = {"1": (False, False, 1000.0),
                 "2": (True, False, 1000.0),
                 "3": (False, True, 0.0),
                 "4": (True, True, 1000.0)}
    elastic, use_uplift, H0 = scenarios[scenario]
    print("Using scenario %s: elastic model = %s, use uplift = %s, H0 = %f m" % (scenario, elastic, use_uplift, H0))
    config.set_flag("bed_deformation.lc.elastic_model", elastic)
    grid = create_grid()
    thickness, bed, uplift, sea_level = allocate(grid)
    # set initial geometry and uplift
    bed.set(0.0)
    thickness.set(0.0)
    sea_level.set(0.0)
    if use_uplift:
        initialize_uplift(uplift)
    time = ctx.ctx.time()
    time.init(ctx.ctx.log())
    model = PISM.LingleClark(grid)
    # bootstrap with the unloaded state so the disc load below is a step input
    model.bootstrap(bed, uplift, thickness, sea_level)
    # now add the disc load
    initialize_thickness(thickness, H0)
    # 100-year time steps
    dt = convert(100, "365 day", "seconds")
    # the time-stepping loop
    while time.current() < time.end():
        # don't go past the end of the run
        dt_current = min(dt, time.end() - time.current())
        model.update(thickness, sea_level, time.current(), dt_current)
        if plot:
            model.bed_elevation().view(400)
            model.uplift().view(400)
        print("t = %s years, dt = %s years" % (time.date(), time.convert_time_interval(dt_current, "years")))
        time.step(dt_current)
    print("Reached t = %s years" % time.date())
    if pause:
        print("Pausing for 5 seconds...")
        PISM.PETSc.Sys.sleep(5)
    if save:
        model.bed_elevation().dump("bed_elevation.nc")
        model.uplift().dump("bed_uplift.nc")
model.uplift().dump("bed_uplift.nc")
if __name__ == "__main__":
    # Command-line driver: choose the scenario and optional plot/pause/save.
    scenario = PISM.OptionKeyword("-scenario", "choose one of 4 scenarios", "1,2,3,4", "1")
    plot = PISM.OptionBool("-plot", "Plot bed elevation and uplift.")
    save = PISM.OptionBool("-save", "Save final states of the bed elevation and uplift.")
    pause = PISM.OptionBool("-pause", "Pause for 5 seconds to look at runtime 2D plots.")
    run(scenario.value(), plot, pause, save)
# Smoke tests (no plotting, no pausing, no saving).
def scenario1_test():
    "Test if scenario 1 runs"
    run("1", False, False, False)
def scenario3_test():
    "Test if scenario 3 runs"
    run("3", False, False, False)
| pism/pism | examples/python/bed_deformation.py | Python | gpl-3.0 | 5,232 |
#!/usr/bin/python3
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk
from os import path
class DelDirDialog(Gtk.Dialog):
    """Modal dialog asking the user to pick, from *list_delicate*, a
    directory.  After the dialog runs, the selection is available through
    ``self.dirname`` (text) and ``self.diriter`` (model iterator)."""
    def __init__(self, parent, list_delicate):
        Gtk.Dialog.__init__(self, "", parent, 0,
            (Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL,
            Gtk.STOCK_OK, Gtk.ResponseType.OK))
        self.set_modal(True)
        self.set_resizable(False)
        self.set_border_width(10)
        # Result values:
        self.dirname = None
        self.diriter = None
        box = self.get_content_area()
        box.set_spacing(6)
        # Combo listing the candidate directories (first model column).
        combo = Gtk.ComboBox.new_with_model(list_delicate)
        combo.set_hexpand(True)
        combo.connect("changed", self.on_combo_changed)
        renderer_text = Gtk.CellRendererText()
        combo.pack_start(renderer_text, True)
        combo.add_attribute(renderer_text, "text", 0)
        box.pack_start(combo, False, False, True)
        #box.add()
        self.show_all()
    def on_combo_changed(self, combo):
        # Record the current selection; guard against "no selection".
        self.diriter = combo.get_active_iter()
        if self.diriter is not None: # otherwise raise error when destroy dialog
            self.dirname = combo.get_model().get_value(self.diriter, 0)
class ConfirmDialog(Gtk.Dialog):
    """Modal dialog summarising a planned sync: which files will be lost
    (replaced by a conflicting copy) and which will be created."""

    def __init__(self, parent, orders):
        Gtk.Dialog.__init__(self, "Confirm", parent, 0,
            (Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL,
            Gtk.STOCK_OK, Gtk.ResponseType.OK))

        # Variables
        self.parent = parent
        # orders: dict with 'paths' (local, ext base dirs), 'local', 'ext'
        # (file names on each side) and 'conflicts' (conflicting entries)
        self.orders = orders

        # Properties
        self.set_border_width(10)
        self.set_modal(True)
        self.set_default_size(800, 250)

        # Content
        self.box = self.get_content_area()
        self.initialise_box()
        self.show_all()

    def initialise_box(self):
        """Build the two-column summary (files lost / files created)."""
        self.box.set_spacing(6)
        box_wrapper = Gtk.Box()

        files_lost, files_create = self._classify_files()

        # Only the first 1000 lost files are listed, to keep the label small.
        box_lost = self._make_column('These files will be lost:', files_lost[:1000])
        box_create = self._make_column('These files will be create:', files_create)

        box_wrapper.pack_start(box_lost, True, True, 5)
        box_wrapper.pack_start(box_create, True, True, 5)
        self.box.pack_start(box_wrapper, True, True, 0)

    def _classify_files(self):
        """Split local and external files into (lost, created) full paths,
        based on the conflict list."""
        conflict_files = [fileinfo[0] for fileinfo in self.orders['conflicts']]
        files_lost = list()
        files_create = list()
        # side 0 = local names under paths[0], side 1 = ext names under paths[1]
        for side, filenames in enumerate((self.orders['local'], self.orders['ext'])):
            base = self.orders['paths'][side]
            for filename in filenames:
                path_file = path.join(base, filename)
                if filename in conflict_files:
                    files_lost.append(path_file)
                else:
                    files_create.append(path_file)
        return files_lost, files_create

    def _make_column(self, title, filenames):
        """One scrollable column: a title label above the file list."""
        column = Gtk.VBox()
        column.pack_start(Gtk.Label(title), False, False, 5)
        box_filenames = Gtk.VBox()
        scrolled = Gtk.ScrolledWindow()
        msg_label = ''.join(filename + '\n' for filename in filenames)
        box_filenames.pack_start(Gtk.Label(msg_label), False, False, 1)
        scrolled.add(box_filenames)
        column.pack_start(scrolled, True, True, 2)
        return column
class AbortDialog(Gtk.Dialog):
    """Modal 'work in progress' dialog; Cancel lets the user abort."""

    def __init__(self, parent):
        Gtk.Dialog.__init__(self, "Work in progress...", parent, 0,
            (Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL))
        self.parent = parent
        self.set_modal(True)
        content = self.get_content_area()
        content.add(Gtk.Label("Work in progress..."))
        self.show_all()

    def close(self):
        """Dismiss the dialog."""
        self.destroy()
class ErrorsDialog(Gtk.Dialog):
    """Modal dialog listing the files that could not be found."""

    def __init__(self, parent, errors):
        Gtk.Dialog.__init__(self, "Files not found", parent, 0,
            (Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL))
        self.parent = parent
        self.set_modal(True)

        # one line per missing file
        listing = ''.join(filename + '\n' for filename in errors)

        inner = Gtk.VBox()
        inner.pack_start(Gtk.Label('These files were not found:'), True, False, 3)
        inner.pack_start(Gtk.Label(listing), True, False, 3)

        scrolled = Gtk.ScrolledWindow()
        scrolled.set_min_content_height(300)
        scrolled.add(inner)

        self.get_content_area().add(scrolled)
        self.show_all()
def folder_chooser(parent, is_folder=True, folder=None, msg=None):
    """Open a Gtk file chooser and return the selected path, or None on cancel.

    is_folder=True selects a directory, is_folder=False opens a file.
    *folder* pre-sets the starting directory (file mode only) and *msg*
    overrides the dialog title.
    """
    # BUG FIX: the default titles were swapped between the two branches --
    # the file-open branch said "Select a folder" and vice versa.
    if not is_folder:
        if msg is None:
            msg = "Select a file"
        dialog = Gtk.FileChooserDialog(msg, parent,
            Gtk.FileChooserAction.OPEN,
            (Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL,
            Gtk.STOCK_OPEN, Gtk.ResponseType.OK))
        if folder:
            dialog.set_current_folder(folder)
    else:
        if msg is None:
            msg = "Select a folder"
        dialog = Gtk.FileChooserDialog(msg, parent,
            Gtk.FileChooserAction.SELECT_FOLDER,
            (Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL,
            "Select", Gtk.ResponseType.OK))
    dialog.set_default_size(800, 400)

    response = dialog.run()
    if response == Gtk.ResponseType.OK:
        result = dialog.get_filename()
    else:
        result = None
    dialog.destroy()
    return result
| Thykof/SafeMyWork | interface/dialogs/dialog.py | Python | gpl-3.0 | 4,866 |
from tkinter import filedialog
from tkinter import *
from tkinter import ttk
import os, sys
import random
def popup():
    """Show a small confirmation window after renaming completes."""
    # BUG FIX: the original created a second Tk() root and ran a nested
    # mainloop(); secondary windows must be Toplevel children of the one
    # existing root, which already runs the main loop.
    popup = Toplevel(root)
    popup.title("Done!")
    label = ttk.Label(popup, text="all files in ( {} ) are renamed".format(root.directory))
    label.pack(side="top", fill="x", padx=5, pady=5)
    B1 = ttk.Button(popup, text="Ok", command=popup.destroy)
    B1.pack(pady=5)
def select():
    # Ask the user for a folder and remember it on the shared root window.
    root.directory = filedialog.askdirectory()
def randomize():
    """Rename every file in the chosen folder to a random 6-digit name,
    keeping each file's original extension."""
    print(root.directory)
    used = set()
    for filename in os.listdir(root.directory):
        src = os.path.join(root.directory, filename)
        ext = os.path.splitext(src)[1]
        # BUG FIX: random.randrange can repeat, which made os.rename
        # silently overwrite an earlier file (POSIX) or crash (Windows);
        # retry until the generated name is unused.
        while True:
            rand = str(random.randrange(100000, 999999))
            newname = os.path.join(root.directory, rand + ext)
            if rand not in used and not os.path.exists(newname):
                break
        used.add(rand)
        os.rename(src, newname)
    popup()
# Main window: three buttons (choose folder, rename, quit).
root = Tk()
Button(root, text="Open Folder", command=select).pack(side="left", padx=1, pady=5)
Button(root, text="Randomize Names", command=randomize).pack(side="left", padx=1, pady=5)
Button(root, text="Quit", command=root.destroy).pack(side="left", padx=1, pady=5)
root.title("Randomizer")
root.minsize()
root.mainloop()
| obionar/randobi | randobi.py | Python | gpl-3.0 | 1,115 |
from os import scandir
from django.contrib import admin
from django import forms
from django.contrib.admin.widgets import FilteredSelectMultiple
from django.contrib.auth.admin import UserAdmin as BaseUserAdmin
from django.contrib.auth.models import User
from django.conf import settings
from .models import *
class TalkGroupAdmin(admin.ModelAdmin):
    # Searchable, system-aware list view for talkgroups.
    search_fields = ['alpha_tag', 'description', 'dec_id']
    list_display = ('alpha_tag', 'description', 'dec_id', 'system')
    save_on_top = True
class UnitAdmin(admin.ModelAdmin):
    # Searchable list view for radio units.
    search_fields = ['description', 'dec_id' ]
    list_display = ('description', 'dec_id', 'system' )
    save_on_top = True
class TranmissionUnitInline(admin.TabularInline):
    # Inline editor for the units heard in a transmission.
    model = TranmissionUnit
    extra = 0 # how many rows to show
class TransmissionAdmin(admin.ModelAdmin):
    #inlines = (TranmissionUnitInline,)
    # Raw-id widgets avoid rendering huge foreign-key dropdowns.
    raw_id_fields = ('talkgroup_info', 'units', 'source', 'system')
    save_on_top = True
class SourceInline(admin.TabularInline):
    model = Source
    readonly_fields=('id',)
class SourceAdmin(admin.ModelAdmin):
    list_display = ('id','description')
    list_display_links = ('id','description')
    #fields = ('id','description')
    save_on_top = True
    def get_readonly_fields(self, request, obj=None):
        # Make the primary key read-only when editing an existing source.
        if obj: # editing an existing object
            return self.readonly_fields + ('id',)
        return self.readonly_fields
class ScanListAdminForm(forms.ModelForm):
    # Replace the default M2M widget with a two-pane filtered selector that
    # shows each talkgroup together with its system.
    talkgroups = forms.ModelMultipleChoiceField(
        queryset=TalkGroupWithSystem.objects.all(),
        required=False,
        widget=FilteredSelectMultiple(
            verbose_name = 'talkgroups',
            is_stacked=False
        )
    )
    class Meta:
        model = ScanList
        fields = "__all__"
    def __init__(self, *args, **kwargs):
        # Pre-select the talkgroups already attached to this scan list.
        super(ScanListAdminForm, self).__init__(*args, **kwargs)
        if self.instance and self.instance.pk:
            self.fields['talkgroups'].initial = self.instance.talkgroups.all()
    def save(self, commit=True):
        # Save the instance first, then sync the M2M set; save_m2m() only
        # exists after save(commit=False).
        scanlist = super(ScanListAdminForm, self).save(commit=False)
        if commit:
            scanlist.save()
        if scanlist.pk:
            scanlist.talkgroups.set(self.cleaned_data['talkgroups'])
            self.save_m2m()
        return scanlist
class ScanListAdmin(admin.ModelAdmin):
    # Scan-list admin using the filtered-selector form above.
    form = ScanListAdminForm
    save_as = True
    save_on_top = True
class ScanListRawAdmin(admin.ModelAdmin):
    # Lighter-weight variant: autocomplete instead of the filtered selector.
    autocomplete_fields= ('talkgroups',)
class ProfileInline(admin.StackedInline):
    # Edit the user's profile directly on the User admin page.
    model = Profile
    can_delete = False
    verbose_name_plural = 'profile'
class UserAdmin(BaseUserAdmin):
    inlines = (ProfileInline, )
class TalkGroupAccessAdminForm(forms.ModelForm):
    # Same filtered-selector pattern as ScanListAdminForm, for access lists.
    talkgroups = forms.ModelMultipleChoiceField(
        queryset=TalkGroupWithSystem.objects.all(),
        required=False,
        widget=FilteredSelectMultiple(
            verbose_name = 'talkgroups',
            is_stacked=False
        )
    )
    class Meta:
        model = TalkGroupAccess
        fields = "__all__"
    def __init__(self, *args, **kwargs):
        # Pre-select the talkgroups already in this access list.
        super(TalkGroupAccessAdminForm, self).__init__(*args, **kwargs)
        if self.instance and self.instance.pk:
            self.fields['talkgroups'].initial = self.instance.talkgroups.all()
    def save(self, commit=True):
        # Save the instance first, then sync the M2M set; save_m2m() only
        # exists after save(commit=False).
        tglist = super(TalkGroupAccessAdminForm, self).save(commit=False)
        if commit:
            tglist.save()
        if tglist.pk:
            tglist.talkgroups.set(self.cleaned_data['talkgroups'])
            self.save_m2m()
        return tglist
class TalkGroupAccessAdmin(admin.ModelAdmin):
    form = TalkGroupAccessAdminForm
    list_display = ('name', 'default_group', 'default_new_talkgroups')
    save_on_top = True
class TalkGroupAccessRawAdmin(admin.ModelAdmin):
    # Autocomplete variant for installations with many talkgroups.
    autocomplete_fields= ('talkgroups',)
class TranmissionUnitAdmin(admin.ModelAdmin):
    raw_id_fields = ("transmission", "unit")
    save_on_top = True
class IncidentAdmin(admin.ModelAdmin):
    raw_id_fields = ("transmissions",)
    save_on_top = True
class CityForms(forms.ModelForm):
    """Admin form that lets staff paste a full Google Maps embed snippet;
    the clean method extracts just the URL from the iframe's src attribute."""
    google_maps_url = forms.CharField(max_length=1000)

    class Meta:
        model = City
        fields = '__all__'

    def clean_google_maps_url(self):
        """Return the URL inside the first pair of double quotes of a pasted
        embed snippet; values without quoted parts pass through unchanged."""
        data = self.cleaned_data.get('google_maps_url', '')
        parts = data.split('"')
        try:
            return parts[1]
        except IndexError:
            # BUG FIX: the original returned ``self`` (the form object) here;
            # a clean_<field> method must return the cleaned *value*.
            return data
class CityAdmin(admin.ModelAdmin):
    # Use the form that extracts the URL from a pasted Google Maps embed.
    form = CityForms
class MessagePopUpAdmin(admin.ModelAdmin):
    list_display = ('mesg_type', 'mesg_html', 'active')
# Model registrations.
admin.site.register(Transmission, TransmissionAdmin)
admin.site.register(Unit,UnitAdmin)
#admin.site.register(TranmissionUnit, TranmissionUnitAdmin)
admin.site.register(TalkGroup, TalkGroupAdmin)
# Raw-id (autocomplete) variants scale better on large installs; selected
# via the USE_RAW_ID_FIELDS setting.
if not settings.USE_RAW_ID_FIELDS:
    admin.site.register(ScanList, ScanListAdmin)
    admin.site.register(TalkGroupAccess, TalkGroupAccessAdmin)
else:
    admin.site.register(ScanList, ScanListRawAdmin)
    admin.site.register(TalkGroupAccess, TalkGroupAccessRawAdmin)
admin.site.register(MenuScanList)
admin.site.register(MenuTalkGroupList)
admin.site.register(Source, SourceAdmin)
admin.site.register(Agency)
admin.site.register(Plan)
# Re-register User so the Profile inline appears on the user page.
admin.site.unregister(User)
admin.site.register(User, UserAdmin)
admin.site.register(System)
admin.site.register(WebHtml)
admin.site.register(RepeaterSite)
admin.site.register(Service)
admin.site.register(SiteOption)
admin.site.register(Incident, IncidentAdmin)
admin.site.register(City, CityAdmin)
admin.site.register(MessagePopUp, MessagePopUpAdmin)
| ScanOC/trunk-player | radio/admin.py | Python | mit | 5,683 |
"""
Course navigation page object
"""
import re
from bok_choy.page_object import PageObject, unguarded
from bok_choy.promise import EmptyPromise
class CourseNavPage(PageObject):
    """
    Navigate sections and sequences in the courseware.

    Drives the left-hand course outline: expanding chapters, clicking
    subsections, and moving between verticals in the sequence bar.
    """

    # Never navigated to directly; always reached from another page.
    url = None

    def is_browser_on_page(self):
        # The course outline pane identifies the courseware page.
        return self.q(css='div.course-index').present

    @property
    def sections(self):
        """
        Return a dictionary representation of sections and subsections.
        Example:

            {
                'Introduction': ['Course Overview'],
                'Week 1': ['Lesson 1', 'Lesson 2', 'Homework']
                'Final Exam': ['Final Exam']
            }

        You can use these titles in `go_to_section` to navigate to the section.
        """
        # Dict to store the result
        nav_dict = dict()

        section_titles = self._section_titles()

        # Get the section titles for each chapter
        for sec_index, sec_title in enumerate(section_titles):

            # NOTE(review): this condition can never be true — the loop body
            # only runs when section_titles is non-empty. Presumably the
            # intent was to check the *subsection* list for emptiness; verify.
            if len(section_titles) < 1:
                self.warning("Could not find subsections for '{0}'".format(sec_title))
            else:
                # Add one to convert list index (starts at 0) to CSS index (starts at 1)
                nav_dict[sec_title] = self._subsection_titles(sec_index + 1)

        return nav_dict

    @property
    def sequence_items(self):
        """
        Return a list of sequence items on the page.
        Sequence items are one level below subsections in the course nav.

        Example return value:
            ['Chemical Bonds Video', 'Practice Problems', 'Homework']
        """
        seq_css = 'ol#sequence-list>li>.nav-item>.sequence-tooltip'
        return self.q(css=seq_css).map(self._clean_seq_titles).results

    def go_to_section(self, section_title, subsection_title):
        """
        Go to the section in the courseware.
        Every section must have at least one subsection, so specify
        both the section and subsection title.

        Example:
            go_to_section("Week 1", "Lesson 1")
        """

        # For test stability, disable JQuery animations (opening / closing menus)
        self.browser.execute_script("jQuery.fx.off = true;")

        # Get the section by index
        try:
            sec_index = self._section_titles().index(section_title)
        except ValueError:
            self.warning("Could not find section '{0}'".format(section_title))
            return

        # Click the section to ensure it's open (no harm in clicking twice if it's already open)
        # Add one to convert from list index to CSS index
        section_css = '.course-navigation .chapter:nth-of-type({0})'.format(sec_index + 1)
        self.q(css=section_css).first.click()

        # Get the subsection by index
        try:
            subsec_index = self._subsection_titles(sec_index + 1).index(subsection_title)
        except ValueError:
            msg = "Could not find subsection '{0}' in section '{1}'".format(subsection_title, section_title)
            self.warning(msg)
            return

        # Convert list indices (start at zero) to CSS indices (start at 1)
        subsection_css = (
            ".course-navigation .chapter-content-container:nth-of-type({0}) "
            ".menu-item:nth-of-type({1})"
        ).format(sec_index + 1, subsec_index + 1)

        # Click the subsection and ensure that the page finishes reloading
        self.q(css=subsection_css).first.click()
        self._on_section_promise(section_title, subsection_title).fulfill()

    def go_to_vertical(self, vertical_title):
        """
        Within a section/subsection, navigate to the vertical with `vertical_title`.
        """

        # Get the index of the item in the sequence
        all_items = self.sequence_items

        try:
            seq_index = all_items.index(vertical_title)

        except ValueError:
            msg = "Could not find sequential '{0}'. Available sequentials: [{1}]".format(
                vertical_title, ", ".join(all_items)
            )
            self.warning(msg)

        else:

            # Click on the sequence item at the correct index
            # Convert the list index (starts at 0) to a CSS index (starts at 1)
            seq_css = "ol#sequence-list>li:nth-of-type({0})>.nav-item".format(seq_index + 1)
            self.q(css=seq_css).first.click()
            # Click triggers an ajax event
            self.wait_for_ajax()

    def _section_titles(self):
        """
        Return a list of all section titles on the page.
        """
        chapter_css = '.course-navigation .chapter .group-heading'
        return self.q(css=chapter_css).map(lambda el: el.text.strip()).results

    def _subsection_titles(self, section_index):
        """
        Return a list of all subsection titles on the page
        for the section at index `section_index` (starts at 1).
        """
        # Retrieve the subsection title for the section
        # Add one to the list index to get the CSS index, which starts at one
        subsection_css = (
            ".course-navigation .chapter-content-container:nth-of-type({0}) "
            ".menu-item a p:nth-of-type(1)"
        ).format(section_index)

        # If the element is visible, we can get its text directly
        # Otherwise, we need to get the HTML
        # It *would* make sense to always get the HTML, but unfortunately
        # the open tab has some child <span> tags that we don't want.
        return self.q(
            css=subsection_css
        ).map(
            lambda el: el.text.strip().split('\n')[0] if el.is_displayed() else el.get_attribute('innerHTML').strip()
        ).results

    def _on_section_promise(self, section_title, subsection_title):
        """
        Return a `Promise` that is fulfilled when the user is on
        the correct section and subsection.
        """
        desc = "currently at section '{0}' and subsection '{1}'".format(section_title, subsection_title)
        return EmptyPromise(
            lambda: self.is_on_section(section_title, subsection_title), desc
        )

    @unguarded
    def is_on_section(self, section_title, subsection_title):
        """
        Return a boolean indicating whether the user is on the section and subsection
        with the specified titles.

        This assumes that the currently expanded section is the one we're on
        That's true right after we click the section/subsection, but not true in general
        (the user could go to a section, then expand another tab).
        """
        current_section_list = self.q(css='.course-navigation .chapter.is-open .group-heading').text
        current_subsection_list = self.q(css='.course-navigation .chapter-content-container .menu-item.active a p').text

        if len(current_section_list) == 0:
            self.warning("Could not find the current section")
            return False

        elif len(current_subsection_list) == 0:
            self.warning("Could not find current subsection")
            return False

        else:
            return (
                current_section_list[0].strip() == section_title and
                current_subsection_list[0].strip().split('\n')[0] == subsection_title
            )

    # Regular expression to remove HTML span tags from a string
    REMOVE_SPAN_TAG_RE = re.compile(r'</span>(.+)<span')

    def _clean_seq_titles(self, element):
        """
        Clean HTML of sequence titles, stripping out span tags and returning the first line.
        """
        return self.REMOVE_SPAN_TAG_RE.search(element.get_attribute('innerHTML')).groups()[0].strip()

    @property
    def active_subsection_url(self):
        """
        return the url of the active subsection in the left nav
        """
        return self.q(css='.chapter-content-container .menu-item.active a').attrs('href')[0]
| ampax/edx-platform | common/test/acceptance/pages/lms/course_nav.py | Python | agpl-3.0 | 7,904 |
#@help:nmap [IPv4 address] - Scans the ports of the device with the given address. USE AT OWN RISK.
#Taken from http://www.pythonforbeginners.com/code-snippets-source-code/port-scanner-in-python/
#
#Eddited a bit by Rsgm
from game.pythonapi.menu import PyMenu
import socket
import sys
from datetime import datetime
# Ask for input
# NOTE(review): `options` and `menu` are injected by the game engine before
# this script runs; they are not defined in this file.
remoteServer = options.valueOf('a')
remoteServerIP = socket.gethostbyname(remoteServer)

# Print a nice banner with information on which host we are about to scan
menu.write("-" * 60)
menu.write("Please wait, scanning remote host " + remoteServerIP)
menu.write("This could take a few minutes.")
menu.write("-" * 60)

# Check what time the scan started
t1 = datetime.now()

# Scan ports 1-1023 sequentially (range() excludes its upper bound),
# opening one TCP connection per port.
# We also put in some error handling for catching errors
for port in range(1, 1024):
    try:
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # connect_ex returns 0 on success instead of raising on failure.
        result = sock.connect_ex((remoteServerIP, port))
        if result == 0:
            menu.write("Port " + str(port) + ": Open")
        sock.close()
    except socket.gaierror:
        menu.write('Hostname could not be resolved. Exiting')
        sys.exit()
    except socket.error:
        # Python 2 print statement: this script targets a Python/Jython 2.x
        # runtime and must not be run under Python 3 as-is.
        print str(port) + " - closed"

# Checking the time again
t2 = datetime.now()

# Calculates the difference of time, to see how long it took to run the script
total = t2 - t1

# Printing the information to screen
menu.write('Scanning Completed in: ' + str(total))
#@parse_start
def getParser():
    """Build the jopt-simple parser: -a <address> is required unless -h/--help."""
    p = OptionParser()
    # NOTE(review): `.forHelp` is a Java builder method accessed without
    # parentheses -- verify the Jython bridge actually invokes it here,
    # otherwise this line is a no-op.
    p.acceptsAll(["h", "help"], "show help").forHelp
    p.accepts("a", "sets the address to scan, this can be an IP or a URL").requiredUnless("h").withRequiredArg().describedAs("address")
    return p
#@parse_end
| Rsgm/Hakd | core/assets/python/menu/nmap.py | Python | mit | 1,797 |
# -*- coding: utf-8 -*-
#
# jams documentation build configuration file, created by
# sphinx-quickstart on Mon Dec 8 10:34:40 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# -- General configuration ------------------------------------------------
import os
import sys
sys.path.insert(0, os.path.abspath('../'))
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '1.2'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.doctest',
'sphinx.ext.viewcode',
'sphinx.ext.intersphinx',
'numpydoc'
]
import glob
autosummary_generate = glob.glob('*.rst')
numpydoc_show_class_members = False
intersphinx_mapping = {'numpy': ('https://docs.scipy.org/doc/numpy/', None),
'np': ('https://docs.scipy.org/doc/numpy/', None),
'pandas': ('http://pandas.pydata.org/pandas-docs/stable/', None),
'pd': ('http://pandas.pydata.org/pandas-docs/stable/', None),
'mir_eval': ('https://craffel.github.io/mir_eval/', None),
'json': ('https://docs.python.org/2/', None),
'jsonschema': ('https://python-jsonschema.readthedocs.io/en/latest/', None)}
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
exclude_trees = ['_templates', '_build']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'jams'
copyright = u'2015, JAMS development team'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
import imp
jams_version = imp.load_source('jams.version', '../jams/version.py')
version = jams_version.short_version
# The full version, including alpha/beta/rc tags.
release = jams_version.version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
default_role = 'autolink'
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = False
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# Mock
from mock import Mock as MagicMock
class Mock(MagicMock):
    """Import-time stand-in for heavy dependencies (Read the Docs recipe).

    Any attribute access on a Mock returns a fresh Mock, so autodoc can
    import project modules that use these packages without having the
    packages installed on the docs build host.
    """
    @classmethod
    def __getattr__(cls, name):
        return Mock()
# Modules to stub out at import time; installing Mock instances into
# sys.modules makes `import numpy` etc. succeed during the docs build.
MOCK_MODULES = (['jsonschema', 'mir_eval', 'pandas', 'numpy',
                 'mir_eval.sonify', 'mir_eval.util', 'mir_eval.display',
                 'decorator',
                 'matplotlib', 'matplotlib.pyplot', 'matplotlib.offsetbox',
                 'sortedcontainers'])
sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES)
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#html_theme = 'default'
import sphinx_rtd_theme

# Read the Docs injects its own theme, so only apply sphinx_rtd_theme
# (and its search path) for local builds.
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'

if on_rtd:
    html_theme = 'default'
#    MOCK_MODULES = ['numpy', 'pandas']
#    sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES)
else:
    html_theme = 'sphinx_rtd_theme'
    html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
html_domain_indices = True
# If false, no index is generated.
html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'jamsdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'jams.tex', u'jams Documentation',
u'JAMS development team', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'jams', u'jams Documentation',
[u'JAMS development team'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'jams', u'jams Documentation', u'JAMS development team', 'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| marl/jams | docs/conf.py | Python | isc | 9,902 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.python.framework.dtypes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.core.framework import types_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.platform import googletest
def _is_numeric_dtype_enum(datatype_enum):
  """Return True iff `datatype_enum` is not one of the non-numeric dtypes."""
  excluded = (types_pb2.DT_VARIANT,
              types_pb2.DT_VARIANT_REF,
              types_pb2.DT_INVALID,
              types_pb2.DT_RESOURCE,
              types_pb2.DT_RESOURCE_REF)
  return all(datatype_enum != enum_value for enum_value in excluded)
class TypesTest(test_util.TensorFlowTestCase):
  """Tests for dtypes.DType: construction, conversion, predicates, min/max.

  Improvement: all uses of the deprecated `assertEquals` alias (removed in
  Python 3.12) were replaced with `assertEqual`; behavior is identical.
  """

  def testAllTypesConstructible(self):
    for datatype_enum in types_pb2.DataType.values():
      if datatype_enum == types_pb2.DT_INVALID:
        continue
      self.assertEqual(datatype_enum,
                       dtypes.DType(datatype_enum).as_datatype_enum)

  def testAllTypesConvertibleToDType(self):
    for datatype_enum in types_pb2.DataType.values():
      if datatype_enum == types_pb2.DT_INVALID:
        continue
      dt = dtypes.as_dtype(datatype_enum)
      self.assertEqual(datatype_enum, dt.as_datatype_enum)

  def testAllTypesConvertibleToNumpyDtype(self):
    for datatype_enum in types_pb2.DataType.values():
      if not _is_numeric_dtype_enum(datatype_enum):
        continue
      dtype = dtypes.as_dtype(datatype_enum)
      numpy_dtype = dtype.as_numpy_dtype
      _ = np.empty((1, 1, 1, 1), dtype=numpy_dtype)
      if dtype.base_dtype != dtypes.bfloat16:
        # NOTE(touts): Intentionally no way to feed a DT_BFLOAT16.
        self.assertEqual(
            dtypes.as_dtype(datatype_enum).base_dtype,
            dtypes.as_dtype(numpy_dtype))

  def testInvalid(self):
    with self.assertRaises(TypeError):
      dtypes.DType(types_pb2.DT_INVALID)
    with self.assertRaises(TypeError):
      dtypes.as_dtype(types_pb2.DT_INVALID)

  def testNumpyConversion(self):
    self.assertIs(dtypes.float32, dtypes.as_dtype(np.float32))
    self.assertIs(dtypes.float64, dtypes.as_dtype(np.float64))
    self.assertIs(dtypes.int32, dtypes.as_dtype(np.int32))
    self.assertIs(dtypes.int64, dtypes.as_dtype(np.int64))
    self.assertIs(dtypes.uint8, dtypes.as_dtype(np.uint8))
    self.assertIs(dtypes.uint16, dtypes.as_dtype(np.uint16))
    self.assertIs(dtypes.int16, dtypes.as_dtype(np.int16))
    self.assertIs(dtypes.int8, dtypes.as_dtype(np.int8))
    self.assertIs(dtypes.complex64, dtypes.as_dtype(np.complex64))
    self.assertIs(dtypes.complex128, dtypes.as_dtype(np.complex128))
    # NOTE(review): np.object / np.bool are deprecated aliases removed in
    # NumPy >= 1.24; these lines will raise AttributeError on modern NumPy.
    self.assertIs(dtypes.string, dtypes.as_dtype(np.object))
    self.assertIs(dtypes.string,
                  dtypes.as_dtype(np.array(["foo", "bar"]).dtype))
    self.assertIs(dtypes.bool, dtypes.as_dtype(np.bool))
    with self.assertRaises(TypeError):
      dtypes.as_dtype(np.dtype([("f1", np.uint), ("f2", np.int32)]))

  def testRealDtype(self):
    for dtype in [
        dtypes.float32, dtypes.float64, dtypes.bool, dtypes.uint8, dtypes.int8,
        dtypes.int16, dtypes.int32, dtypes.int64
    ]:
      self.assertIs(dtype.real_dtype, dtype)
    self.assertIs(dtypes.complex64.real_dtype, dtypes.float32)
    self.assertIs(dtypes.complex128.real_dtype, dtypes.float64)

  def testStringConversion(self):
    self.assertIs(dtypes.float32, dtypes.as_dtype("float32"))
    self.assertIs(dtypes.float64, dtypes.as_dtype("float64"))
    self.assertIs(dtypes.int32, dtypes.as_dtype("int32"))
    self.assertIs(dtypes.uint8, dtypes.as_dtype("uint8"))
    self.assertIs(dtypes.uint16, dtypes.as_dtype("uint16"))
    self.assertIs(dtypes.int16, dtypes.as_dtype("int16"))
    self.assertIs(dtypes.int8, dtypes.as_dtype("int8"))
    self.assertIs(dtypes.string, dtypes.as_dtype("string"))
    self.assertIs(dtypes.complex64, dtypes.as_dtype("complex64"))
    self.assertIs(dtypes.complex128, dtypes.as_dtype("complex128"))
    self.assertIs(dtypes.int64, dtypes.as_dtype("int64"))
    self.assertIs(dtypes.bool, dtypes.as_dtype("bool"))
    self.assertIs(dtypes.qint8, dtypes.as_dtype("qint8"))
    self.assertIs(dtypes.quint8, dtypes.as_dtype("quint8"))
    self.assertIs(dtypes.qint32, dtypes.as_dtype("qint32"))
    self.assertIs(dtypes.bfloat16, dtypes.as_dtype("bfloat16"))
    self.assertIs(dtypes.float32_ref, dtypes.as_dtype("float32_ref"))
    self.assertIs(dtypes.float64_ref, dtypes.as_dtype("float64_ref"))
    self.assertIs(dtypes.int32_ref, dtypes.as_dtype("int32_ref"))
    self.assertIs(dtypes.uint8_ref, dtypes.as_dtype("uint8_ref"))
    self.assertIs(dtypes.int16_ref, dtypes.as_dtype("int16_ref"))
    self.assertIs(dtypes.int8_ref, dtypes.as_dtype("int8_ref"))
    self.assertIs(dtypes.string_ref, dtypes.as_dtype("string_ref"))
    self.assertIs(dtypes.complex64_ref, dtypes.as_dtype("complex64_ref"))
    self.assertIs(dtypes.complex128_ref, dtypes.as_dtype("complex128_ref"))
    self.assertIs(dtypes.int64_ref, dtypes.as_dtype("int64_ref"))
    self.assertIs(dtypes.bool_ref, dtypes.as_dtype("bool_ref"))
    self.assertIs(dtypes.qint8_ref, dtypes.as_dtype("qint8_ref"))
    self.assertIs(dtypes.quint8_ref, dtypes.as_dtype("quint8_ref"))
    self.assertIs(dtypes.qint32_ref, dtypes.as_dtype("qint32_ref"))
    self.assertIs(dtypes.bfloat16_ref, dtypes.as_dtype("bfloat16_ref"))
    with self.assertRaises(TypeError):
      dtypes.as_dtype("not_a_type")

  def testDTypesHaveUniqueNames(self):
    dtypez = []
    names = set()
    for datatype_enum in types_pb2.DataType.values():
      if datatype_enum == types_pb2.DT_INVALID:
        continue
      dtype = dtypes.as_dtype(datatype_enum)
      dtypez.append(dtype)
      names.add(dtype.name)
    self.assertEqual(len(dtypez), len(names))

  def testIsInteger(self):
    self.assertEqual(dtypes.as_dtype("int8").is_integer, True)
    self.assertEqual(dtypes.as_dtype("int16").is_integer, True)
    self.assertEqual(dtypes.as_dtype("int32").is_integer, True)
    self.assertEqual(dtypes.as_dtype("int64").is_integer, True)
    self.assertEqual(dtypes.as_dtype("uint8").is_integer, True)
    self.assertEqual(dtypes.as_dtype("uint16").is_integer, True)
    self.assertEqual(dtypes.as_dtype("complex64").is_integer, False)
    self.assertEqual(dtypes.as_dtype("complex128").is_integer, False)
    self.assertEqual(dtypes.as_dtype("float").is_integer, False)
    self.assertEqual(dtypes.as_dtype("double").is_integer, False)
    self.assertEqual(dtypes.as_dtype("string").is_integer, False)
    self.assertEqual(dtypes.as_dtype("bool").is_integer, False)
    self.assertEqual(dtypes.as_dtype("bfloat16").is_integer, False)
    self.assertEqual(dtypes.as_dtype("qint8").is_integer, False)
    self.assertEqual(dtypes.as_dtype("qint16").is_integer, False)
    self.assertEqual(dtypes.as_dtype("qint32").is_integer, False)
    self.assertEqual(dtypes.as_dtype("quint8").is_integer, False)
    self.assertEqual(dtypes.as_dtype("quint16").is_integer, False)

  def testIsFloating(self):
    self.assertEqual(dtypes.as_dtype("int8").is_floating, False)
    self.assertEqual(dtypes.as_dtype("int16").is_floating, False)
    self.assertEqual(dtypes.as_dtype("int32").is_floating, False)
    self.assertEqual(dtypes.as_dtype("int64").is_floating, False)
    self.assertEqual(dtypes.as_dtype("uint8").is_floating, False)
    self.assertEqual(dtypes.as_dtype("uint16").is_floating, False)
    self.assertEqual(dtypes.as_dtype("complex64").is_floating, False)
    self.assertEqual(dtypes.as_dtype("complex128").is_floating, False)
    self.assertEqual(dtypes.as_dtype("float32").is_floating, True)
    self.assertEqual(dtypes.as_dtype("float64").is_floating, True)
    self.assertEqual(dtypes.as_dtype("string").is_floating, False)
    self.assertEqual(dtypes.as_dtype("bool").is_floating, False)
    # NOTE(review): this line checks is_integer, not is_floating -- likely a
    # copy/paste slip in the original; both are False for bfloat16 though.
    self.assertEqual(dtypes.as_dtype("bfloat16").is_integer, False)
    self.assertEqual(dtypes.as_dtype("qint8").is_floating, False)
    self.assertEqual(dtypes.as_dtype("qint16").is_floating, False)
    self.assertEqual(dtypes.as_dtype("qint32").is_floating, False)
    self.assertEqual(dtypes.as_dtype("quint8").is_floating, False)
    self.assertEqual(dtypes.as_dtype("quint16").is_floating, False)

  def testIsComplex(self):
    self.assertEqual(dtypes.as_dtype("int8").is_complex, False)
    self.assertEqual(dtypes.as_dtype("int16").is_complex, False)
    self.assertEqual(dtypes.as_dtype("int32").is_complex, False)
    self.assertEqual(dtypes.as_dtype("int64").is_complex, False)
    self.assertEqual(dtypes.as_dtype("uint8").is_complex, False)
    self.assertEqual(dtypes.as_dtype("uint16").is_complex, False)
    self.assertEqual(dtypes.as_dtype("complex64").is_complex, True)
    self.assertEqual(dtypes.as_dtype("complex128").is_complex, True)
    self.assertEqual(dtypes.as_dtype("float32").is_complex, False)
    self.assertEqual(dtypes.as_dtype("float64").is_complex, False)
    self.assertEqual(dtypes.as_dtype("string").is_complex, False)
    self.assertEqual(dtypes.as_dtype("bool").is_complex, False)
    self.assertEqual(dtypes.as_dtype("bfloat16").is_complex, False)
    self.assertEqual(dtypes.as_dtype("qint8").is_complex, False)
    self.assertEqual(dtypes.as_dtype("qint16").is_complex, False)
    self.assertEqual(dtypes.as_dtype("qint32").is_complex, False)
    self.assertEqual(dtypes.as_dtype("quint8").is_complex, False)
    self.assertEqual(dtypes.as_dtype("quint16").is_complex, False)

  def testIsUnsigned(self):
    self.assertEqual(dtypes.as_dtype("int8").is_unsigned, False)
    self.assertEqual(dtypes.as_dtype("int16").is_unsigned, False)
    self.assertEqual(dtypes.as_dtype("int32").is_unsigned, False)
    self.assertEqual(dtypes.as_dtype("int64").is_unsigned, False)
    self.assertEqual(dtypes.as_dtype("uint8").is_unsigned, True)
    self.assertEqual(dtypes.as_dtype("uint16").is_unsigned, True)
    self.assertEqual(dtypes.as_dtype("float32").is_unsigned, False)
    self.assertEqual(dtypes.as_dtype("float64").is_unsigned, False)
    self.assertEqual(dtypes.as_dtype("bool").is_unsigned, False)
    self.assertEqual(dtypes.as_dtype("string").is_unsigned, False)
    self.assertEqual(dtypes.as_dtype("complex64").is_unsigned, False)
    self.assertEqual(dtypes.as_dtype("complex128").is_unsigned, False)
    self.assertEqual(dtypes.as_dtype("bfloat16").is_unsigned, False)
    self.assertEqual(dtypes.as_dtype("qint8").is_unsigned, False)
    self.assertEqual(dtypes.as_dtype("qint16").is_unsigned, False)
    self.assertEqual(dtypes.as_dtype("qint32").is_unsigned, False)
    self.assertEqual(dtypes.as_dtype("quint8").is_unsigned, False)
    self.assertEqual(dtypes.as_dtype("quint16").is_unsigned, False)

  def testMinMax(self):
    # make sure min/max evaluates for all data types that have min/max
    for datatype_enum in types_pb2.DataType.values():
      if not _is_numeric_dtype_enum(datatype_enum):
        continue
      dtype = dtypes.as_dtype(datatype_enum)
      numpy_dtype = dtype.as_numpy_dtype

      # ignore types for which there are no minimum/maximum (or we cannot
      # compute it, such as for the q* types)
      if (dtype.is_quantized or dtype.base_dtype == dtypes.bool or
          dtype.base_dtype == dtypes.string or
          dtype.base_dtype == dtypes.complex64 or
          dtype.base_dtype == dtypes.complex128):
        continue

      print("%s: %s - %s" % (dtype, dtype.min, dtype.max))

      # check some values that are known
      if numpy_dtype == np.bool_:
        self.assertEqual(dtype.min, 0)
        self.assertEqual(dtype.max, 1)
      if numpy_dtype == np.int8:
        self.assertEqual(dtype.min, -128)
        self.assertEqual(dtype.max, 127)
      if numpy_dtype == np.int16:
        self.assertEqual(dtype.min, -32768)
        self.assertEqual(dtype.max, 32767)
      if numpy_dtype == np.int32:
        self.assertEqual(dtype.min, -2147483648)
        self.assertEqual(dtype.max, 2147483647)
      if numpy_dtype == np.int64:
        self.assertEqual(dtype.min, -9223372036854775808)
        self.assertEqual(dtype.max, 9223372036854775807)
      if numpy_dtype == np.uint8:
        self.assertEqual(dtype.min, 0)
        self.assertEqual(dtype.max, 255)
      if numpy_dtype == np.uint16:
        if dtype == dtypes.uint16:
          self.assertEqual(dtype.min, 0)
          self.assertEqual(dtype.max, 65535)
        elif dtype == dtypes.bfloat16:
          # NOTE(review): 4294967295 is the uint32 max, which looks wrong for
          # bfloat16 -- preserved from the original; confirm against the
          # bfloat16 limits actually exposed by dtypes.
          self.assertEqual(dtype.min, 0)
          self.assertEqual(dtype.max, 4294967295)
      if numpy_dtype == np.uint32:
        self.assertEqual(dtype.min, 0)
        self.assertEqual(dtype.max, 4294967295)
      if numpy_dtype == np.uint64:
        self.assertEqual(dtype.min, 0)
        self.assertEqual(dtype.max, 18446744073709551615)
      if numpy_dtype in (np.float16, np.float32, np.float64):
        self.assertEqual(dtype.min, np.finfo(numpy_dtype).min)
        self.assertEqual(dtype.max, np.finfo(numpy_dtype).max)

  def testRepr(self):
    for enum, name in dtypes._TYPE_TO_STRING.items():
      # Enum values > 100 are the *_ref variants; skip them here.
      if enum > 100:
        continue
      dtype = dtypes.DType(enum)
      self.assertEqual(repr(dtype), "tf." + name)
      # Bind `tf` locally so eval(repr(dtype)) can resolve "tf.<name>".
      import tensorflow as tf
      dtype2 = eval(repr(dtype))
      self.assertEqual(type(dtype2), dtypes.DType)
      self.assertEqual(dtype, dtype2)

  def testEqWithNonTFTypes(self):
    self.assertNotEqual(dtypes.int32, int)
    self.assertNotEqual(dtypes.float64, 2.1)
| shakamunyi/tensorflow | tensorflow/python/framework/dtypes_test.py | Python | apache-2.0 | 14,160 |
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
def notify(_context, message):
    """Accept and silently discard *message* (the no-op notifier driver)."""
    # Deliberately empty: this driver makes notification calls cheap
    # no-ops when notifications are disabled.
| jessicalucci/NovaOrc | nova/openstack/common/notifier/no_op_notifier.py | Python | apache-2.0 | 747 |
# Module-level constants and mutable game state. WIDTH/HEIGHT (and the
# draw/update/on_* hooks below) follow the Pygame Zero conventions: the
# pgzero runtime injects ZRect, screen, mouse and keys at run time.
WIDTH = 640
HEIGHT = 480
#
# Create a series of difficulty levels, specifying
# the speed & size of the ball and the width of the bat.
#
levels = [
    {"speed" : 3, "ball_size" : (30, 30), "bat_width" : 150},
    {"speed" : 4, "ball_size" : (24, 24), "bat_width" : 100},
    {"speed" : 5, "ball_size" : (12, 12), "bat_width" : 72},
]
# Plain namespace object holding all mutable game state.
class Game(object): pass
game = Game()
game.score = 0
game.status = "Starting"
game.score_per_brick = 1
game.current_level = 0
game.scoreboard = []
#
# Create a status display, as wide as the screen and 60 pixels high.
# It's placed at the bottom of the screen
#
STATUS_DISPLAY = ZRect(0, HEIGHT - 60, WIDTH, 60)
#
# Create a game window which is as wide as the screen but allows
# a status display underneath
#
GAME_WINDOW = ZRect(0, 0, WIDTH, HEIGHT - STATUS_DISPLAY.height - 1)
GAME_WINDOW.background_colour = "darkblue"
GAME_WINDOW.frame_colour = "white"
class Ball(ZRect): pass
#
# The ball is a red square halfway across the game window
#
ball_size = levels[game.current_level]['ball_size']
ball = Ball(GAME_WINDOW.center, ball_size)
ball.colour = "red"
#
# The ball moves one step right and one step down each tick
#
ball.direction = 1, 1
#
# The ball moves at the speed configured for the current level
#
ball.speed = levels[game.current_level]['speed']
class Bat(ZRect): pass
#
# The bat is a green oblong which starts just along the bottom
# of the game window and halfway across.
#
BAT_W = levels[game.current_level]['bat_width']
BAT_H = BAT_W / 10
bat = Bat(GAME_WINDOW.centerx, GAME_WINDOW.bottom - BAT_H, BAT_W, BAT_H)
bat.colour = "green"
class Brick(ZRect): pass
#
# The brick is a rectangle one eighth the width of the game window
# and one quarter as high as it is wide.
#
N_BRICKS = 8
BRICK_W = GAME_WINDOW.width / N_BRICKS
BRICK_H = BRICK_W / 4
BRICK_COLOURS = ["purple", "lightgreen", "lightblue", "orange"]
#
# The brick colours cycle through <BRICK_COLOURS>
#
bricks = []
def reset_game():
    """Reset ball, bat, bricks, score and difficulty for a new game."""
    # Re-centre the ball and park the bat on the window's bottom edge.
    ball.center = GAME_WINDOW.center
    bat.center = (GAME_WINDOW.centerx, GAME_WINDOW.bottom - BAT_H)
    # Rebuild the wall in place: N_BRICKS bricks across the top of the
    # window, cycling through the colour palette. The list object is
    # mutated (not rebound) so other references to `bricks` stay valid.
    del bricks[:]
    for index in range(N_BRICKS):
        new_brick = Brick(
            GAME_WINDOW.left + index * BRICK_W, GAME_WINDOW.top,
            BRICK_W, BRICK_H
        )
        new_brick.colour = BRICK_COLOURS[index % len(BRICK_COLOURS)]
        bricks.append(new_brick)
    # Back to a fresh score on the easiest level.
    game.score = 0
    game.current_level = 0
    set_up_level()
def set_up_level():
    """Apply the current difficulty level's ball/bat settings."""
    settings = levels[game.current_level]
    ball.speed = settings["speed"]
    ball.size = settings["ball_size"]
    bat.width = settings["bat_width"]
    bat.height = bat.width / 10
    # Harder levels are worth more points per brick.
    game.score_per_brick = 1 + game.current_level
def draw_scoreboard():
    """Render the ten best scores, highest first, inside the game window.

    Lays out a heading line followed by one line per score, each line
    being one-twelfth of the game window's height.
    """
    top_10_scores = sorted(game.scoreboard, reverse=True)[:10]
    scoreline_height = GAME_WINDOW.height / 12
    scoreline_box = ZRect(
        GAME_WINDOW.left, GAME_WINDOW.top,
        GAME_WINDOW.width, scoreline_height
    )
    screen.draw.textbox("Top 10 Scores", scoreline_box)
    for n, score in enumerate(top_10_scores):
        # Leave a one-line gap under the heading, then one line per score.
        # (Also dropped a stray trailing semicolon from this statement.)
        scoreline_y_offset = (2 + n) * scoreline_height
        scoreline_box = ZRect(
            GAME_WINDOW.left, GAME_WINDOW.top + scoreline_y_offset,
            GAME_WINDOW.width, scoreline_height
        )
        screen.draw.textbox("%1d - %s" % (1 + n, score), scoreline_box)
def draw():
    """Pygame Zero draw hook: paint the frame for the current game state."""
    #
    # Clear the screen, draw the game window and place the ball at its current position
    #
    screen.clear()
    #
    # Draw the game window and a frame around it
    #
    screen.draw.filled_rect(GAME_WINDOW, GAME_WINDOW.background_colour)
    screen.draw.rect(GAME_WINDOW.inflate(+2, +2), GAME_WINDOW.frame_colour)
    #
    # Fill in the status window
    #
    if game.status == "Starting":
        #
        # If the game is waiting to start indicate how to start
        #
        screen.draw.text("Press SPACE to start", center=STATUS_DISPLAY.center)
    elif game.status == "Running":
        #
        # If the game is running show the current status, centred inside the status area
        #
        screen.draw.text(
            "Score: %d" % game.score,
            left=STATUS_DISPLAY.left + 4,
            centery=STATUS_DISPLAY.centery
        )
        screen.draw.text(
            "Status: %s" % game.status,
            right=STATUS_DISPLAY.right - 4,
            centery=STATUS_DISPLAY.centery
        )
    #
    # Fill in the gameplay window
    #
    if game.status == "Starting":
        #
        # If the game is waiting to start, show the current high scoreboard
        #
        draw_scoreboard()
    elif game.status == "Running":
        screen.draw.filled_rect(ball, ball.colour)
        screen.draw.filled_rect(bat, bat.colour)
        for brick in bricks:
            screen.draw.filled_rect(brick, brick.colour)
            # Each brick shows how many points it is currently worth.
            screen.draw.textbox("%s" % game.score_per_brick, brick)
def on_mouse_move(pos):
    """Slide the bat horizontally with the mouse, clamped to the window."""
    # Ignore mouse movement unless a game is in progress.
    if game.status != "Running":
        return
    bat.centerx = pos[0]
    bat.clamp_ip(GAME_WINDOW)
def on_mouse_down(button):
    """Adjust difficulty with the mouse buttons.

    Right button: next (harder) level -- smaller ball/bat, faster ball.
    Left button: previous (easier) level. The per-brick score tracks the
    current level (see set_up_level).
    """
    if button == mouse.RIGHT:
        # Bug fix: clamp to the last valid index. The previous test
        # (current_level < len(levels)) allowed the index to step one
        # past the end of `levels`, crashing set_up_level with IndexError.
        if game.current_level < len(levels) - 1:
            game.current_level += 1
    elif button == mouse.LEFT:
        if game.current_level > 0:
            game.current_level -= 1
    set_up_level()
def on_key_down(key):
    """Start a new game when SPACE is pressed on the start screen."""
    if key == keys.SPACE and game.status == "Starting":
        reset_game()
        game.status = "Running"
def update():
    """Pygame Zero update hook: advance one frame of gameplay.

    Moves the ball, handles collisions with bat, bricks and walls, and
    detects the win/lose conditions.
    """
    if game.status == "Running":
        #
        # Move the ball along its current direction at its current speed
        #
        # NOTE: dx, dy hold the direction captured at the top of the
        # frame; every collision check below flips relative to this
        # original direction, so the ordering of the checks matters.
        dx, dy = ball.direction
        ball.move_ip(ball.speed * dx, ball.speed * dy)
        #
        # Bounce the ball off the bat
        #
        if ball.colliderect(bat):
            ball.direction = dx, -dy
        #
        # If the ball hits a brick, kill that brick and
        # bounce the ball.
        #
        to_kill = ball.collidelist(bricks)
        if to_kill >= 0:
            bricks.pop(to_kill)
            game.score += game.score_per_brick
            ball.direction = dx, -dy
        #
        # Bounce the ball off the left or right walls
        #
        if ball.right >= GAME_WINDOW.right or ball.left <= GAME_WINDOW.left:
            ball.direction = -dx, dy
        #
        # If the ball hits the bottom wall, you lose
        #
        if ball.bottom >= GAME_WINDOW.bottom:
            game.status = "Starting"
        #
        # Bounce the ball off the top wall
        #
        if ball.top <= GAME_WINDOW.top:
            ball.direction = dx, -dy
        #
        # If there are no bricks left, you win
        #
        if not bricks:
            game.scoreboard.append(game.score)
            game.status = "Starting"
#! /usr/bin/env python
#
# Copyright (C) 2015 Rich Lewis <rl403@cam.ac.uk>
# License: 3-clause BSD
# TO DO - CONVERSION SCRIPTS | richlewis42/scikit-chem | scripts/convert.py | Python | bsd-3-clause | 128 |
"""
[2016-10-14] Challenge #287 [Hard] Word Numbers
https://www.reddit.com/r/dailyprogrammer/comments/57fzcv/20161014_challenge_287_hard_word_numbers/
# Description
Read the problem carefully and make sure you understand it. This is a hard problem, so if it seems straightforward, you
might be misreading something. Feel free to ask for clarification.
Consider the following procedure:
1. Take a list of the integers 1 through 999,999,999.
2. Write out each integer in English, so that you have 999,999,999 strings.
3. Sort the strings using [alphabetical order](https://en.wikipedia.org/wiki/Alphabetical_order).
4. Concatenate them all into one big string.
5. Take the first 51 billion (51,000,000,000) letters of this big string.
Now, you probably can't actually do this procedure. It would take too long or require too much memory. But determine
what, if you did this procedure, would be the answers to the following questions about your final, 51-billion-letter
string:
1. What is the last letter in your string?
2. What is the last number named in your string? (Hint: your string will end at the end of a number.)
3. What is the sum of all the numbers named in your string?
You must actually be able to answer all these questions. Writing a program that would theoretically find the answer
given a long time is **not** a valid solution to this problem. There's no strict runtime limit, but actually run your
program to completion and get the answers before posting your code. (If you want a goal, my Python solution takes 0.05
seconds, but that fast is not necessary.)
# Details
When you write the numbers out in step 2, omit spaces, punctuation, and the word "and". Examples of how this step looks:
100 -> onehundred
1709 -> onethousandsevenhundrednine
500000000 -> fivehundredmillion
911610034 -> ninehundredelevenmillionsixhundredtenthousandthirtyfour
The first word in this list after sorting alphabetically is `eight`, followed by `eighteen`, then `eighteenmillion`,
then `eighteenmillioneight`. Thus your final string will begin like this:
eighteighteeneighteenmillioneighteenmillioneight...
And be 51 billion letters long.
# Example
The procedure requires taking the first 51 billion letters in step 5. As an example, if instead I asked you to take the
first 28 letters in step 5, then your final string would be:
eighteighteeneighteenmillion
And the answers to the three questions would be:
1. N
2. 18000000 (eighteen million)
3. 18000026 (8 + 18 + 18000000)
# Bonus
Same procedure, except start with the integers 1 through 999,999,999,999 in step 1, and take the first 68 trillion
(68,000,000,000,000) letters in step 5. If I did it right (that's a big "if"), this will also end on a number name
boundary.
# Notes
This is an old ITA Software hiring puzzle, and the solution can be found in several places on the web (including
Reddit). So if you go looking for it, spoiler alert! On the other hand, it's easy to check your solution by doing a web
search for your answer to question #3.
*Thanks to u/wizao for posting this challenge to r/dailyprogrammer_ideas!*
"""
def main():
    # Placeholder: the solution to the challenge described in the module
    # docstring above has not been implemented yet.
    pass
if __name__ == "__main__":
    main()
| DayGitH/Python-Challenges | DailyProgrammer/DP20161014C.py | Python | mit | 3,169 |
import subprocess
import StringIO
import glob
import json
import logging as log
import os
from collections import defaultdict
import time
from utils import TargetdError, invoke
# Discovery and authentication modes accepted by this module's public API.
DISCOVERY_METHODS = ["sendtargets", "isns"]
AUTH_METHODS = ["chap", "mutual_chap"]
def _checkfile(path):
    """Return *path* if it names an existing regular file, else False."""
    if os.path.isfile(path):
        return path
    return False
# Prefer a known absolute location for iscsiadm, falling back to
# resolution via $PATH if neither standard path exists.
ISCSIADM_BINARY = (_checkfile("/usr/bin/iscsiadm")
                   or _checkfile("/sbin/iscsiadm") or "iscsiadm")
# Exit codes returned by the iscsiadm binary (open-iscsi iscsi_err values).
ISCSI_ERR_LOGIN_AUTH_FAILED = 24
ISCSI_ERR_TRANS = 4
ISCSI_ERR_IDBM = 6
ISCSI_ERR_TRANS_TIMEOUT = 8
ISCSI_ERR_NO_OBJS_FOUND = 21
ISCSI_ERR_SESS_EXISTS = 15
ISCSI_ERR_ISNS_QUERY = 25
# Error codes this module reports back to targetd RPC clients.
STRING_TOO_LONG = -20
EMPTY_STRING = -21
NO_ASCSII_STRING = -22
DISCOVERY_RECORD_NOT_FOUND = -23
INVALID_VALUE_DISCOVERY = -24
INVALID_VALUE_AUTH = -25
NO_ROUTE_TO_HOST = -26
LOGIN_FAILED = -27
SERVER_FAILURE = -28
NO_RECORDS_FOUND = -29
SESSION_LOGGED_IN = -30
NO_SESSION_INFO = -31
QUERY_FAILURE = -32
def initialize(config_dict):
    """Return the mapping of RPC method names to their handlers.

    *config_dict* is accepted for interface compatibility but not used.
    """
    return {
        "get_initiator_name": get_initiator_name,
        "delete_discovery": delete_discovery,
        "display_discovery": display_discovery,
        "display_discovery_summary": display_discovery_summary,
        "discover_portal": discover_portal,
        "login_target": login_target,
        "logout_target": logout_target,
        "logout_all_targets": logout_all_targets,
        "display_node": display_node,
        "display_node_summary": display_node_summary,
        "delete_node": delete_node,
        "delete_all_nodes": delete_all_nodes,
        "display_session": display_session,
        "purge": purge,
    }
def nested_set(dic, keys, value):
    """Assign *value* at the nested key path *keys* inside *dic*,
    creating intermediate dictionaries as needed."""
    node = dic
    for key in keys[:-1]:
        node = node.setdefault(key, {})
    node[keys[-1]] = value


def discovery_node_parser(msg, mode):
    """Parse iscsiadm record output into a nested dictionary.

    Only lines starting with *mode* (e.g. "discovery" or "node") are
    kept; each "a.b.c = value" line becomes a nested entry. The literal
    value "<empty>" is translated to an empty string.
    """
    records = defaultdict(dict)
    for raw in msg.splitlines():
        if not raw.startswith(mode):
            continue
        attribute, value = raw.split("=")
        value = value.strip()
        if value == "<empty>":
            value = ""
        key_path = [part.strip() for part in attribute.split(".")]
        nested_set(records, key_path, value)
    return records[mode]
def discovery_summary_parser(msg):
    """Parse the discovery summary listing into a dictionary mapping
    portal hostname -> (port, discovery_method)."""
    records = defaultdict(dict)
    for raw in msg.splitlines():
        pieces = [piece.strip() for piece in raw.split(" via ")]
        # A valid summary line looks like "host:port via method".
        if len(pieces) != 2:
            continue
        endpoint, method = pieces
        try:
            host, port_text = endpoint.split(":")
            records[host] = (int(port_text), method)
        except KeyError:
            pass
    return records
def node_summary_parser(msg):
    """Parse "iscsiadm -m node" summary output into a dictionary keyed
    by target name, then portal hostname."""
    records = defaultdict(dict)
    keyword_map = {"Portal": "portal", "Iface Name": "interface"}
    for raw in msg.splitlines():
        pieces = [piece.strip() for piece in raw.split(": ")]
        if len(pieces) != 2:
            continue
        key, value = pieces
        try:
            if key == "Target":
                # Remember the current target; following lines attach to it.
                target = value
            elif key == "Portal":
                host, tpg = value.split(",")
                hostname, port = host.split(":")
                records[target][hostname] = {
                    keyword_map[key]: (hostname, int(port), int(tpg))
                }
            else:
                records[target][hostname][keyword_map[key]] = value
        except KeyError:
            # Unknown keys (not in keyword_map) are ignored.
            pass
    return records
def session_parser(msg):
    """Parse "iscsiadm -m session -P 1" output into a dictionary keyed
    by target name, then portal hostname, holding session attributes."""
    records = defaultdict(dict)
    keyword_map = {"Current Portal": "portal", "Iface Transport": "transport",
                   "Iface Initiatorname": "initiator_name",
                   "Iface IPaddress": "ip_address", "SID": "session_id",
                   "iSCSI Connection State": "connection_state",
                   "iSCSI Session State": "session_state",
                   "Internal iscsid Session State": "iscsid_session_state"}
    for raw in msg.splitlines():
        pieces = [piece.strip() for piece in raw.split(": ")]
        if len(pieces) != 2:
            continue
        key, value = pieces
        try:
            if key == "Target":
                # Remember the current target; following lines attach to it.
                target = value
            elif key == "Current Portal":
                host, tpg = value.split(",")
                hostname, port = host.split(":")
                records[target][hostname] = {
                    keyword_map[key]: (hostname, int(port), int(tpg))
                }
            else:
                records[target][hostname][keyword_map[key]] = value
        except KeyError:
            # Unknown keys (not in keyword_map) are ignored.
            pass
    return records
def validate_string(msg):
    """
    Raise an error if the specified string is empty,
    longer than 255 chars or not ASCII encoded

    @raises TargetdError: STRING_TOO_LONG, EMPTY_STRING or
        NO_ASCSII_STRING depending on the failed check.
    """
    if len(msg) > 255:
        raise TargetdError(STRING_TOO_LONG, "String too long")
    elif msg == "":
        raise TargetdError(EMPTY_STRING, "Unauthorised empty string")
    try:
        # NOTE(review): str.decode() only exists on Python 2 byte strings;
        # under Python 3 this would raise AttributeError instead of
        # catching non-ASCII input -- confirm this module targets Python 2.
        msg.decode('ascii')
    except UnicodeDecodeError:
        raise TargetdError(NO_ASCSII_STRING,
                           "Not a ascii-encoded unicode string")
def get_error_code(iscsi_error_code):
    """Map an iscsiadm exit code onto this module's error-code space,
    returning -1 for any unrecognised code."""
    translation = {
        ISCSI_ERR_LOGIN_AUTH_FAILED: LOGIN_FAILED,
        ISCSI_ERR_TRANS: NO_ROUTE_TO_HOST,
        ISCSI_ERR_IDBM: DISCOVERY_RECORD_NOT_FOUND,
        ISCSI_ERR_TRANS_TIMEOUT: SERVER_FAILURE,
        ISCSI_ERR_NO_OBJS_FOUND: NO_RECORDS_FOUND,
        ISCSI_ERR_SESS_EXISTS: SESSION_LOGGED_IN,
        ISCSI_ERR_ISNS_QUERY: QUERY_FAILURE,
    }
    return translation.get(iscsi_error_code, -1)
def discovery_wrapper(hostname=None, discovery_method=None, operation=None,
                      op_params=(), discover=False):
    """
    Build and run an "iscsiadm -m discoverydb" command from the given
    arguments and return its stdout.

    @param op_params: optional (name, value) pair for a
        discovery.sendtargets.auth.* setting ("-n"/"-v"); takes
        precedence over *discover*.
    @param discover: when True (and no op_params), actually perform the
        discovery ("-D -P 1") instead of just manipulating the record.
    @raises TargetdError: translated from the iscsiadm exit code.
    """
    cmd = [ISCSIADM_BINARY, "-m", "discoverydb"]
    if hostname:
        cmd.extend(["-p", hostname])
    if discovery_method:
        cmd.extend(["-t", discovery_method])
    if operation:
        cmd.extend(["-o", operation])
    if op_params:
        cmd.extend(["-n", "discovery.sendtargets.auth.%s" % op_params[0]])
        cmd.extend(["-v", op_params[1]])
    elif discover:
        cmd.extend(["-D", "-P", "1"])
    return_code, output_success, output_failure = invoke(cmd, False)
    if return_code:
        try:
            error_string = output_failure.splitlines()[0].split(" ", 1)[-1]
            error_string = error_string.strip()
            # error_string extracts the text after "iscsiadm: " of the
            # first line of e.output
            error_code = get_error_code(return_code)
            raise TargetdError(error_code, error_string)
        except IndexError:
            # No stderr output at all: nothing to parse, assume the
            # discovery record simply does not exist.
            raise TargetdError(DISCOVERY_RECORD_NOT_FOUND,
                               "No discovery records found")
    return output_success
def node_wrapper(targetname=None, hostname=None, operation="",
                 op_params=(), login_out=None):
    """
    Build and run an "iscsiadm -m node" command from the given arguments
    and return its stdout.

    @param op_params: optional (name, value) pair for a
        node.session.auth.* setting ("-n"/"-v"); takes precedence over
        *login_out*.
    @param login_out: "login" or "logout" to start/stop the session.
    @raises TargetdError: translated from the iscsiadm exit code.
    """
    cmd = [ISCSIADM_BINARY, "-m", "node", "-P", "1"]
    if targetname:
        cmd.extend(["-T", targetname])
    if hostname:
        cmd.extend(["-p", hostname])
    if operation:
        cmd.extend(["-o", operation])
    if op_params:
        cmd.extend(["-n", "node.session.auth.%s" % op_params[0]])
        cmd.extend(["-v", op_params[1]])
    elif login_out:
        if login_out == "login":
            cmd.append("--login")
        if login_out == "logout":
            cmd.append("--logout")
    error_code, output_success, output_failure = invoke(cmd, False)
    if error_code != 0:
        error_string = output_failure.splitlines()[0].split(" ", 1)[-1].strip()
        # error_string extracts the text after "iscsiadm: " of the
        # first line of e.output
        error_code = get_error_code(error_code)
        raise TargetdError(error_code, error_string)
    return output_success
def session_wrapper(session_id=None):
    """
    Build and run an "iscsiadm -m session -P 1" command, optionally
    restricted to a single session ("-r"), and return its stdout.

    @raises TargetdError: translated from the iscsiadm exit code.
    """
    cmd = [ISCSIADM_BINARY, "-m", "session", "-P", "1"]
    if session_id:
        cmd.extend(["-r", session_id])
    error_code, output_success, output_failure = invoke(cmd, False)
    if error_code != 0:
        error_string = output_failure.splitlines()[0].split(" ", 1)[-1].strip()
        # error_string extracts the text after "iscsiadm: " of the
        # first line of e.output
        error_code = get_error_code(error_code)
        raise TargetdError(error_code, error_string)
    return output_success
def discover_portal(req, hostname, discovery_method="sendtargets",
                    auth_method=None, username=None, password=None,
                    username_in=None, password_in=None):
    """
    Discover all targets for a given discovery portal
    using specified informations

    Creates (or resets) the discovery record, stores any CHAP
    credentials on it, performs the discovery and returns the resulting
    node summary as a dictionary.

    @raises TargetdError: on invalid arguments or iscsiadm failure.
    """
    validate_string(hostname)
    if discovery_method not in DISCOVERY_METHODS:
        raise TargetdError(INVALID_VALUE_DISCOVERY, "Invalid value."
                           " Possible values are : %s" %
                           ", ".join(DISCOVERY_METHODS))
    if auth_method in AUTH_METHODS:
        validate_string(username)
        validate_string(password)
        # One iscsiadm invocation per setting: create the record, then
        # store the auth method and each credential on it.
        discovery_wrapper(hostname, discovery_method, "new")
        discovery_wrapper(hostname, discovery_method, "update",
                          ("authmethod", "CHAP"))
        discovery_wrapper(hostname, discovery_method, "update",
                          ("username", username))
        discovery_wrapper(hostname, discovery_method, "update",
                          ("password", password))
        if auth_method == "mutual_chap":
            validate_string(username_in)
            validate_string(password_in)
            discovery_wrapper(hostname, discovery_method, "update",
                              ("username_in", username_in))
            discovery_wrapper(hostname, discovery_method, "update",
                              ("password_in", password_in))
    elif auth_method:
        raise TargetdError(INVALID_VALUE_AUTH, "Invalid value."
                           " Possible values are : %s" %
                           ", ".join(AUTH_METHODS))
    output = discovery_wrapper(hostname, discovery_method,
                               discover=True)
    return node_summary_parser(output)
def display_discovery(req, hostname, discovery_method="sendtargets"):
    """Return a dictionary of all stored data for the discovery record
    at *hostname* created with *discovery_method*."""
    validate_string(hostname)
    if discovery_method not in DISCOVERY_METHODS:
        raise TargetdError(INVALID_VALUE_DISCOVERY,
                           "Invalid value. Possible values are : %s" %
                           ", ".join(DISCOVERY_METHODS))
    record_text = discovery_wrapper(hostname, discovery_method)
    return discovery_node_parser(record_text, "discovery")
def display_discovery_summary(req):
    """Return a dictionary describing every stored discovery record."""
    return discovery_summary_parser(discovery_wrapper())
def delete_discovery(req, hostname, discovery_method="sendtargets"):
    """
    Delete the discovery record for *hostname* created with
    *discovery_method*.

    @raises TargetdError: on invalid arguments or iscsiadm failure.
    """
    validate_string(hostname)
    if discovery_method not in DISCOVERY_METHODS:
        raise TargetdError(INVALID_VALUE_DISCOVERY, "Invalid value."
                           " Possible values are : %s" %
                           ", ".join(DISCOVERY_METHODS))
    # Output intentionally discarded (was bound to an unused local);
    # failures surface as TargetdError from discovery_wrapper.
    discovery_wrapper(hostname, discovery_method, "delete")
def delete_all_discoveries():
    """
    Delete all discovery records
    """
    d = display_discovery_summary(None)
    for t in d:
        # Each entry is a (port, method) tuple; d[t][-1] is the discovery
        # method the record was created with.
        delete_discovery(None, t, d[t][-1])
def get_initiator_name(req):
    """Return the iSCSI initiator IQN of this node (read from
    /etc/iscsi/initiatorname.iscsi), or None if no entry is present."""
    with open("/etc/iscsi/initiatorname.iscsi") as iqnfile:
        for raw_line in iqnfile:
            if not raw_line.startswith("InitiatorName="):
                continue
            return raw_line.strip().split("=", 1)[1]
def login_target(req, targetname, hostname, auth_method=None,
                 username=None, password=None, username_in=None,
                 password_in=None):
    """
    Login to a given target using specified informations

    Returns the resolved block-device path for the logged-in LUN, or
    None if the device did not appear within the retry window.
    """
    validate_string(targetname)
    validate_string(hostname)
    # allow for the possibility that we're already logged in
    devices = glob.glob("/dev/disk/by-path/*%s:*%s-lun-*" %
                        (hostname, targetname))
    if len(devices) == 1:
        return os.path.realpath(devices[0])
    if auth_method in AUTH_METHODS:
        validate_string(username)
        validate_string(password)
        # Store the CHAP settings on the node record, one per invocation.
        node_wrapper(targetname, hostname, "update", ("authmethod", "CHAP"))
        node_wrapper(targetname, hostname, "update", ("username", username))
        node_wrapper(targetname, hostname, "update", ("password", password))
        if auth_method == "mutual_chap":
            validate_string(username_in)
            validate_string(password_in)
            node_wrapper(targetname, hostname, "update",
                         ("username_in", username_in))
            node_wrapper(targetname, hostname, "update",
                         ("password_in", password_in))
    elif auth_method:
        raise TargetdError(INVALID_VALUE_AUTH, "Invalid value."
                           " Possible values are : %s" %
                           ", ".join(AUTH_METHODS))
    node_wrapper(targetname, hostname, login_out="login")
    # The kernel may take a moment to create the device node; poll with
    # an increasing back-off (0 + 1 + 2 + 3 seconds in total).
    for delay in range(4):
        time.sleep(delay)
        devices = glob.glob("/dev/disk/by-path/*%s:*%s-lun-*" %
                            (hostname, targetname))
        if len(devices) == 1:
            return os.path.realpath(devices[0])
def logout_target(req, targetname, hostname=None):
    """
    Log out of *targetname*, optionally restricted to portal *hostname*.

    @raises TargetdError: on invalid arguments or iscsiadm failure.
    """
    validate_string(targetname)
    if hostname:
        validate_string(hostname)
    # Output intentionally discarded (was bound to an unused local);
    # failures surface as TargetdError from node_wrapper.
    node_wrapper(targetname, hostname, login_out="logout")
def logout_all_targets(req):
    """
    Log out of every logged-in target.

    @raises TargetdError: on iscsiadm failure.
    """
    # Output intentionally discarded (was bound to an unused local).
    node_wrapper(login_out="logout")
def display_node(req, targetname, hostname=None):
    """Return all stored record data for node *targetname*, optionally
    restricted to portal *hostname*."""
    validate_string(targetname)
    if hostname:
        validate_string(hostname)
    record_text = node_wrapper(targetname, hostname)
    return discovery_node_parser(record_text, "node")
def display_node_summary(req):
    """Return a dictionary describing every stored node record."""
    return node_summary_parser(node_wrapper())
def delete_node(req, targetname, hostname=None):
    """
    Delete the node record for *targetname*, optionally restricted to
    portal *hostname*.

    @raises TargetdError: on invalid arguments or iscsiadm failure.
    """
    validate_string(targetname)
    if hostname:
        validate_string(hostname)
    # Output intentionally discarded (was bound to an unused local).
    node_wrapper(targetname, hostname, "delete")
def delete_all_nodes(req):
    """
    Delete all node records.

    @raises TargetdError: on iscsiadm failure.
    """
    # Output intentionally discarded (was bound to an unused local).
    node_wrapper(operation="delete")
def display_session(req, targetname=None, hostname=None):
    """
    Return session data for all active sessions, or for a single target
    (optionally a single portal) when specified.

    @raises TargetdError: NO_SESSION_INFO when the requested target or
        portal has no active session.
    """
    d = session_parser(session_wrapper())
    if targetname and hostname:
        validate_string(targetname)
        validate_string(hostname)
        # Membership tests rather than bare indexing: d is a defaultdict,
        # so d[targetname] would silently create an empty entry instead
        # of raising KeyError for an unknown target -- which made the
        # original targetname-only error path unreachable.
        if targetname not in d or hostname not in d[targetname]:
            raise TargetdError(NO_SESSION_INFO, "Could not get session info")
        return {targetname: {hostname: d[targetname][hostname]}}
    elif targetname:
        validate_string(targetname)
        if targetname not in d:
            raise TargetdError(NO_SESSION_INFO, "Could not get session info")
        return {targetname: d[targetname]}
    else:
        return d
def purge(req):
    """Best-effort cleanup: log out everywhere, then delete every
    discovery and node record, ignoring individual failures."""
    cleanup_steps = (
        (logout_all_targets, (None,)),
        (delete_all_discoveries, ()),
        (delete_all_nodes, (None,)),
    )
    for step, args in cleanup_steps:
        try:
            step(*args)
        except TargetdError:
            # Keep going: one failed step must not block the rest.
            pass
| MPSTOR/Konnector | targetd/iscsi_init.py | Python | gpl-3.0 | 16,737 |
'''
Created on Apr 23, 2012
@author: Stephen O'Hara
Uses iPython parallel library to implement a parallel version
of a proximity forest. The parallelized version will have nearly
the same interface as the serial implementation.
See the iPython website for details on setting up a cluster of
computational nodes using iPython parallel.
http://ipython.org/ipython-doc/dev/parallel/index.html
Copyright (C) 2012 Stephen O'Hara
This program is free software: you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation, either version 3 of the License, or (at your
option) any later version.
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import proximityforest as pf
import os
class ParallelProximityForest(pf.ProximityForest):
    '''
    A parallelized version of a proximity forest.
    Public interface is same as ProximityForest, but
    requires a handle to an iPython parallel client
    object for distributing the workload. Also note
    that the design assumes there is a shared disk
    area common to all computing nodes.
    '''
    def __init__(self, ipp_client, N, treeClass=pf.ProximityTree, dist_func=None, **kwargs):
        '''
        Constructor
        All parameters except one are the same as for the base class ProximityForest. Also,
        there is no trees kwarg because we dont want to instantiate large trees on one node
        and then push them to other computing nodes. Instead, if you want to instantiate a
        parallel forest from saved files, use the "loadParallelProximityForest" function instead.
        @param ipp_client: The ipython parallel client object, which will have information about
        the available computing engines in the environment.
        @type ipp_client: IPython.parallel.client
        '''
        self.client = ipp_client
        self.tree_kwargs = kwargs
        self.treeClass = treeClass
        self.N = N
        # Per-engine tree counts; the list sums to N.
        self.treeDistrib = self._computeTreeDistribution()
        cidxs = range(len(self.client))
        print "Clearing old data from remote engines..."
        dview = self.client[:]
        dview.execute("import proximityforest as pf", block=True)
        try:
            dview.execute("del(forest); del(samples); del(labels)")
            dview.purge_results('all')
        except:
            print "...deletion operation failed."
            pass #likely the variables don't exist yet on remote nodes
        # NOTE(review): func_name is the Python 2 spelling of __name__;
        # dist_func=None would raise AttributeError here -- confirm a
        # distance function is always supplied.
        funcName = dist_func.func_name
        for ci in cidxs:
            print "Creating new forest on node %d..."%ci
            # Each engine hosts its own sub-forest with its share of trees.
            forest = pf.ProximityForest(self.treeDistrib[ci], treeClass=treeClass, **kwargs)
            self.client[ci].push({funcName:dist_func},block=True)
            self.client[ci].push({'forest':forest},block=True)
        #Hook up the custom distance function. I am unable to figure out how to do that
        # in one shot. It may be because when we push an object (like a forest) to another
        # computing node, it is pickled during transfer. Pickling doesn't like functions...
        #So we pass over a forest with no custom dist_func, and then manually hook it up.
        dview.execute('for tree in forest.trees: tree.dist_func=%s'%funcName, block=True)
    def __str__(self):
        return "%s of %d Trees divided over %d Nodes"%(type(self).__name__, self.N, len(self.client))
    def __len__(self):
        '''
        Return the number of trees in the forest.
        '''
        return self.N
    def __getitem__(self, i):
        '''
        Support indexing the forest to get the nth tree. forest[n] is nth tree.
        NOTE: Not currently implemented for parallel forests because we don't want
        to schlep around large trees between computing nodes.
        Descendant classes that create trees based only on indexes into a shared
        memory structure may be okay to override this method and return a copy of
        a tree built on a remote node...
        '''
        raise NotImplementedError
    def _computeTreeDistribution(self):
        '''
        Given the desired number of trees and the number of computing nodes available
        in the ipp_client, this computes a list that indicates how many trees we need
        to distribute to each client.
        '''
        # Integer division gives each engine the base share; the remainder
        # is spread one-per-engine over the first few engines.
        treeDist = [ self.N / len(self.client) ] * len(self.client)
        for i in range( self.N % len(self.client)):
            treeDist[i] += 1
        return treeDist
    def clear(self):
        # Clear the sub-forest on every engine.
        dview = self.client[:]
        dview.execute('forest.clear()', block=True) #wait until all remote forests are done
    def add(self, T, Label):
        # Push the sample to every engine, then add it to each sub-forest.
        for cx in range(len(self.client)):
            self.client[cx].push({'T':T, 'Label':Label}, block=True)
        dview = self.client[:]
        dview.execute('forest.add(T,Label)', block=True) #wait until all remote forests are done
    def addList(self, samples, labels):
        # Every engine receives the full sample set and grows its own
        # trees, logging progress to a per-node build log file.
        print "Pushing samples and labels to remote computing nodes..."
        for cx in range(len(self.client)):
            self.client[cx].push({'logfile':'proximity_forest_%d_build.log'%cx})
            self.client[cx].push({'samples':samples, 'labels':labels}, block=True)
        print "Adding samples to remote forests..."
        dview = self.client[:]
        dview.execute('forest.addList(samples,labels, build_log=logfile)', block=True) #wait until all remote forests are done
    def save(self, base_dir, forest_name):
        '''
        Saves the forest as a set of files in the forest_name
        subdirectory of base_dir. NOTE: It is assumed that the computing nodes share
        a common storage system so that base_dir will resolve to the same directory
        for all nodes.
        The following files will be created
        1) tree_<num>.p, one pickle file for each tree in the forest,
        and <num> will be a zero-padded number, like 001, 002, ..., 999.
        2) forest_info.p, a single file with information about the forest
        '''
        d = os.path.join(base_dir,forest_name)
        if not os.path.exists(d): os.makedirs(d, 0777)
        for cx in range(len(self.client)):
            #each node will have a different forest_idx value, so that their trees will not overwrite each other
            self.client[cx].push({'base_dir':base_dir, 'forest_name':forest_name, 'forest_idx':cx}, block=True)
        dview = self.client[:]
        dview.execute('forest.save(base_dir,forest_name,forest_idx)', block=True) #wait until all nodes finished saving
        print "Forest saved to directory %s"%d
    def getKNearestFromEachTree(self, T, K):
        '''
        @return: A list of lists representing the k-nearest samples to T that are
        found in each tree in the forest.
        '''
        dview = self.client[:]
        dview.block = True
        dview['T'] = T
        dview['K'] = K
        dview.execute('knnList=forest.getKNearestFromEachTree(T,K)', block=True)
        KNN_List = []
        for ci in range(len(self.client)):
            tmp = self.client[ci]['knnList'] #get results from sub-forest on computing node ci
            KNN_List += tmp
        return KNN_List
    def getKNearest(self, T, K):
        '''
        Returns the K-nearest-neighbors in the forest.
        '''
        dview = self.client[:]
        dview.block = True
        dview['T'] = T
        dview['K'] = K
        dview.execute('knn=forest.getKNearest(T,K)', block=True)
        KNN_List = []
        for ci in range(len(self.client)):
            KNN = self.client[ci]['knn'] #get results from computing node ci
            for item in KNN: KNN_List.append(item)
        KNNs = list(set(KNN_List)) #remove duplicates b/c many trees will return the same answer as closest, etc.
        return sorted(KNNs)[0:K] #like this, if K=3: [ (d1,T1,L1), (d2,T2,L2), (d3,T3,L3)]
def loadParallelProximityForest(base_dir, forest_name, dist_func=None, tree_idxs=None):
    '''
    Loads a saved proximity forest into a ParallelProximityForest structure distributed over
    a set of computing nodes.

    Not yet implemented.
    '''
    # Bug fix: "raise NotImplemented" raises a TypeError, because
    # NotImplemented is a plain sentinel value rather than an exception
    # class; NotImplementedError is the correct exception to raise.
    raise NotImplementedError
| svohara/proximityforest | proximityforest/ANN/Parallel.py | Python | gpl-3.0 | 8,714 |
#! /usr/bin/env python
# -*- coding: UTF-8-*-
# clean *.wav files older than 1 day
# NOTE(review): the original header said "3 days", but the cutoff below
# is now - 1*86400 seconds, i.e. one day -- confirm which was intended.
import glob
import os
import time
now = time.time()
# Hard-coded Windows working directory holding the generated TTS wav files.
os.chdir("C:\\Users\\nio\Desktop\\bc_TTS\\wav")
for filename in glob.glob("*.wav"):
    # st_mtime is the last-modification time; delete anything older than
    # 24 hours (86400 seconds).
    if (os.stat(filename).st_mtime < (now - 1*86400)):
        print "removing:", filename
        os.remove(filename)
import os
from django.core.wsgi import get_wsgi_application
# Standard Django WSGI entry point: point Django at this project's
# settings module, then expose the `application` callable that WSGI
# servers (gunicorn, mod_wsgi, ...) look for.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'app.settings')
application = get_wsgi_application()
# -*- coding: utf-8 -*-
from __future__ import division
import logging
from numpy import minimum as min_, maximum as max_
from openfisca_france.model.base import * # noqa analysis:ignore
log = logging.getLogger(__name__)
# Csg déductible
# Each class below declares one input variable of the French tax return
# ("déclaration des revenus"), keyed by its cerfa box code. The French
# labels are user-facing strings consumed by OpenFisca and must stay as-is.
class f6de(Variable):
    cerfa_field = u"6DE"
    column = IntCol(val_type = "monetary")
    entity_class = FoyersFiscaux
    label = u"CSG déductible calculée sur les revenus du patrimoine"
# Pensions alimentaires
class f6gi(Variable):
    cerfa_field = u"6GI"
    column = IntCol(val_type = "monetary")
    entity_class = FoyersFiscaux
    label = u"Pensions alimentaires versées à des enfants majeurs (décision de justice définitive avant 2006): 1er enfant"
class f6gj(Variable):
    cerfa_field = u"6GJ"
    column = IntCol(val_type = "monetary")
    entity_class = FoyersFiscaux
    label = u"Pensions alimentaires versées à des enfants majeurs (décision de justice définitive avant 2006): 2eme enfant"
class f6el(Variable):
    cerfa_field = u"6EL"
    column = IntCol(val_type = "monetary")
    entity_class = FoyersFiscaux
    label = u"Autres pensions alimentaires versées à des enfants majeurs: 1er enfant"
    # Box only exists on returns from 2006 onwards.
    start_date = date(2006, 1, 1)
class f6em(Variable):
    cerfa_field = u"6EM"
    column = IntCol(val_type = "monetary")
    entity_class = FoyersFiscaux
    label = u"Autres pensions alimentaires versées à des enfants majeurs: 2eme enfant"
    start_date = date(2006, 1, 1)
class f6gp(Variable):
    cerfa_field = u"6GP"
    column = IntCol(val_type = "monetary")
    entity_class = FoyersFiscaux
    label = u"Autres pensions alimentaires versées décision de justice définitive avant 2006 (mineurs, ascendants)"
class f6gu(Variable):
    cerfa_field = u"6GU"
    column = IntCol(val_type = "monetary")
    entity_class = FoyersFiscaux
    label = u"Autres pensions alimentaires versées (mineurs, ascendants)"
    start_date = date(2006, 1, 1)
# Frais d'accueil d'une personne de plus de 75 ans dans le besoin
class f6eu(Variable):
    cerfa_field = u"6EU"
    column = IntCol(val_type = "monetary")
    entity_class = FoyersFiscaux
    label = u"Frais d'accueil de personnes de plus de 75 ans dans le besoin"
class f6ev(Variable):
    cerfa_field = u"6EV"
    # Head count, not an amount -- hence the non-monetary column type.
    column = PeriodSizeIndependentIntCol
    entity_class = FoyersFiscaux
    label = u"Nombre de personnes de plus de 75 ans dans le besoin accueillies sous votre toit"
# Déductions diverses
class f6dd(Variable):
    cerfa_field = u"6DD"
    column = IntCol(val_type = "monetary")
    entity_class = FoyersFiscaux
    label = u"Déductions diverses"
# Épargne retraite - PERP, PRÉFON, COREM et CGOS
class f6ps(Variable):
    # Boxes 6PS/6PT/6PU (one per household member): deduction ceiling for
    # retirement savings, computed on income received in year n-1.
    cerfa_field = {QUIFOY['vous']: u"6PS",
        QUIFOY['conj']: u"6PT",
        QUIFOY['pac1']: u"6PU",
        }
    column = IntCol(val_type = "monetary")
    entity_class = Individus
    label = u"Plafond de déduction épargne retraite (plafond calculé sur les revenus perçus en n-1)"

# (f6ps, f6pt, f6pu)


class f6rs(Variable):
    # Boxes 6RS/6RT/6RU: retirement-savings contributions paid into a PERP,
    # PREFON, COREM or C.G.O.S plan.
    cerfa_field = {QUIFOY['vous']: u"6RS",
        QUIFOY['conj']: u"6RT",
        QUIFOY['pac1']: u"6RU",
        }
    column = IntCol(val_type = "monetary")
    entity_class = Individus
    label = u"Cotisations d'épargne retraite versées au titre d'un PERP, PREFON, COREM et C.G.O.S"

# (f6rs, f6rt, f6ru)))


class f6ss(Variable):
    # Boxes 6SS/6ST/6SU: buy-backs of PERP/PREFON/COREM/C.G.O.S contributions.
    cerfa_field = {QUIFOY['vous']: u"6SS",
        QUIFOY['conj']: u"6ST",
        QUIFOY['pac1']: u"6SU",
        }
    column = IntCol(val_type = "monetary")
    entity_class = Individus
    label = u"Rachat de cotisations PERP, PREFON, COREM et C.G.O.S"

# (f6ss, f6st, f6su)))
# Souscriptions en faveur du cinéma ou de l’audiovisuel
class f6aa(Variable):
    # Box 6AA: cinema/audiovisual subscriptions, declared 2005-2006 only.
    cerfa_field = u"6AA"
    column = IntCol(val_type = "monetary")
    entity_class = FoyersFiscaux
    label = u"Souscriptions en faveur du cinéma ou de l’audiovisuel"
    start_date = date(2005, 1, 1)
    stop_date = date(2006, 12, 31)

# TODO: old box number, predates 2008 at least! Check 2005-2007; probably
# also used before 2005 (different name in 2012 and 2013).


# Subscriptions to the capital of SOFIPECHE companies
class f6cc(Variable):
    # Box CC: SOFIPECHE capital subscriptions, declared 2005 only.
    cerfa_field = u"CC"
    column = IntCol(val_type = "monetary")
    entity_class = FoyersFiscaux
    label = u"Souscriptions au capital des SOFIPÊCHE"
    start_date = date(2005, 1, 1)
    stop_date = date(2005, 12, 31)

# Old box number, predates 2008 at least; check 2005-2007, probably also
# before 2005 (different name in 2012 and 2013).


# DOM-TOM investments through a business (<= 2005),
# or deposits on a co-development savings account
class f6eh(Variable):
    # Box EH: shared by two successive schemes (see cd_doment and cd_ecodev).
    cerfa_field = u"EH"
    column = IntCol(val_type = "monetary")
    entity_class = FoyersFiscaux
    start_date = date(2005, 1, 1)
    stop_date = date(2005, 12, 31)

# TODO: check the start and stop dates of this box (nothing in 2012 and 2013)


# Capital losses following subscription to the capital of new companies
# or companies in difficulty
class f6da(Variable):
    # Box DA: capital losses on new/struggling companies, declared 2005 only.
    cerfa_field = u"DA"
    column = IntCol(val_type = "monetary")
    entity_class = FoyersFiscaux
    label = u"Pertes en capital consécutives à la souscription au capital de sociétés nouvelles ou de sociétés en difficulté"
    start_date = date(2005, 1, 1)
    stop_date = date(2005, 12, 31)


# Major repair expenses borne by bare owners (nus-propriétaires)
class f6cb(Variable):
    # Box 6CB: current-year major repair expenses of bare owners (from 2009).
    cerfa_field = u"6CB"
    column = IntCol(val_type = "monetary")
    entity_class = FoyersFiscaux
    label = u"Dépenses de grosses réparations effectuées par les nus-propriétaires (dépenses réalisées au cours de l'année de perception des revenus)"
    start_date = date(2009, 1, 1)

# TODO: before 2006 this box was "Pertes en capital consécutives à la
# souscription au capital de sociétés nouvelles ou de sociétés en difficulté"
# (boxes CB and DA of the complementary declaration)


class f6hj(Variable):
    # Box 6HJ: carry-over of prior-year major repair expenses (from 2010).
    cerfa_field = u"6HJ"
    column = IntCol(val_type = "monetary")
    entity_class = FoyersFiscaux
    label = u"Dépenses de grosses réparations effectuées par les nus-propriétaires: report des dépenses des années antérieures"
    start_date = date(2010, 1, 1)


class f6hk(Variable):
    # Box 6HK: carry-over of prior-year major repair expenses (from 2011).
    cerfa_field = u"6HK"
    column = IntCol(val_type = "monetary")
    entity_class = FoyersFiscaux
    label = u"Dépenses de grosses réparations effectuées par les nus-propriétaires: report des dépenses des années antérieures"
    start_date = date(2011, 1, 1)


class f6hl(Variable):
    # Box 6HL: carry-over of prior-year major repair expenses (from 2012).
    cerfa_field = u"6HL"
    column = IntCol(val_type = "monetary")
    entity_class = FoyersFiscaux
    label = u"Dépenses de grosses réparations effectuées par les nus-propriétaires: report des dépenses des années antérieures"
    start_date = date(2012, 1, 1)


class f6hm(Variable):
    # Box 6HM: carry-over of prior-year major repair expenses (from 2013).
    cerfa_field = u"6HM"
    column = IntCol(val_type = "monetary")
    entity_class = FoyersFiscaux
    label = u"Dépenses de grosses réparations effectuées par les nus-propriétaires: report des dépenses des années antérieures"
    start_date = date(2013, 1, 1)
# Sommes à rajouter au revenu imposable
class f6gh(Variable):
    # Box 6GH: amounts to be added back to taxable income.
    cerfa_field = u"6GH"
    column = IntCol(val_type = "monetary")
    entity_class = FoyersFiscaux
    label = u"Sommes à ajouter au revenu imposable"


# Prior-year global deficits not yet deducted
class f6fa(Variable):
    # Box 6FA: global deficit from year n-6 not yet deducted.
    cerfa_field = u"6FA"
    column = IntCol(val_type = "monetary")
    entity_class = FoyersFiscaux
    label = u"Deficits globaux des années antérieures non encore déduits les années précédentes: année de perception des revenus -6"


class f6fb(Variable):
    # Box 6FB: global deficit from year n-5 not yet deducted.
    cerfa_field = u"6FB"
    column = IntCol(val_type = "monetary")
    entity_class = FoyersFiscaux
    label = u"Deficits globaux des années antérieures non encore déduits: année de perception des revenus -5"


class f6fc(Variable):
    # Box 6FC: global deficit from year n-4 not yet deducted.
    cerfa_field = u"6FC"
    column = IntCol(val_type = "monetary")
    entity_class = FoyersFiscaux
    label = u"Deficits globaux des années antérieures non encore déduits: année de perception des revenus -4"


class f6fd(Variable):
    # Box 6FD: global deficit from year n-3 not yet deducted.
    cerfa_field = u"6FD"
    column = IntCol(val_type = "monetary")
    entity_class = FoyersFiscaux
    label = u"Deficits globaux des années antérieures non encore déduits: année de perception des revenus -3"


class f6fe(Variable):
    # Box 6FE: global deficit from year n-2 not yet deducted.
    cerfa_field = u"6FE"
    column = IntCol(val_type = "monetary")
    entity_class = FoyersFiscaux
    label = u"Deficits globaux des années antérieures non encore déduits: année de perception des revenus -2"


class f6fl(Variable):
    # Box 6FL: global deficit from year n-1 not yet deducted.
    cerfa_field = u"6FL"
    column = IntCol(val_type = "monetary")
    entity_class = FoyersFiscaux
    label = u"Deficits globaux des années antérieures non encore déduits: année de perception des revenus -1"
class rfr_cd(Variable):
    column = FloatCol(default = 0)
    entity_class = FoyersFiscaux
    label = u"Charges déductibles entrant dans le revenus fiscal de référence"
    url = "http://impotsurlerevenu.org/definitions/215-charge-deductible.php"

    def function(self, simulation, period):
        # Deductible charges that are added back into the "revenu fiscal de
        # référence": sum of the four relevant components for the year.
        period = period.this_year
        components = [
            simulation.calculate(name, period)
            for name in ('cd_acc75a', 'cd_doment', 'cd_eparet', 'cd_sofipe')
        ]
        return period, sum(components)
class cd1(DatedVariable):
    # Sum of the *uncapped* deductible charges, subtracted from gross income
    # (rbg) before computing rbg_int.  The set of components varies by year.
    column = FloatCol(default = 0)
    entity_class = FoyersFiscaux
    label = u"Charges déductibles non plafonnées"
    url = "http://impotsurlerevenu.org/definitions/215-charge-deductible.php"

    @dated_function(start = date(2002, 1, 1), stop = date(2003, 12, 31))
    def function_20020101_20031231(self, simulation, period):
        '''
        Uncapped deductible charges applied before rbg_int, 2002-2003 rules.
        '''
        period = period.this_year
        cd_penali = simulation.calculate('cd_penali', period)
        cd_acc75a = simulation.calculate('cd_acc75a', period)
        cd_percap = simulation.calculate('cd_percap', period)
        cd_deddiv = simulation.calculate('cd_deddiv', period)
        cd_doment = simulation.calculate('cd_doment', period)
        niches1 = cd_penali + cd_acc75a + cd_percap + cd_deddiv + cd_doment
        return period, niches1

    @dated_function(start = date(2004, 1, 1), stop = date(2005, 12, 31))
    def function_20040101_20051231(self, simulation, period):
        '''
        Uncapped deductible charges applied before rbg_int, 2004-2005 rules
        (adds retirement savings, cd_eparet).
        '''
        period = period.this_year
        cd_penali = simulation.calculate('cd_penali', period)
        cd_acc75a = simulation.calculate('cd_acc75a', period)
        cd_percap = simulation.calculate('cd_percap', period)
        cd_deddiv = simulation.calculate('cd_deddiv', period)
        cd_doment = simulation.calculate('cd_doment', period)
        cd_eparet = simulation.calculate('cd_eparet', period)
        niches1 = cd_penali + cd_acc75a + cd_percap + cd_deddiv + cd_doment + cd_eparet
        return period, niches1

    @dated_function(start = date(2006, 1, 1), stop = date(2006, 12, 31))
    def function_20060101_20061231(self, simulation, period):
        '''
        Uncapped deductible charges applied before rbg_int, 2006 rules
        (DOM-TOM investments, cd_doment, no longer apply).
        '''
        period = period.this_year
        cd_penali = simulation.calculate('cd_penali', period)
        cd_acc75a = simulation.calculate('cd_acc75a', period)
        cd_percap = simulation.calculate('cd_percap', period)
        cd_deddiv = simulation.calculate('cd_deddiv', period)
        cd_eparet = simulation.calculate('cd_eparet', period)
        niches1 = cd_penali + cd_acc75a + cd_percap + cd_deddiv + cd_eparet
        return period, niches1

    @dated_function(start = date(2007, 1, 1), stop = date(2008, 12, 31))
    def function_20070101_20081231(self, simulation, period):
        '''
        Uncapped deductible charges applied before rbg_int, 2007-2008 rules
        (capital losses, cd_percap, no longer apply).
        '''
        period = period.this_year
        cd_penali = simulation.calculate('cd_penali', period)
        cd_acc75a = simulation.calculate('cd_acc75a', period)
        cd_deddiv = simulation.calculate('cd_deddiv', period)
        cd_eparet = simulation.calculate('cd_eparet', period)
        niches1 = cd_penali + cd_acc75a + cd_deddiv + cd_eparet
        return period, niches1

    @dated_function(start = date(2009, 1, 1), stop = date(2013, 12, 31))
    def function_20090101_20131231(self, simulation, period):
        '''
        Uncapped deductible charges applied before rbg_int, 2009-2013 rules
        (adds major repairs of bare owners, cd_grorep).
        '''
        period = period.this_year
        cd_penali = simulation.calculate('cd_penali', period)
        cd_acc75a = simulation.calculate('cd_acc75a', period)
        cd_deddiv = simulation.calculate('cd_deddiv', period)
        cd_eparet = simulation.calculate('cd_eparet', period)
        cd_grorep = simulation.calculate('cd_grorep', period)
        niches1 = cd_penali + cd_acc75a + cd_deddiv + cd_eparet + cd_grorep
        return period, niches1

    @dated_function(start = date(2014, 1, 1), stop = date(2014, 12, 31))
    def function_20140101_20141231(self, simulation, period):
        '''
        Uncapped deductible charges applied before rbg_int, 2014
        (same components as 2009-2013).
        '''
        period = period.this_year
        cd_penali = simulation.calculate('cd_penali', period)
        cd_acc75a = simulation.calculate('cd_acc75a', period)
        cd_deddiv = simulation.calculate('cd_deddiv', period)
        cd_eparet = simulation.calculate('cd_eparet', period)
        cd_grorep = simulation.calculate('cd_grorep', period)
        niches1 = cd_penali + cd_acc75a + cd_deddiv + cd_eparet + cd_grorep
        # log.error("Charges déductibles to be checked because not defined for %s", 2014)
        return period, niches1
class cd2(DatedVariable):
    # Sum of the *capped* deductible charges, subtracted after rbg_int.
    # No component remains after 2008.
    column = FloatCol(default = 0)
    entity_class = FoyersFiscaux
    label = u"Charges déductibles plafonnées"
    url = "http://impotsurlerevenu.org/definitions/215-charge-deductible.php"

    @dated_function(start = date(2002, 1, 1), stop = date(2005, 12, 31))
    def function_20020101_20051231(self, simulation, period):
        '''
        Capped deductible charges applied after rbg_int: SOFIPECHE and
        cinema/audiovisual subscriptions (2002-2005).
        '''
        period = period.this_year
        cd_sofipe = simulation.calculate('cd_sofipe', period)
        cd_cinema = simulation.calculate('cd_cinema', period)
        niches2 = cd_sofipe + cd_cinema
        return period, niches2

    @dated_function(start = date(2006, 1, 1), stop = date(2006, 12, 31))
    def function_20060101_20061231(self, simulation, period):
        '''
        Capped deductible charges applied after rbg_int: SOFIPECHE only (2006).
        '''
        period = period.this_year
        cd_sofipe = simulation.calculate('cd_sofipe', period)
        niches2 = cd_sofipe
        return period, niches2

    @dated_function(start = date(2007, 1, 1), stop = date(2008, 12, 31))
    def function_20070101_20081231(self, simulation, period):
        '''
        Capped deductible charges applied after rbg_int: co-development
        savings accounts only (2007-2008).
        '''
        period = period.this_year
        cd_ecodev = simulation.calculate('cd_ecodev', period)
        niches2 = cd_ecodev
        return period, niches2
class rbg_int(Variable):
    column = FloatCol(default = 0)
    entity_class = FoyersFiscaux
    label = u"Revenu brut global intermédiaire"

    def function(self, simulation, period):
        # Intermediate gross income: gross income minus the uncapped
        # deductible charges, floored at zero.
        period = period.this_year
        gross_income = simulation.calculate('rbg', period)
        uncapped_charges = simulation.calculate('cd1', period)
        remainder = max_(gross_income - uncapped_charges, 0)
        return period, remainder
class charges_deduc(Variable):
    column = FloatCol(default = 0)
    entity_class = FoyersFiscaux
    label = u"Charges déductibles"
    url = "http://impotsurlerevenu.org/definitions/215-charge-deductible.php"

    def function(self, simulation, period):
        # Total deductible charges: uncapped part (cd1) plus capped part (cd2).
        period = period.this_year
        uncapped = simulation.calculate('cd1', period)
        capped = simulation.calculate('cd2', period)
        total = uncapped + capped
        return period, total
class cd_penali(Variable):
    # Deduction for alimony payments (pensions alimentaires).
    column = FloatCol(default = 0)
    entity_class = FoyersFiscaux
    label = u"cd_penali"
    url = "http://frederic.anne.free.fr/Cours/ITV.htm"

    def function(self, simulation, period):
        '''
        Alimony payments.  Amounts for adult children (6GI, 6GJ, 6EL, 6EM)
        are capped at max1; payments under a court decision final before
        2006 (6GI, 6GJ, 6GP) are increased by taux_jgt_2006 first.
        '''
        period = period.this_year
        f6gi = simulation.calculate('f6gi', period)
        f6gj = simulation.calculate('f6gj', period)
        f6gp = simulation.calculate('f6gp', period)
        f6el = simulation.calculate('f6el', period)
        f6em = simulation.calculate('f6em', period)
        f6gu = simulation.calculate('f6gu', period)
        penalim = simulation.legislation_at(period.start).ir.charges_deductibles.penalim
        max1 = penalim.max
        taux_jgt_2006 = penalim.taux_jgt_2006
        # TODO: if you alone support a married/pacsed child, or a child who
        # is head of a family, the deduction is capped at 2*max whatever the
        # number of children in the young household.
        # If the child lives with its parents the cap is 3359, otherwise 5698.
        return period, (min_(f6gi * (1 + taux_jgt_2006), max1) +
                    min_(f6gj * (1 + taux_jgt_2006), max1) +
                    min_(f6el, max1) +
                    min_(f6em, max1) +
                    f6gp * (1 + taux_jgt_2006) + f6gu)
class cd_acc75a(Variable):
    column = FloatCol(default = 0)
    entity_class = FoyersFiscaux
    label = u"cd_acc75a"

    def function(self, simulation, period):
        '''
        Deduction for hosting a needy person aged over 75 under one's roof:
        the declared amount (6EU), capped per hosted person (6EV).
        '''
        period = period.this_year
        declared_amount = simulation.calculate('f6eu', period)
        hosted_count = simulation.calculate('f6ev', period)
        params = simulation.legislation_at(period.start).ir.charges_deductibles.acc75a
        # The per-person ceiling applies at least once even if 6EV is zero.
        ceiling = params.max * max_(1, hosted_count)
        return period, min_(declared_amount, ceiling)
class cd_percap(DatedVariable):
    # Deduction for capital losses after subscribing to the capital of new
    # companies or companies in difficulty (boxes CB and DA of form 2042 C).
    column = FloatCol(default = 0)
    entity_class = FoyersFiscaux
    label = u"cd_percap"

    @dated_function(start = date(2002, 1, 1), stop = date(2002, 12, 31))
    def function_20020101_20021231(self, simulation, period):
        '''
        Capital losses (box CB only), capped at max_cb -- 2002 rules.
        '''
        period = period.this_year
        f6cb = simulation.calculate('f6cb', period)
        maries_ou_pacses = simulation.calculate('maries_ou_pacses', period)
        percap = simulation.legislation_at(period.start).ir.charges_deductibles.percap
        # maries_ou_pacses is boolean, so the ceiling doubles for couples.
        max_cb = percap.max_cb * (1 + maries_ou_pacses)
        return period, min_(f6cb, max_cb)

    @dated_function(start = date(2003, 1, 1), stop = date(2006, 12, 31))
    def function_20030101_20061231(self, simulation, period):
        '''
        Capital losses (boxes CB and DA), each capped separately and the
        overall sum capped at max_da -- 2003-2006 rules.
        '''
        period = period.this_year
        f6cb = simulation.calculate('f6cb', period)
        f6da = simulation.calculate('f6da', period)
        maries_ou_pacses = simulation.calculate('maries_ou_pacses', period)
        percap = simulation.legislation_at(period.start).ir.charges_deductibles.percap
        max_cb = percap.max_cb * (1 + maries_ou_pacses)
        max_da = percap.max_da * (1 + maries_ou_pacses)
        return period, min_(min_(f6cb, max_cb) + min_(f6da, max_da), max_da)
class cd_deddiv(Variable):
    column = FloatCol(default = 0)
    entity_class = FoyersFiscaux
    label = u"cd_deddiv"

    def function(self, simulation, period):
        '''
        Miscellaneous deductions (box DD): deducted in full, no cap.
        '''
        period = period.this_year
        return period, simulation.calculate('f6dd', period)
class cd_doment(Variable):
    # Deduction for DOM-TOM business investments (box EH), 2002-2005 only.
    column = FloatCol(default = 0)
    entity_class = FoyersFiscaux
    label = u"cd_doment"
    start_date = date(2002, 1, 1)
    stop_date = date(2005, 12, 31)

    def function(self, simulation, period):
        '''
        DOM-TOM investments made through a business (box EH of form 2042 C),
        deducted in full.  Applicable 2002-2005.
        '''
        period = period.this_year
        f6eh = simulation.calculate('f6eh', period)
        return period, f6eh
class cd_eparet(Variable):
    # Deduction for retirement savings (PERP, PREFON, COREM, CGOS), from 2004.
    column = FloatCol(default = 0)
    entity_class = FoyersFiscaux
    label = u"cd_eparet"
    start_date = date(2004, 1, 1)

    def function(self, simulation, period):
        '''
        Retirement savings (PERP, PREFON, COREM and CGOS), from 2004 onward.
        Contributions (6RS/6RT/6RU) plus buy-backs (6SS/6ST/6SU) are
        deducted, capped by the per-person ceiling (6PS/6PT/6PU) when one
        is declared.
        '''
        period = period.this_year
        f6ps_holder = simulation.compute('f6ps', period)
        f6rs_holder = simulation.compute('f6rs', period)
        f6ss_holder = simulation.compute('f6ss', period)
        # Split the individual-level amounts by role inside the household:
        # declarant (VOUS), partner (CONJ) and first dependant (PAC1).
        f6ps = self.filter_role(f6ps_holder, role = VOUS)
        f6pt = self.filter_role(f6ps_holder, role = CONJ)
        f6pu = self.filter_role(f6ps_holder, role = PAC1)
        f6rs = self.filter_role(f6rs_holder, role = VOUS)
        f6rt = self.filter_role(f6rs_holder, role = CONJ)
        f6ru = self.filter_role(f6rs_holder, role = PAC1)
        f6ss = self.filter_role(f6ss_holder, role = VOUS)
        f6st = self.filter_role(f6ss_holder, role = CONJ)
        f6su = self.filter_role(f6ss_holder, role = PAC1)
        # TODO: in theory the deduction ceilings (ps, pt, pu) are computed
        # on form 2041 GX.
        # A ceiling of zero means "no ceiling declared": deduct in full.
        return period, ((f6ps == 0) * (f6rs + f6ss) +
                    (f6ps != 0) * min_(f6rs + f6ss, f6ps) +
                    (f6pt == 0) * (f6rt + f6st) +
                    (f6pt != 0) * min_(f6rt + f6st, f6pt) +
                    (f6pu == 0) * (f6ru + f6su) +
                    (f6pu != 0) * min_(f6ru + f6su, f6pu))
class cd_sofipe(Variable):
    # Deduction for subscriptions to SOFIPECHE capital (box CC), 2002-2006.
    column = FloatCol(default = 0)
    entity_class = FoyersFiscaux
    label = u"cd_sofipe"
    start_date = date(2002, 1, 1)
    stop_date = date(2006, 12, 31)

    def function(self, simulation, period):
        '''
        Subscriptions to the capital of SOFIPECHE companies (box CC of the
        complementary declaration), 2002-2006.  Capped at a fraction of
        rbg_int and at an absolute ceiling doubled for couples.
        '''
        period = period.this_year
        f6cc = simulation.calculate('f6cc', period)
        rbg_int = simulation.calculate('rbg_int', period)
        maries_ou_pacses = simulation.calculate('maries_ou_pacses', period)
        sofipe = simulation.legislation_at(period.start).ir.charges_deductibles.sofipe
        max1 = min_(sofipe.taux * rbg_int, sofipe.max * (1 + maries_ou_pacses))
        return period, min_(f6cc, max1)
class cd_cinema(Variable):
    column = FloatCol(default = 0)
    entity_class = FoyersFiscaux
    label = u"cd_cinema"
    start_date = date(2002, 1, 1)
    stop_date = date(2005, 12, 31)

    def function(self, simulation, period):
        '''
        Subscriptions in favour of cinema or audiovisual production
        (box AA of form 2042 C), applicable 2002-2005.
        '''
        period = period.this_year
        subscribed = simulation.calculate('f6aa', period)
        net_income = simulation.calculate('rbg_int', period)
        params = simulation.legislation_at(period.start).ir.charges_deductibles.cinema
        # The deduction is limited both relatively (share of rbg_int) and
        # absolutely (params.max).
        ceiling = min_(params.taux * net_income, params.max)
        return period, min_(subscribed, ceiling)
class cd_ecodev(Variable):
    # Deduction for co-development savings account deposits (box EH), 2007-2008.
    column = FloatCol(default = 0)
    entity_class = FoyersFiscaux
    label = u"cd_ecodev"
    start_date = date(2007, 1, 1)
    stop_date = date(2008, 12, 31)

    def function(self, simulation, period):
        '''
        Deposits on a co-development savings account (box EH of the
        complementary declaration), 2007-2008.  Capped at a fraction of
        rbg_int and at an absolute ceiling.
        '''
        period = period.this_year
        f6eh = simulation.calculate('f6eh', period)
        rbg_int = simulation.calculate('rbg_int', period)
        ecodev = simulation.legislation_at(period.start).ir.charges_deductibles.ecodev
        max1 = min_(ecodev.taux * rbg_int, ecodev.max)
        return period, min_(f6eh, max1)
class cd_grorep(Variable):
    # Deduction for major repair works paid by bare owners, from 2009.
    column = FloatCol(default = 0)
    entity_class = FoyersFiscaux
    label = u"cd_grorep"
    start_date = date(2009, 1, 1)

    def function(self, simulation, period):
        '''
        Major repair expenses of bare owners, from 2009 onward.  Sums the
        current-year expenses (box 6CB) and the carry-overs from previous
        years (boxes 6HJ, 6HK and 6HL), capped at grorep.max.
        '''
        period = period.this_year
        f6cb = simulation.calculate('f6cb', period)
        f6hj = simulation.calculate('f6hj', period)
        f6hk = simulation.calculate('f6hk', period)
        f6hl = simulation.calculate('f6hl', period)
        grorep = simulation.legislation_at(period.start).ir.charges_deductibles.grorep
        # NOTE(review): box 6HM (2013 carry-over) is declared above but not
        # included in this sum -- verify whether it should be.
        return period, min_(f6cb + f6hj + f6hk + f6hl, grorep.max)
| benjello/openfisca-france | openfisca_france/model/prelevements_obligatoires/impot_revenu/charges_deductibles.py | Python | agpl-3.0 | 25,157 |
# Toy examples exercising the VFE sparse-GP models (regression and
# classification), both collapsed and uncollapsed variants.
print "importing stuff..."
import numpy as np
import pdb
import matplotlib.pylab as plt
from scipy import special
from .datautils import step, spiral
from .context import vfe
def run_regression_1D_collapsed():
    '''Fit the collapsed SGPR model on a toy 1-D regression dataset.'''
    np.random.seed(42)
    print "create dataset ..."
    N = 200
    X = np.random.rand(N, 1)
    Y = np.sin(12 * X) + 0.5 * np.cos(25 * X) + np.random.randn(N, 1) * 0.2
    # plt.plot(X, Y, 'kx', mew=2)

    def plot(m):
        # Predictive mean +/- 2 std over a test grid, plus inducing points.
        # NOTE(review): reads `alpha` from the enclosing scope, which is
        # bound only further down -- fine as long as plot() runs afterwards.
        xx = np.linspace(-0.5, 1.5, 100)[:, None]
        mean, var = m.predict_f(xx, alpha)
        zu = m.zu
        mean_u, var_u = m.predict_f(zu)
        plt.figure()
        plt.plot(X, Y, 'kx', mew=2)
        plt.plot(xx, mean, 'b', lw=2)
        # pdb.set_trace()
        plt.fill_between(
            xx[:, 0],
            mean[:, 0] - 2 * np.sqrt(var),
            mean[:, 0] + 2 * np.sqrt(var),
            color='blue', alpha=0.2)
        plt.errorbar(zu, mean_u, yerr=2 * np.sqrt(var_u), fmt='ro')
        plt.xlim(-0.1, 1.1)

    # inference
    print "create model and optimize ..."
    M = 20
    alpha = 0.0001
    model = vfe.SGPR_collapsed(X, Y, M)
    # model.update_hypers(params)
    model.optimise(method='L-BFGS-B', alpha=alpha, maxiter=2000)
    # plot(model)
    # plt.show()
def run_step_1D_collapsed():
    '''Fit the collapsed SGPR model on a 1-D step-function dataset.'''
    np.random.seed(42)
    print "create dataset ..."
    N = 200
    X = np.random.rand(N, 1) * 3 - 1.5
    Y = step(X)
    # plt.plot(X, Y, 'kx', mew=2)

    def plot(m):
        # Predictive mean +/- 2 std over a test grid, plus inducing points.
        xx = np.linspace(-3, 3, 100)[:, None]
        mean, var = m.predict_f(xx, alpha)
        zu = m.zu
        mean_u, var_u = m.predict_f(zu)
        plt.figure()
        plt.plot(X, Y, 'kx', mew=2)
        plt.plot(xx, mean, 'b', lw=2)
        plt.fill_between(
            xx[:, 0],
            mean[:, 0] - 2 * np.sqrt(var),
            mean[:, 0] + 2 * np.sqrt(var),
            color='blue', alpha=0.2)
        plt.errorbar(zu, mean_u, yerr=2 * np.sqrt(var_u), fmt='ro')
        # no_samples = 20
        # f_samples = m.sample_f(xx, no_samples)
        # for i in range(no_samples):
        #     plt.plot(xx, f_samples[:, :, i], linewidth=0.5, alpha=0.5)
        plt.xlim(-3, 3)

    # inference
    print "create model and optimize ..."
    M = 20
    alpha = 0.01
    model = vfe.SGPR_collapsed(X, Y, M)
    model.optimise(method='L-BFGS-B', alpha=alpha, maxiter=1000)
    plot(model)
    plt.show()
def run_regression_1D(nat_param=True):
    '''Fit the uncollapsed SGPR model on a toy 1-D regression dataset.

    nat_param selects whether the variational posterior uses the natural
    parameterisation.
    '''
    np.random.seed(42)
    print "create dataset ..."
    N = 200
    X = np.random.rand(N, 1)
    Y = np.sin(12 * X) + 0.5 * np.cos(25 * X) + np.random.randn(N, 1) * 0.2
    # plt.plot(X, Y, 'kx', mew=2)

    def plot(m):
        # Predictive mean +/- 2 std over a test grid, plus inducing points.
        xx = np.linspace(-0.5, 1.5, 100)[:, None]
        mean, var = m.predict_f(xx)
        zu = m.sgp_layer.zu
        mean_u, var_u = m.predict_f(zu)
        plt.figure()
        plt.plot(X, Y, 'kx', mew=2)
        plt.plot(xx, mean, 'b', lw=2)
        plt.fill_between(
            xx[:, 0],
            mean[:, 0] - 2 * np.sqrt(var[:, 0]),
            mean[:, 0] + 2 * np.sqrt(var[:, 0]),
            color='blue', alpha=0.2)
        plt.errorbar(zu, mean_u, yerr=2 * np.sqrt(var_u), fmt='ro')
        plt.xlim(-0.1, 1.1)

    # inference
    print "create model and optimize ..."
    M = 20
    model = vfe.SGPR(X, Y, M, lik='Gaussian', nat_param=nat_param)
    model.optimise(method='L-BFGS-B', maxiter=20000)
    # model.optimise(method='adam', adam_lr=0.05, maxiter=2000)
    plot(model)
    plt.show()
def run_step_1D():
    '''Fit the uncollapsed SGPR model on a 1-D step-function dataset and
    draw posterior function samples.'''
    np.random.seed(42)
    print "create dataset ..."
    N = 200
    X = np.random.rand(N, 1) * 3 - 1.5
    Y = step(X)
    # plt.plot(X, Y, 'kx', mew=2)

    def plot(m):
        # Predictive mean +/- 2 std, inducing points and sampled functions.
        xx = np.linspace(-3, 3, 100)[:, None]
        mean, var = m.predict_f(xx)
        zu = m.sgp_layer.zu
        mean_u, var_u = m.predict_f(zu)
        plt.figure()
        plt.plot(X, Y, 'kx', mew=2)
        plt.plot(xx, mean, 'b', lw=2)
        plt.fill_between(
            xx[:, 0],
            mean[:, 0] - 2 * np.sqrt(var[:, 0]),
            mean[:, 0] + 2 * np.sqrt(var[:, 0]),
            color='blue', alpha=0.2)
        plt.errorbar(zu, mean_u, yerr=2 * np.sqrt(var_u), fmt='ro')
        # Overlay posterior samples on a denser grid.
        no_samples = 20
        xx = np.linspace(-3, 3, 500)[:, None]
        f_samples = m.sample_f(xx, no_samples)
        for i in range(no_samples):
            plt.plot(xx, f_samples[:, :, i], linewidth=0.5, alpha=0.5)
        plt.xlim(-3, 3)

    # inference
    print "create model and optimize ..."
    M = 20
    model = vfe.SGPR(X, Y, M, lik='Gaussian')
    model.optimise(method='L-BFGS-B', maxiter=2000)
    plot(model)
    plt.show()
def run_banana():
    '''Fit an SGPR probit classifier on the banana dataset and plot the
    decision boundary.'''
    def gridParams():
        # Rectangular evaluation grid covering the data range.
        mins = [-3.25, -2.85]
        maxs = [3.65, 3.4]
        nGrid = 50
        xspaced = np.linspace(mins[0], maxs[0], nGrid)
        yspaced = np.linspace(mins[1], maxs[1], nGrid)
        xx, yy = np.meshgrid(xspaced, yspaced)
        Xplot = np.vstack((xx.flatten(), yy.flatten())).T
        return mins, maxs, xx, yy, Xplot

    def plot(m):
        # Scatter both classes, inducing points and the f=0 decision contour.
        col1 = '#0172B2'
        col2 = '#CC6600'
        mins, maxs, xx, yy, Xplot = gridParams()
        mf, vf = m.predict_f(Xplot)
        plt.figure()
        plt.plot(
            Xtrain[:, 0][Ytrain[:, 0] == 1],
            Xtrain[:, 1][Ytrain[:, 0] == 1],
            'o', color=col1, mew=0, alpha=0.5)
        plt.plot(
            Xtrain[:, 0][Ytrain[:, 0] == -1],
            Xtrain[:, 1][Ytrain[:, 0] == -1],
            'o', color=col2, mew=0, alpha=0.5)
        zu = m.sgp_layer.zu
        plt.plot(zu[:, 0], zu[:, 1], 'ro', mew=0, ms=4)
        plt.contour(
            xx, yy, mf.reshape(*xx.shape), [0],
            colors='k', linewidths=1.8, zorder=100)

    Xtrain = np.loadtxt(
        './examples/data/banana_X_train.txt', delimiter=',')
    Ytrain = np.loadtxt(
        './examples/data/banana_Y_train.txt', delimiter=',').reshape(-1, 1)
    # Relabel class 0 as -1 for the probit likelihood.
    Ytrain[np.where(Ytrain == 0)[0]] = -1
    M = 50
    model = vfe.SGPR(Xtrain, Ytrain, M, lik='Probit')
    model.optimise(method='L-BFGS-B', maxiter=2000)
    plot(model)
    plt.show()
def run_regression_1D_stoc():
    '''Fit the uncollapsed SGPR model on the toy 1-D regression dataset
    using the stochastic (adam) optimiser.'''
    np.random.seed(42)
    print "create dataset ..."
    N = 200
    X = np.random.rand(N, 1)
    Y = np.sin(12 * X) + 0.5 * np.cos(25 * X) + np.random.randn(N, 1) * 0.2
    # plt.plot(X, Y, 'kx', mew=2)

    def plot(m):
        # Predictive mean +/- 2 std over a wide test grid, plus inducing points.
        xx = np.linspace(-1.5, 2.5, 200)[:, None]
        mean, var = m.predict_f(xx)
        zu = m.sgp_layer.zu
        mean_u, var_u = m.predict_f(zu)
        plt.figure()
        plt.plot(X, Y, 'kx', mew=2)
        plt.plot(xx, mean, 'b', lw=2)
        plt.fill_between(
            xx[:, 0],
            mean[:, 0] - 2 * np.sqrt(var[:, 0]),
            mean[:, 0] + 2 * np.sqrt(var[:, 0]),
            color='blue', alpha=0.2)
        plt.errorbar(zu, mean_u, yerr=2 * np.sqrt(var_u), fmt='ro')
        plt.xlim(-0.1, 1.1)

    # inference
    print "create model and optimize ..."
    M = 20
    model = vfe.SGPR(X, Y, M, lik='Gaussian')
    # NOTE(review): mb_size=N means every "minibatch" is the full dataset.
    model.optimise(method='adam',
                   maxiter=100000, mb_size=N, adam_lr=0.001)
    # plot(model)
    # plt.show()
    # plt.savefig('/tmp/vfe_gpr_1D_stoc.pdf')
def run_banana_stoc():
    '''Fit an SGPR probit classifier on the banana dataset with the
    stochastic (adam) optimiser and plot the decision boundary.'''
    def gridParams():
        # Rectangular evaluation grid covering the data range.
        mins = [-3.25, -2.85]
        maxs = [3.65, 3.4]
        nGrid = 50
        xspaced = np.linspace(mins[0], maxs[0], nGrid)
        yspaced = np.linspace(mins[1], maxs[1], nGrid)
        xx, yy = np.meshgrid(xspaced, yspaced)
        Xplot = np.vstack((xx.flatten(), yy.flatten())).T
        return mins, maxs, xx, yy, Xplot

    def plot(m):
        # Scatter both classes, inducing points and the f=0 decision contour.
        col1 = '#0172B2'
        col2 = '#CC6600'
        mins, maxs, xx, yy, Xplot = gridParams()
        mf, vf = m.predict_f(Xplot)
        plt.figure()
        plt.plot(
            Xtrain[:, 0][Ytrain[:, 0] == 1],
            Xtrain[:, 1][Ytrain[:, 0] == 1],
            'o', color=col1, mew=0, alpha=0.5)
        plt.plot(
            Xtrain[:, 0][Ytrain[:, 0] == -1],
            Xtrain[:, 1][Ytrain[:, 0] == -1],
            'o', color=col2, mew=0, alpha=0.5)
        zu = m.sgp_layer.zu
        plt.plot(zu[:, 0], zu[:, 1], 'ro', mew=0, ms=4)
        plt.contour(
            xx, yy, mf.reshape(*xx.shape), [0],
            colors='k', linewidths=1.8, zorder=100)

    Xtrain = np.loadtxt(
        './examples/data/banana_X_train.txt', delimiter=',')
    Ytrain = np.loadtxt(
        './examples/data/banana_Y_train.txt', delimiter=',').reshape(-1, 1)
    # Relabel class 0 as -1 for the probit likelihood.
    Ytrain[np.where(Ytrain == 0)[0]] = -1
    M = 30
    model = vfe.SGPR(Xtrain, Ytrain, M, lik='Probit')
    # NOTE(review): mb_size is set to M (the number of inducing points, 30);
    # it looks like a minibatch size was intended -- confirm.
    model.optimise(method='adam',
                   maxiter=100000, mb_size=M, adam_lr=0.001)
    plot(model)
    plt.show()
    # plt.savefig('/tmp/vfe_gpc_banana_stoc.pdf')
if __name__ == '__main__':
    # Only the uncollapsed 1-D regression demos run by default (both
    # parameterisations); the other examples are kept for manual use.
    # run_regression_1D_collapsed()
    # run_step_1D_collapsed()
    run_regression_1D(True)
    run_regression_1D(False)
    # run_step_1D()
    # run_banana()
    # run_regression_1D_stoc()
    # run_banana_stoc()
| thangbui/geepee | examples/gpr_vfe_examples.py | Python | mit | 8,888 |
#!/bin/python3
import datetime, os, re, urllib.request, html.parser, locale, sys
from bs4 import BeautifulSoup
from os.path import expanduser
# Use a French locale so strftime produces French month/day names.
if sys.platform == "win32":
    locale.setlocale(locale.LC_ALL, 'fra')
else:
    locale.setlocale(locale.LC_ALL, 'fr_FR.utf8')
home = expanduser("~")
now = datetime.datetime.now()  #Get today's date
os.chdir(home)
os.chdir('Desktop')  #Go to Desktop folder
Base_folder = r'Breviaire_%s-%s-%s' % (now.strftime("%d"), now.strftime("%m"), now.strftime("%y"))  #All files will be stored in this date-stamped folder
if not os.path.exists(Base_folder): os.makedirs(Base_folder)  #Create a folder with today's date
os.chdir(Base_folder)  #Go to the freshly created folder
# Offset back to the most recent Sunday: weekday() is 0 (Mon) .. 6 (Sun).
if now.weekday() != 6:
    idx = (now.weekday() + 1)
else:
    idx = 0
Base_date = now - datetime.timedelta(idx)  #Get last Sunday's date
print('Création du dossier %s sur le Bureau\nLe programme téléchargera les textes pour 4 semaines.\nÀ la fin, ajoutez le ficher index.html en Calibre.\n' % Base_folder)
next_date = Base_date
# Pretend to be a desktop browser; some servers reject urllib's default agent.
user_agent = 'Mozilla/5.0 (X11; Linux x86_64; rv:56.0) Gecko/20100101 Firefox/56.0'
headers={'User-Agent':user_agent,}
# HTML skeleton for the top-level index page listing all downloaded days.
# NOTE(review): `main_index` is not referenced anywhere in this part of the
# script -- presumably it is written to the master index.html further down;
# verify it is actually used.
main_index = """
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
<meta content="AELF" name="Author"/>
<meta http-equiv="content-language" content="fr">
<title>Liturgie des Heures</title>
</head>
<body LANG=fr>
<div>
<h1>Liturgie des Heures</h1>
</div>
<div style="line-height:150%; text-align: center;">
"""
def aelf_unescape(link, filename):
    """Download `link` (using the module-level browser `headers`) and save the
    UTF-8 decoded body to `filename`.

    Network failures (urllib.error.URLError) are printed and swallowed so
    that one failed page does not abort the whole download run.
    """
    try:
        request = urllib.request.Request(link, None, headers)  # the assembled request
        # Bug fix: the original never closed `output_file` (and left the HTTP
        # response open), leaking one file handle per page and risking
        # unflushed data on interpreters without reference counting.  Both
        # resources are now closed deterministically via `with`.
        with urllib.request.urlopen(request) as response:
            html_heure = response.read().decode('utf-8')
        with open(filename, "w", encoding='utf-8') as output_file:
            output_file.write(html_heure)
    except urllib.error.URLError as e:
        print(e.reason)
    return
#Download the files for 4 weeks
# The day counter `i` runs from 1 to 28 inclusive (range(1, 29) == 4 weeks).
for i in range(1, 29):
    print ('Téléchargement des textes de %s-%s-%s...' % (next_date.strftime("%d"), next_date.strftime("%m"), next_date.strftime("%y")))
    next_folder = r'%s-%s-%s' % (next_date.strftime("%y"), next_date.strftime("%m"), next_date.strftime("%d"))
    if not os.path.exists(next_folder): os.makedirs(next_folder)
    os.chdir(next_folder)
    site_date = "%s-%s-%s" % (next_date.year, next_date.strftime("%m"), next_date.strftime("%d"))
    # One page per office of the day, numbered so the files sort liturgically.
    next_link = "http://www.aelf.org/%s/romain/messe" % (site_date)
    aelf_unescape(next_link,'0_Messe.html')
    laudes_link = "http://www.aelf.org/%s/romain/laudes" % (site_date)
    aelf_unescape(laudes_link,'1_Laudes.html')
    lectures_link = "http://www.aelf.org/%s/romain/lectures" % (site_date)
    aelf_unescape(lectures_link,'2_Lectures.html')
    tierce_link = "http://www.aelf.org/%s/romain/tierce" % (site_date)
    aelf_unescape(tierce_link,'3_Tierce.html')
    sexte_link = "http://www.aelf.org/%s/romain/sexte" % (site_date)
    aelf_unescape(sexte_link,'4_Sexte.html')
    none_link = "http://www.aelf.org/%s/romain/none" % (site_date)
    aelf_unescape(none_link,'5_None.html')
    vepres_link = "http://www.aelf.org/%s/romain/vepres" % (site_date)
    aelf_unescape(vepres_link,'6_Vepres.html')
    complies_link = "http://www.aelf.org/%s/romain/complies" % (site_date)
    aelf_unescape(complies_link,'7_Complies.html')
    # Fetch the Mass page again to extract the day's heading.
    # NOTE(review): this re-downloads a page already saved by aelf_unescape
    # above; parsing 0_Messe.html instead would save one request per day.
    try:
        request=urllib.request.Request(next_link,None,headers)  #The assembled request
        response = urllib.request.urlopen(request)
        html_doc = response.read()
        html_doc = html_doc.decode('utf-8')
    except urllib.error.URLError as e:
        print(e.reason)
    #Extract ordo and create day index file
    soup = BeautifulSoup(html_doc, "lxml")
    ordo_text = soup.find("div", {"class": "heading-day"})
    text_file = open("index.html", "w", encoding='utf-8')
    # Strip the site's navigation buttons and date-picker widgets.
    for hidden in ordo_text.find_all("div", {"class": ['btn btn-default btn-aelf btn-red m-t-10', 'btn btn-default btn-aelf myDatePicker', 'dropdown m-t-10', 'block-buttons-navigation only-tablet m-t-10']}):
        hidden.decompose()
    for hidden in ordo_text.find_all("a", {"class": ['btn btn-default btn-aelf myDatePicker', 'btn btn-default btn-aelf btn-red m-t-10']}):
        hidden.decompose()
    part1 = """
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
</head>
<body>
"""
    part3 = """
<div style="text-align: center; font-size:130%; line-height:150%"><a href="0_Messe.html">Messe</a> |
<a href="1_Laudes.html">Laudes</a> |
<a href="2_Lectures.html">Lectures</a> |
<a href="3_Tierce.html">Tierce</a> |
<a href="4_Sexte.html">Sexte</a> |
<a href="5_None.html">None</a> |
<a href="6_Vepres.html">Vepres</a> |
<a href="7_Complies.html">Complies</a>
<br><br>
</div>
<div style="text-align: center; font-size:130%;">
"""
    # "Jour-" link to the previous day's folder, except on the first day.
    if i != 1:
        hier = next_date + datetime.timedelta(days=-1)
        part4 = "<a href=\"../" + str(r'%s-%s-%s' % (hier.strftime("%y"), hier.strftime("%m"), hier.strftime("%d"))) + "/index.html\">Jour-</a> <a href=\"../index.html\">Retour</a>"
    else:
        part4 = """
<a href="../index.html">Retour</a>
"""
    # Bug fix: the loop's last iteration is i == 28 (range(1, 29)), but the
    # original tested `i != 63` -- a leftover from a 9-week (63-day) version
    # -- so the final day always got a "Jour+" link pointing at a folder
    # that is never created.
    if i != 28:
        demain = next_date + datetime.timedelta(days=1)
        part5 = " <a href=\"../" + str(r'%s-%s-%s' % (demain.strftime("%y"), demain.strftime("%m"), demain.strftime("%d"))) + "/index.html\">Jour+</a></div></body>"
    else:
        part5 = "</div></body></html>"
    joined = "%s%s%s%s%s" % (part1, ordo_text, part3, part4, part5)
    text_file.write(joined)
    text_file.close()
#Clean pages
for filename in os.listdir('.'):
if re.match(r'\d.*', filename):
with open(filename, 'rb') as messy:
soup = BeautifulSoup(messy, "lxml")
messy.close()
while True:
h2 = soup.find('h2')
if not h2:
break
h2.name = 'h3'
for remove in soup.find_all(attrs={'class':['menu-secondary-mobile', 'col-sm-3 col-md-2', 'col-sm-5 col-md-5', 'col-sm-7', 'col-sm-9 col-md-5', 'container-toolbar']}):
remove.decompose()
for remove in soup.find_all(id=['middle-col', 'toolbar', 'menu-mobile', 'header']):
remove.decompose()
for remove in soup.find_all('script'):
remove.decompose()
cleaned = str(soup)
with_retour = re.sub(r'</body>', r'<div style="text-align: center; font-size:130%; line-height:150%"><a href="index.html">Retour</a></div></body>', cleaned)
with_retour = re.sub(r'(</br>){2,}', r'\n <p> </p> \n', with_retour) #Remove extra blank lines
with_retour = re.sub(r'</br/>\s?</br/>', r'\n <p> </p> \n', with_retour) #replace <br> <br> with empty paragraphs
output_file = open(filename, "w", encoding='utf-8')
output_file.write(with_retour)
# Go to parent folder and add 1 day
os.chdir("..")
if next_date.weekday() == 6:
date_link = '<br><b><a href="%s/index.html">%s-%s</a></b> | ' % (next_folder, next_date.strftime("%d"), next_date.strftime("%m")) #Add link to main index
else:
date_link = '<a href="%s/index.html">%s</a> | ' % (next_folder, next_date.strftime("%d")) #Add link to main index
main_index = main_index + date_link
next_date = Base_date + datetime.timedelta(days=i)
#Close main index file
print('\nPréparation de l\'index (à ajouter à Calibre)')
main_index = main_index + '</div><div>© AELF</div></body></html>'
text_file = open("index.html", "w", encoding='utf-8')
text_file.write(main_index)
text_file.close()
| entodoays/aelf4cal | aelf4cal.py | Python | gpl-2.0 | 7,847 |
from django.utils.translation import ugettext as _
from django.utils.translation import get_language
from django.contrib import admin
from django.utils.encoding import force_unicode
from django.utils.html import escape
from django.http import Http404, HttpResponseRedirect
from django.core.urlresolvers import reverse
from functools import update_wrapper
from django.contrib.admin.util import unquote
from django.conf import settings
from django import forms
from django.forms.models import BaseInlineFormSet
class GetLanguageMixin(object):
    """Mixin that resolves the translation language for the current request.

    The using class must define ``language_field`` (the name of the language
    column on the translation model) and, for the tabs helper,
    ``translation_accessor``.
    """
    def get_language_request(self, request, as_dict=False, as_qs=False, suffix='', add_suffix=False):
        """Return the requested language code.

        With ``as_dict`` the code is wrapped in a template-context dict;
        with ``as_qs`` it is rendered as a ``field=code`` query-string pair
        (the field gets ``suffix`` appended when ``add_suffix`` is set).
        """
        field = self.language_field
        # Fall back to the suffixed parameter, then to the active Django
        # language.  get_language() is evaluated unconditionally here, just
        # as in the original eager .get() default.
        fallback = request.REQUEST.get(field + suffix, get_language())
        language_code = request.REQUEST.get(field, fallback)
        if as_dict:
            return {'translation_language_code': language_code,
                    'translation_language_field': field,}
        if as_qs:
            qs_field = field + suffix if add_suffix and suffix else field
            return qs_field + '=' + language_code
        return language_code
    def get_language_request_tabs(self, request, obj=None):
        """Build context for the language tabs of the change form.

        One tab per ``settings.LANGUAGES`` entry; the active tab is flagged
        ``current`` and tabs with an existing translation carry its ``id``.
        Deletion is allowed only when more than one translation exists.
        """
        language_dict = self.get_language_request(request, as_dict=True)
        current = language_dict['translation_language_code']
        tabs = {}
        for code, label in settings.LANGUAGES:
            entry = {'name': label}
            if code == current:
                entry['current'] = True
            tabs[code] = entry
        allow_deletion = False
        if obj:
            rows = getattr(obj, self.translation_accessor).values('id', self.language_field)
            allow_deletion = len(rows) > 1
            for row in rows:
                code = row[self.language_field]
                if code in tabs:
                    tabs[code]['id'] = row['id']
        return dict(allow_deletion=allow_deletion, language_tabs=tabs, **language_dict)
class AtLeastOneRequiredInlineFormSet(BaseInlineFormSet):
    """Inline formset that refuses to validate unless at least one
    non-deleted form actually contains data."""
    error_msg = 'At least one item required.'
    def clean(self):
        """Raise ValidationError when every form is empty or marked deleted."""
        super(AtLeastOneRequiredInlineFormSet, self).clean()
        if any(self.errors):
            # Per-form errors take precedence over the formset-level check.
            return
        has_live_form = False
        for cleaned_data in self.cleaned_data:
            if cleaned_data and not cleaned_data.get('DELETE', False):
                has_live_form = True
                break
        if not has_live_form:
            raise forms.ValidationError(self.error_msg)
class AtLeastOneTranslationRequiredInlineFormSet(AtLeastOneRequiredInlineFormSet):
    """Same "at least one" rule, with a message reminding the editor that an
    empty form may simply mean the content lives under another language tab."""
    error_msg = 'Translation required, fill in fields or switch language.'
def make_translation_admin(translationmodel,
    SharedAdminBase=admin.ModelAdmin,
    TranslationAdminBase=admin.ModelAdmin,
    TranslationInlineBase=admin.StackedInline,
    return_parts=False):
    """Build a ModelAdmin that edits a shared model together with its
    per-language rows in `translationmodel`.

    The returned SharedAdmin
      - shows exactly one translation inline, filtered to the language
        selected via the request's ``language_code`` parameter;
      - replaces its own changelist with the translation model's
        changelist (one row per translation, see get_urls);
      - stamps the active language onto translation rows on save.

    With ``return_parts=True`` the intermediate classes are returned as
    well so callers can subclass them.

    NOTE(review): written for a Python 2 / old-style Django stack
    (``request.REQUEST``, ``unicode``, ``patterns``, ``force_unicode``).
    ``PermissionDenied`` is raised below but never imported in this
    module — confirm against the original project.
    """
    class BaseTranslationInline(GetLanguageMixin, TranslationInlineBase):
        # Inline restricted to rows in the currently requested language;
        # the language column itself is hidden and set programmatically.
        language_field = 'language_code'
        exclude = ('language_code',)
        def queryset(self, request):
            queryset = super(BaseTranslationInline, self).queryset(request)
            queryset = queryset.filter(**{self.language_field:self.get_language_request(request)})
            return queryset
    class TranslationInline(BaseTranslationInline):
        # max_num=1: only the active language's translation is edited at once.
        max_num = 1
        model = translationmodel
        template = 'admin/dragoman_blog/stacked_inline.html'
        formset = AtLeastOneTranslationRequiredInlineFormSet
    class BaseTranslationAdmin(GetLanguageMixin, TranslationAdminBase):
        # Admin for the translation model itself; its change view only
        # redirects to the shared model's change form.
        language_field = 'language_code'
        delete_confirmation_template = 'admin/dragoman_blog/delete_confirmation.html'
        change_list_template = 'admin/dragoman_blog/change_list.html'
        def __init__(self, model, admin_site, sharedadmin):
            # Keep a backref to the SharedAdmin so we can redirect to it.
            self.sharedadmin = sharedadmin
            return super(BaseTranslationAdmin, self).__init__(model, admin_site)
        def change_view(self, request, object_id, form_url='', extra_context=None):
            "The 'change' admin view for this model."
            model = self.model
            opts = model._meta
            obj = self.get_object(request, unquote(object_id))
            if not self.has_change_permission(request, obj):
                # NOTE(review): PermissionDenied is not imported here —
                # this would raise NameError instead; verify upstream.
                raise PermissionDenied
            if obj is None:
                raise Http404(_('%(name)s object with primary key %(key)r does not exist.') % {'name': force_unicode(opts.verbose_name), 'key': escape(object_id)})
            # Redirect to the shared object's change form, preserving the
            # translation's language in the query string.
            qs = '?'+self.language_field+'='+getattr(obj, self.language_field)
            shared_pk = getattr(obj, self.sharedadmin.translation_of_field).pk
            info = self.sharedadmin.model._meta.app_label, self.sharedadmin.model.__name__.lower()
            urlname = 'admin:%s_%s_change' % info
            return HttpResponseRedirect(reverse(urlname, args=[shared_pk])+qs)
        def changelist_view(self, request, extra_context={}):
            # NOTE(review): mutable default argument, shared across calls.
            extra_context.update(self.get_language_request(request, as_dict=True, suffix='__exact'))
            return super(BaseTranslationAdmin, self).changelist_view(request, extra_context=extra_context)
        def delete_view(self, request, object_id, extra_context={}):
            extra_context.update(self.get_language_request(request, as_dict=True))
            resp = super(BaseTranslationAdmin, self).delete_view(request, object_id, extra_context=extra_context)
            if 'Location' in resp:
                # Carry the language selection through the post-delete redirect.
                resp['Location'] = resp['Location']+'?'+self.get_language_request(request, as_qs=True)
            return resp
    class TranslationAdmin(BaseTranslationAdmin):
        list_display = ('title', 'language_code')
        list_filter = ('language_code',)
    class SharedAdmin(GetLanguageMixin, SharedAdminBase):
        language_field = 'language_code'
        change_form_template = 'admin/dragoman_blog/change_form.html'
        delete_confirmation_template = 'admin/dragoman_blog/delete_confirmation.html'
        # Maps translation model -> its language field; populated in __init__.
        translation_model_map = {}
        def __init__(self, model, admin_site):
            super(SharedAdmin, self).__init__(model, admin_site)
            # `translation` (defined below) bundles
            # (translation model, TranslationAdmin, TranslationInline).
            self.translation_admin = self.translation[1](self.translation[0], admin_site, self)
            self.inlines = [self.translation[2]] + self.inlines
            self.translation_model = self.translation[0]
            for inline in self.inlines:
                if issubclass(inline, BaseTranslationInline):
                    self.translation_model_map[inline.model] = inline.language_field
            # Discover the reverse accessor and FK linking translations
            # back to the shared model.
            for related_object in self.model._meta.get_all_related_objects():
                if related_object.model == self.translation_model:
                    self.translation_accessor = related_object.get_accessor_name()
                    self.translation_of_field = related_object.field.name
        translation = (translationmodel, TranslationAdmin, TranslationInline)
        def get_object(self, request, object_id):
            obj = super(SharedAdmin, self).get_object(request, object_id)
            if obj:
                try:
                    # Expose the active-language translation's unicode repr
                    # as `title` for display purposes.
                    obj.title = unicode(getattr(obj, self.translation_accessor).get(**{self.language_field:self.get_language_request(request)}))
                except self.translation_model.DoesNotExist:
                    pass
            return obj
        def delete_view(self, request, object_id, extra_context={}):
            extra_context.update(self.get_language_request(request, as_dict=True))
            resp = super(SharedAdmin, self).delete_view(request, object_id, extra_context=extra_context)
            if 'Location' in resp:
                resp['Location'] = resp['Location']+'?'+self.get_language_request(request, as_qs=True)
            return resp
        def add_view(self, request, form_url='', extra_context={}):
            # Force an explicit language in the URL so the inline filters
            # consistently; then render with the language tabs.
            if request.method != 'POST' and not 'language_code' in request.GET:
                return HttpResponseRedirect(request.path+'?'+self.get_language_request(request, as_qs=True))
            else:
                extra_context.update(self.get_language_request_tabs(request))
            return super(SharedAdmin, self).add_view(request, form_url=form_url, extra_context=extra_context)
        def change_view(self, request, object_id, form_url='', extra_context={}):
            if request.method != 'POST' and not 'language_code' in request.GET:
                return HttpResponseRedirect(request.path+'?'+self.get_language_request(request, as_qs=True))
            else:
                obj = self.get_object(request, unquote(object_id))
                extra_context.update(self.get_language_request_tabs(request, obj))
            return super(SharedAdmin, self).change_view(request, object_id, form_url=form_url, extra_context=extra_context)
        def response_change(self, request, obj):
            # Append the language to the redirect; use the changelist filter
            # suffix (__exact) only when returning to the changelist.
            add_suffix = False
            if not ("_continue" in request.POST or "_addanother" in request.POST):
                add_suffix = True
            resp = super(SharedAdmin, self).response_change(request, obj)
            resp['Location'] = resp['Location']+'?'+self.get_language_request(request, as_qs=True, suffix='__exact', add_suffix=add_suffix)
            return resp
        def response_add(self, request, obj):
            add_suffix = False
            if not ("_continue" in request.POST or "_addanother" in request.POST):
                add_suffix = True
            resp = super(SharedAdmin, self).response_add(request, obj)
            resp['Location'] = resp['Location']+'?'+self.get_language_request(request, as_qs=True, suffix='__exact', add_suffix=add_suffix)
            return resp
        def get_urls(self):
            from django.conf.urls import patterns, url, include
            def wrap(view):
                def wrapper(*args, **kwargs):
                    return self.admin_site.admin_view(view)(*args, **kwargs)
                return update_wrapper(wrapper, view)
            info = self.model._meta.app_label, self.model.__name__.lower()
            info2 = self.translation_model._meta.app_label, self.translation_model.__name__.lower()
            # The translation changelist replaces the shared changelist.
            # NOTE(review): '^$' is registered twice (names info and info2)
            # so both names reverse to the same view — presumably intentional.
            urlpatterns = patterns('',
                url(r'^$', wrap(self.translation_admin.changelist_view), name='%s_%s_changelist' % info),
                url(r'^translation/', include(self.translation_admin.urls)),
                url(r'^$', wrap(self.translation_admin.changelist_view), name='%s_%s_changelist' % info2),
            )
            return urlpatterns + super(SharedAdmin, self).get_urls() + patterns('',
                url(r'^add/$', wrap(self.add_view), name='%s_%s_add' % info2),
            )
        def urls(self):
            return self.get_urls()
        urls = property(urls)
        def save_formset(self, request, form, formset, change):
            # Stamp the active language onto every valid translation form
            # before the real save (the field is excluded from the form).
            if formset.model in self.translation_model_map:
                formset.save(commit=False)
                for f in formset.forms:
                    if f.is_valid():
                        obj = f.instance
                        setattr(obj, self.translation_model_map[formset.model], self.get_language_request(request))
            super(SharedAdmin, self).save_formset(request, form, formset, change)
    if return_parts:
        return SharedAdmin, TranslationAdmin, TranslationInline, BaseTranslationAdmin, BaseTranslationInline
    return SharedAdmin
| fivethreeo/django-dragoman-blog | dragoman_blog/admin_utils.py | Python | bsd-3-clause | 11,719 |
#
# Author: Travis Oliphant 2002-2011 with contributions from
# SciPy Developers 2004-2011
#
from scipy._lib._util import getfullargspec_no_self as _getfullargspec
import sys
import keyword
import re
import types
import warnings
import inspect
from itertools import zip_longest
from collections import namedtuple
from scipy._lib import doccer
from scipy._lib._util import _lazywhere
from ._distr_params import distcont, distdiscrete
from scipy._lib._util import check_random_state
from scipy.special import (comb, chndtr, entr, xlogy, ive)
# for root finding for continuous distribution ppf, and max likelihood
# estimation
from scipy import optimize
# for functions of continuous distributions (e.g. moments, entropy, cdf)
from scipy import integrate
# to approximate the pdf of a continuous distribution given its cdf
from scipy.misc import derivative
# for scipy.stats.entropy. Attempts to import just that function or file
# have cause import problems
from scipy import stats
from numpy import (arange, putmask, ravel, ones, shape, ndarray, zeros, floor,
logical_and, log, sqrt, place, argmax, vectorize, asarray,
nan, inf, isinf, NINF, empty)
import numpy as np
from ._constants import _XMAX
# These are the docstring parts used for substitution in specific
# distribution docstrings
docheaders = {'methods': """\nMethods\n-------\n""",
'notes': """\nNotes\n-----\n""",
'examples': """\nExamples\n--------\n"""}
_doc_rvs = """\
rvs(%(shapes)s, loc=0, scale=1, size=1, random_state=None)
Random variates.
"""
_doc_pdf = """\
pdf(x, %(shapes)s, loc=0, scale=1)
Probability density function.
"""
_doc_logpdf = """\
logpdf(x, %(shapes)s, loc=0, scale=1)
Log of the probability density function.
"""
_doc_pmf = """\
pmf(k, %(shapes)s, loc=0, scale=1)
Probability mass function.
"""
_doc_logpmf = """\
logpmf(k, %(shapes)s, loc=0, scale=1)
Log of the probability mass function.
"""
_doc_cdf = """\
cdf(x, %(shapes)s, loc=0, scale=1)
Cumulative distribution function.
"""
_doc_logcdf = """\
logcdf(x, %(shapes)s, loc=0, scale=1)
Log of the cumulative distribution function.
"""
_doc_sf = """\
sf(x, %(shapes)s, loc=0, scale=1)
Survival function (also defined as ``1 - cdf``, but `sf` is sometimes more accurate).
"""
_doc_logsf = """\
logsf(x, %(shapes)s, loc=0, scale=1)
Log of the survival function.
"""
_doc_ppf = """\
ppf(q, %(shapes)s, loc=0, scale=1)
Percent point function (inverse of ``cdf`` --- percentiles).
"""
_doc_isf = """\
isf(q, %(shapes)s, loc=0, scale=1)
Inverse survival function (inverse of ``sf``).
"""
_doc_moment = """\
moment(order, %(shapes)s, loc=0, scale=1)
Non-central moment of the specified order.
"""
_doc_stats = """\
stats(%(shapes)s, loc=0, scale=1, moments='mv')
Mean('m'), variance('v'), skew('s'), and/or kurtosis('k').
"""
_doc_entropy = """\
entropy(%(shapes)s, loc=0, scale=1)
(Differential) entropy of the RV.
"""
_doc_fit = """\
fit(data)
Parameter estimates for generic data.
See `scipy.stats.rv_continuous.fit <https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.rv_continuous.fit.html#scipy.stats.rv_continuous.fit>`__ for detailed documentation of the
keyword arguments.
"""
_doc_expect = """\
expect(func, args=(%(shapes_)s), loc=0, scale=1, lb=None, ub=None, conditional=False, **kwds)
Expected value of a function (of one argument) with respect to the distribution.
"""
_doc_expect_discrete = """\
expect(func, args=(%(shapes_)s), loc=0, lb=None, ub=None, conditional=False)
Expected value of a function (of one argument) with respect to the distribution.
"""
_doc_median = """\
median(%(shapes)s, loc=0, scale=1)
Median of the distribution.
"""
_doc_mean = """\
mean(%(shapes)s, loc=0, scale=1)
Mean of the distribution.
"""
_doc_var = """\
var(%(shapes)s, loc=0, scale=1)
Variance of the distribution.
"""
_doc_std = """\
std(%(shapes)s, loc=0, scale=1)
Standard deviation of the distribution.
"""
_doc_interval = """\
interval(confidence, %(shapes)s, loc=0, scale=1)
Confidence interval with equal areas around the median.
"""
_doc_allmethods = ''.join([docheaders['methods'], _doc_rvs, _doc_pdf,
_doc_logpdf, _doc_cdf, _doc_logcdf, _doc_sf,
_doc_logsf, _doc_ppf, _doc_isf, _doc_moment,
_doc_stats, _doc_entropy, _doc_fit,
_doc_expect, _doc_median,
_doc_mean, _doc_var, _doc_std, _doc_interval])
_doc_default_longsummary = """\
As an instance of the `rv_continuous` class, `%(name)s` object inherits from it
a collection of generic methods (see below for the full list),
and completes them with details specific for this particular distribution.
"""
_doc_default_frozen_note = """
Alternatively, the object may be called (as a function) to fix the shape,
location, and scale parameters returning a "frozen" continuous RV object:
rv = %(name)s(%(shapes)s, loc=0, scale=1)
- Frozen RV object with the same methods but holding the given shape,
location, and scale fixed.
"""
_doc_default_example = """\
Examples
--------
>>> from scipy.stats import %(name)s
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots(1, 1)
Calculate the first four moments:
%(set_vals_stmt)s
>>> mean, var, skew, kurt = %(name)s.stats(%(shapes)s, moments='mvsk')
Display the probability density function (``pdf``):
>>> x = np.linspace(%(name)s.ppf(0.01, %(shapes)s),
... %(name)s.ppf(0.99, %(shapes)s), 100)
>>> ax.plot(x, %(name)s.pdf(x, %(shapes)s),
... 'r-', lw=5, alpha=0.6, label='%(name)s pdf')
Alternatively, the distribution object can be called (as a function)
to fix the shape, location and scale parameters. This returns a "frozen"
RV object holding the given parameters fixed.
Freeze the distribution and display the frozen ``pdf``:
>>> rv = %(name)s(%(shapes)s)
>>> ax.plot(x, rv.pdf(x), 'k-', lw=2, label='frozen pdf')
Check accuracy of ``cdf`` and ``ppf``:
>>> vals = %(name)s.ppf([0.001, 0.5, 0.999], %(shapes)s)
>>> np.allclose([0.001, 0.5, 0.999], %(name)s.cdf(vals, %(shapes)s))
True
Generate random numbers:
>>> r = %(name)s.rvs(%(shapes)s, size=1000)
And compare the histogram:
>>> ax.hist(r, density=True, histtype='stepfilled', alpha=0.2)
>>> ax.legend(loc='best', frameon=False)
>>> plt.show()
"""
_doc_default_locscale = """\
The probability density above is defined in the "standardized" form. To shift
and/or scale the distribution use the ``loc`` and ``scale`` parameters.
Specifically, ``%(name)s.pdf(x, %(shapes)s, loc, scale)`` is identically
equivalent to ``%(name)s.pdf(y, %(shapes)s) / scale`` with
``y = (x - loc) / scale``. Note that shifting the location of a distribution
does not make it a "noncentral" distribution; noncentral generalizations of
some distributions are available in separate classes.
"""
_doc_default = ''.join([_doc_default_longsummary,
_doc_allmethods,
'\n',
_doc_default_example])
_doc_default_before_notes = ''.join([_doc_default_longsummary,
_doc_allmethods])
docdict = {
'rvs': _doc_rvs,
'pdf': _doc_pdf,
'logpdf': _doc_logpdf,
'cdf': _doc_cdf,
'logcdf': _doc_logcdf,
'sf': _doc_sf,
'logsf': _doc_logsf,
'ppf': _doc_ppf,
'isf': _doc_isf,
'stats': _doc_stats,
'entropy': _doc_entropy,
'fit': _doc_fit,
'moment': _doc_moment,
'expect': _doc_expect,
'interval': _doc_interval,
'mean': _doc_mean,
'std': _doc_std,
'var': _doc_var,
'median': _doc_median,
'allmethods': _doc_allmethods,
'longsummary': _doc_default_longsummary,
'frozennote': _doc_default_frozen_note,
'example': _doc_default_example,
'default': _doc_default,
'before_notes': _doc_default_before_notes,
'after_notes': _doc_default_locscale
}
# Reuse common content between continuous and discrete docs, change some
# minor bits.
docdict_discrete = docdict.copy()
docdict_discrete['pmf'] = _doc_pmf
docdict_discrete['logpmf'] = _doc_logpmf
docdict_discrete['expect'] = _doc_expect_discrete
_doc_disc_methods = ['rvs', 'pmf', 'logpmf', 'cdf', 'logcdf', 'sf', 'logsf',
                     'ppf', 'isf', 'stats', 'entropy', 'expect', 'median',
                     'mean', 'var', 'std', 'interval']
# Discrete distributions have no scale parameter; strip it from each entry.
for obj in _doc_disc_methods:
    docdict_discrete[obj] = docdict_discrete[obj].replace(', scale=1', '')
# These methods take an integer argument k rather than a real x.
_doc_disc_methods_err_varname = ['cdf', 'logcdf', 'sf', 'logsf']
for obj in _doc_disc_methods_err_varname:
    docdict_discrete[obj] = docdict_discrete[obj].replace('(x, ', '(k, ')
docdict_discrete.pop('pdf')
docdict_discrete.pop('logpdf')
_doc_allmethods = ''.join([docdict_discrete[obj] for obj in _doc_disc_methods])
docdict_discrete['allmethods'] = docheaders['methods'] + _doc_allmethods
docdict_discrete['longsummary'] = _doc_default_longsummary.replace(
    'rv_continuous', 'rv_discrete')
_doc_default_frozen_note = """
Alternatively, the object may be called (as a function) to fix the shape and
location parameters returning a "frozen" discrete RV object:
rv = %(name)s(%(shapes)s, loc=0)
    - Frozen RV object with the same methods but holding the given shape and
      location fixed.
"""
docdict_discrete['frozennote'] = _doc_default_frozen_note
_doc_default_discrete_example = """\
Examples
--------
>>> from scipy.stats import %(name)s
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots(1, 1)
Calculate the first four moments:
%(set_vals_stmt)s
>>> mean, var, skew, kurt = %(name)s.stats(%(shapes)s, moments='mvsk')
Display the probability mass function (``pmf``):
>>> x = np.arange(%(name)s.ppf(0.01, %(shapes)s),
...               %(name)s.ppf(0.99, %(shapes)s))
>>> ax.plot(x, %(name)s.pmf(x, %(shapes)s), 'bo', ms=8, label='%(name)s pmf')
>>> ax.vlines(x, 0, %(name)s.pmf(x, %(shapes)s), colors='b', lw=5, alpha=0.5)
Alternatively, the distribution object can be called (as a function)
to fix the shape and location. This returns a "frozen" RV object holding
the given parameters fixed.
Freeze the distribution and display the frozen ``pmf``:
>>> rv = %(name)s(%(shapes)s)
>>> ax.vlines(x, 0, rv.pmf(x), colors='k', linestyles='-', lw=1,
...           label='frozen pmf')
>>> ax.legend(loc='best', frameon=False)
>>> plt.show()
Check accuracy of ``cdf`` and ``ppf``:
>>> prob = %(name)s.cdf(x, %(shapes)s)
>>> np.allclose(x, %(name)s.ppf(prob, %(shapes)s))
True
Generate random numbers:
>>> r = %(name)s.rvs(%(shapes)s, size=1000)
"""
_doc_default_discrete_locscale = """\
The probability mass function above is defined in the "standardized" form.
To shift distribution use the ``loc`` parameter.
Specifically, ``%(name)s.pmf(k, %(shapes)s, loc)`` is identically
equivalent to ``%(name)s.pmf(k - loc, %(shapes)s)``.
"""
docdict_discrete['example'] = _doc_default_discrete_example
docdict_discrete['after_notes'] = _doc_default_discrete_locscale
_doc_default_before_notes = ''.join([docdict_discrete['longsummary'],
                                     docdict_discrete['allmethods']])
docdict_discrete['before_notes'] = _doc_default_before_notes
_doc_default_disc = ''.join([docdict_discrete['longsummary'],
                             docdict_discrete['allmethods'],
                             docdict_discrete['frozennote'],
                             docdict_discrete['example']])
docdict_discrete['default'] = _doc_default_disc
# clean up all the separate docstring elements, we do not need them anymore
# (the exec'd `del` removes each _doc_* name from the module namespace)
for obj in [s for s in dir() if s.startswith('_doc_')]:
    exec('del ' + obj)
del obj
def _moment(data, n, mu=None):
if mu is None:
mu = data.mean()
return ((data - mu)**n).mean()
def _moment_from_stats(n, mu, mu2, g1, g2, moment_func, args):
if (n == 0):
return 1.0
elif (n == 1):
if mu is None:
val = moment_func(1, *args)
else:
val = mu
elif (n == 2):
if mu2 is None or mu is None:
val = moment_func(2, *args)
else:
val = mu2 + mu*mu
elif (n == 3):
if g1 is None or mu2 is None or mu is None:
val = moment_func(3, *args)
else:
mu3 = g1 * np.power(mu2, 1.5) # 3rd central moment
val = mu3+3*mu*mu2+mu*mu*mu # 3rd non-central moment
elif (n == 4):
if g1 is None or g2 is None or mu2 is None or mu is None:
val = moment_func(4, *args)
else:
mu4 = (g2+3.0)*(mu2**2.0) # 4th central moment
mu3 = g1*np.power(mu2, 1.5) # 3rd central moment
val = mu4+4*mu*mu3+6*mu*mu*mu2+mu*mu*mu*mu
else:
val = moment_func(n, *args)
return val
def _skew(data):
"""
skew is third central moment / variance**(1.5)
"""
data = np.ravel(data)
mu = data.mean()
m2 = ((data - mu)**2).mean()
m3 = ((data - mu)**3).mean()
return m3 / np.power(m2, 1.5)
def _kurtosis(data):
"""kurtosis is fourth central moment / variance**2 - 3."""
data = np.ravel(data)
mu = data.mean()
m2 = ((data - mu)**2).mean()
m4 = ((data - mu)**4).mean()
return m4 / m2**2 - 3
def _fit_determine_optimizer(optimizer):
if not callable(optimizer) and isinstance(optimizer, str):
if not optimizer.startswith('fmin_'):
optimizer = "fmin_"+optimizer
if optimizer == 'fmin_':
optimizer = 'fmin'
try:
optimizer = getattr(optimize, optimizer)
except AttributeError as e:
raise ValueError("%s is not a valid optimizer" % optimizer) from e
return optimizer
# Frozen RV class
class rv_frozen:
    """A distribution with shape, location and scale parameters bound.

    Wraps a *private copy* of `dist` (rebuilt from its constructor
    parameters) so later mutation of the original distribution object
    cannot affect this frozen instance.  Every public method simply
    delegates to the wrapped distribution with the stored args/kwds.
    """
    def __init__(self, dist, *args, **kwds):
        # args/kwds hold the frozen shape, loc and scale parameters.
        self.args = args
        self.kwds = kwds
        # create a new instance
        self.dist = dist.__class__(**dist._updated_ctor_param())
        # Cache the support endpoints for the frozen shapes.
        shapes, _, _ = self.dist._parse_args(*args, **kwds)
        self.a, self.b = self.dist._get_support(*shapes)
    @property
    def random_state(self):
        # Delegates to the wrapped distribution's RNG.
        return self.dist._random_state
    @random_state.setter
    def random_state(self, seed):
        self.dist._random_state = check_random_state(seed)
    def pdf(self, x):   # raises AttributeError in frozen discrete distribution
        return self.dist.pdf(x, *self.args, **self.kwds)
    def logpdf(self, x):
        return self.dist.logpdf(x, *self.args, **self.kwds)
    def cdf(self, x):
        return self.dist.cdf(x, *self.args, **self.kwds)
    def logcdf(self, x):
        return self.dist.logcdf(x, *self.args, **self.kwds)
    def ppf(self, q):
        return self.dist.ppf(q, *self.args, **self.kwds)
    def isf(self, q):
        return self.dist.isf(q, *self.args, **self.kwds)
    def rvs(self, size=None, random_state=None):
        # Copy so repeated rvs() calls do not accumulate stale size entries.
        kwds = self.kwds.copy()
        kwds.update({'size': size, 'random_state': random_state})
        return self.dist.rvs(*self.args, **kwds)
    def sf(self, x):
        return self.dist.sf(x, *self.args, **self.kwds)
    def logsf(self, x):
        return self.dist.logsf(x, *self.args, **self.kwds)
    def stats(self, moments='mv'):
        kwds = self.kwds.copy()
        kwds.update({'moments': moments})
        return self.dist.stats(*self.args, **kwds)
    def median(self):
        return self.dist.median(*self.args, **self.kwds)
    def mean(self):
        return self.dist.mean(*self.args, **self.kwds)
    def var(self):
        return self.dist.var(*self.args, **self.kwds)
    def std(self):
        return self.dist.std(*self.args, **self.kwds)
    def moment(self, order=None, **kwds):
        return self.dist.moment(order, *self.args, **self.kwds, **kwds)
    def entropy(self):
        return self.dist.entropy(*self.args, **self.kwds)
    def pmf(self, k):  # discrete distributions only (no pdf counterpart)
        return self.dist.pmf(k, *self.args, **self.kwds)
    def logpmf(self, k):
        return self.dist.logpmf(k, *self.args, **self.kwds)
    def interval(self, confidence=None, **kwds):
        return self.dist.interval(confidence, *self.args, **self.kwds, **kwds)
    def expect(self, func=None, lb=None, ub=None, conditional=False, **kwds):
        # expect method only accepts shape parameters as positional args
        # hence convert self.args, self.kwds, also loc/scale
        # See the .expect method docstrings for the meaning of
        # other parameters.
        a, loc, scale = self.dist._parse_args(*self.args, **self.kwds)
        if isinstance(self.dist, rv_discrete):
            # Discrete expect has no `scale` parameter.
            return self.dist.expect(func, a, loc, lb, ub, conditional, **kwds)
        else:
            return self.dist.expect(func, a, loc, scale, lb, ub,
                                    conditional, **kwds)
    def support(self):
        return self.dist.support(*self.args, **self.kwds)
def argsreduce(cond, *args):
    """Clean arguments for vectorized evaluation.

    1. Ensure every argument is an array of dimension at least one.
    2. If `cond` is not all True, reduce each non-scalar argument to the
       1-D array of its elements (after broadcasting to ``cond.shape``)
       where `cond` is True; size-1 arguments are passed through.

    Returns the list of processed arguments.

    Examples
    --------
    >>> rng = np.random.default_rng()
    >>> A = rng.random((4, 5))
    >>> B = 2
    >>> C = rng.random((1, 5))
    >>> cond = np.ones(A.shape)
    >>> [A1, B1, C1] = argsreduce(cond, A, B, C)
    >>> A1.shape
    (4, 5)
    >>> B1.shape
    (1,)
    >>> C1.shape
    (1, 5)
    >>> cond[2,:] = 0
    >>> [A1, B1, C1] = argsreduce(cond, A, B, C)
    >>> A1.shape
    (15,)
    >>> B1.shape
    (1,)
    >>> C1.shape
    (15,)
    """
    # Some distributions assume arguments are iterable; promote scalars.
    promoted = np.atleast_1d(*args)
    # np.atleast_1d returns a bare array for a single argument and a list
    # of arrays otherwise — normalize to a list either way.
    if not isinstance(promoted, list):
        promoted = [promoted, ]
    if np.all(cond):
        # Fast path: nothing is masked out, so broadcast against cond
        # and flatten each argument.
        *promoted, cond = np.broadcast_arrays(*promoted, cond)
        return [arg.ravel() for arg in promoted]
    # np.extract returns flattened arrays, which are not broadcastable
    # together unless they are either the same size or size == 1, so
    # size-1 arguments are left untouched.
    shape = cond.shape
    reduced = []
    for arg in promoted:
        if np.size(arg) == 1:
            reduced.append(arg)
        else:
            reduced.append(np.extract(cond, np.broadcast_to(arg, shape)))
    return reduced
parse_arg_template = """
def _parse_args(self, %(shape_arg_str)s %(locscale_in)s):
return (%(shape_arg_str)s), %(locscale_out)s
def _parse_args_rvs(self, %(shape_arg_str)s %(locscale_in)s, size=None):
return self._argcheck_rvs(%(shape_arg_str)s %(locscale_out)s, size=size)
def _parse_args_stats(self, %(shape_arg_str)s %(locscale_in)s, moments='mv'):
return (%(shape_arg_str)s), %(locscale_out)s, moments
"""
# Both the continuous and discrete distributions depend on ncx2.
# The function name ncx2 is an abbreviation for noncentral chi squared.
def _ncx2_log_pdf(x, df, nc):
# We use (xs**2 + ns**2)/2 = (xs - ns)**2/2 + xs*ns, and include the
# factor of exp(-xs*ns) into the ive function to improve numerical
# stability at large values of xs. See also `rice.pdf`.
df2 = df/2.0 - 1.0
xs, ns = np.sqrt(x), np.sqrt(nc)
res = xlogy(df2/2.0, x/nc) - 0.5*(xs - ns)**2
corr = ive(df2, xs*ns) / 2.0
# Return res + np.log(corr) avoiding np.log(0)
return _lazywhere(
corr > 0,
(res, corr),
f=lambda r, c: r + np.log(c),
fillvalue=-np.inf)
def _ncx2_pdf(x, df, nc):
# Copy of _ncx2_log_pdf avoiding np.log(0) when corr = 0
df2 = df/2.0 - 1.0
xs, ns = np.sqrt(x), np.sqrt(nc)
res = xlogy(df2/2.0, x/nc) - 0.5*(xs - ns)**2
corr = ive(df2, xs*ns) / 2.0
return np.exp(res) * corr
def _ncx2_cdf(x, df, nc):
    # Noncentral chi-square cdf; delegates to scipy.special.chndtr.
    return chndtr(x, df, nc)
class rv_generic:
"""Class which encapsulates common functionality between rv_discrete
and rv_continuous.
"""
def __init__(self, seed=None):
super().__init__()
# figure out if _stats signature has 'moments' keyword
sig = _getfullargspec(self._stats)
self._stats_has_moments = ((sig.varkw is not None) or
('moments' in sig.args) or
('moments' in sig.kwonlyargs))
self._random_state = check_random_state(seed)
# For historical reasons, `size` was made an attribute that was read
# inside _rvs(). The code is being changed so that 'size'
# is an argument
# to self._rvs(). However some external (non-SciPy) distributions
# have not
# been updated. Maintain backwards compatibility by checking if
# the self._rvs() signature has the 'size' keyword, or a **kwarg,
# and if not set self._size inside self.rvs()
# before calling self._rvs().
argspec = inspect.getfullargspec(self._rvs)
self._rvs_uses_size_attribute = (argspec.varkw is None and
'size' not in argspec.args and
'size' not in argspec.kwonlyargs)
# Warn on first use only
self._rvs_size_warned = False
    @property
    def random_state(self):
        """Get or set the generator object for generating random variates.

        If `seed` is None (or `np.random`), the `numpy.random.RandomState`
        singleton is used.
        If `seed` is an int, a new ``RandomState`` instance is used,
        seeded with `seed`.
        If `seed` is already a ``Generator`` or ``RandomState`` instance then
        that instance is used.
        """
        return self._random_state

    @random_state.setter
    def random_state(self, seed):
        # Normalize any accepted seed form into a Generator/RandomState.
        self._random_state = check_random_state(seed)
    def __setstate__(self, state):
        """Restore state on unpickling; supports both the modern dict
        layout and the pre-SciPy-1.6 tuple layout."""
        try:
            self.__dict__.update(state)
            # attaches the dynamically created methods on each instance.
            # if a subclass overrides rv_generic.__setstate__, or implements
            # its own _attach_methods, then it must make sure that
            # _attach_argparser_methods is called.
            self._attach_methods()
        except ValueError:
            # reconstitute an old pickle scipy<1.6, that contains
            # (_ctor_param, random_state) as state
            self._ctor_param = state[0]
            self._random_state = state[1]
            self.__init__()
def _attach_methods(self):
"""Attaches dynamically created methods to the rv_* instance.
This method must be overridden by subclasses, and must itself call
_attach_argparser_methods. This method is called in __init__ in
subclasses, and in __setstate__
"""
raise NotImplementedError
    def _attach_argparser_methods(self):
        """
        Generates the argument-parsing functions dynamically and attaches
        them to the instance.

        Should be called from `_attach_methods`, typically in __init__ and
        during unpickling (__setstate__)
        """
        ns = {}
        # _parse_arg_template is generated by _construct_argparser from
        # trusted, internally-built source text (not user input).
        exec(self._parse_arg_template, ns)
        # NB: attach to the instance, not class
        for name in ['_parse_args', '_parse_args_stats', '_parse_args_rvs']:
            setattr(self, name, types.MethodType(ns[name], self))
    def _construct_argparser(
            self, meths_to_inspect, locscale_in, locscale_out):
        """Construct the parser string for the shape arguments.

        This method should be called in __init__ of a class for each
        distribution. It creates the `_parse_arg_template` attribute that is
        then used by `_attach_argparser_methods` to dynamically create and
        attach the `_parse_args`, `_parse_args_stats`, `_parse_args_rvs`
        methods to the instance.

        If self.shapes is a non-empty string, interprets it as a
        comma-separated list of shape parameters.

        Otherwise inspects the call signatures of `meths_to_inspect`
        and constructs the argument-parsing functions from these.
        In this case also sets `shapes` and `numargs`.
        """
        if self.shapes:
            # sanitize the user-supplied shapes
            if not isinstance(self.shapes, str):
                raise TypeError('shapes must be a string.')

            shapes = self.shapes.replace(',', ' ').split()

            # each shape name becomes an identifier in generated code, so it
            # must be a valid, non-keyword Python identifier
            for field in shapes:
                if keyword.iskeyword(field):
                    raise SyntaxError('keywords cannot be used as shapes.')
                if not re.match('^[_a-zA-Z][_a-zA-Z0-9]*$', field):
                    raise SyntaxError(
                        'shapes must be valid python identifiers')
        else:
            # find out the call signatures (_pdf, _cdf etc), deduce shape
            # arguments. Generic methods only have 'self, x', any further args
            # are shapes.
            shapes_list = []
            for meth in meths_to_inspect:
                shapes_args = _getfullargspec(meth)  # NB does not contain self
                args = shapes_args.args[1:]          # peel off 'x', too

                if args:
                    shapes_list.append(args)

                    # *args or **kwargs are not allowed w/automatic shapes
                    if shapes_args.varargs is not None:
                        raise TypeError(
                            '*args are not allowed w/out explicit shapes')
                    if shapes_args.varkw is not None:
                        raise TypeError(
                            '**kwds are not allowed w/out explicit shapes')
                    if shapes_args.kwonlyargs:
                        raise TypeError(
                            'kwonly args are not allowed w/out explicit shapes')
                    if shapes_args.defaults is not None:
                        raise TypeError('defaults are not allowed for shapes')

            if shapes_list:
                shapes = shapes_list[0]

                # make sure the signatures are consistent
                for item in shapes_list:
                    if item != shapes:
                        raise TypeError('Shape arguments are inconsistent.')
            else:
                shapes = []

        # have the arguments, construct the method from template
        shapes_str = ', '.join(shapes) + ', ' if shapes else ''  # NB: not None
        dct = dict(shape_arg_str=shapes_str,
                   locscale_in=locscale_in,
                   locscale_out=locscale_out,
                   )

        # this string is used by _attach_argparser_methods
        self._parse_arg_template = parse_arg_template % dct

        self.shapes = ', '.join(shapes) if shapes else None
        if not hasattr(self, 'numargs'):
            # allows more general subclassing with *args
            self.numargs = len(shapes)
    def _construct_doc(self, docdict, shapes_vals=None):
        """Construct the instance docstring with string substitutions."""
        tempdict = docdict.copy()
        tempdict['name'] = self.name or 'distname'
        tempdict['shapes'] = self.shapes or ''

        # example values used for the doctest-style usage example
        if shapes_vals is None:
            shapes_vals = ()
        vals = ', '.join('%.3g' % val for val in shapes_vals)
        tempdict['vals'] = vals

        tempdict['shapes_'] = self.shapes or ''
        if self.shapes and self.numargs == 1:
            # trailing comma keeps a single shape a valid tuple in examples
            tempdict['shapes_'] += ','

        if self.shapes:
            tempdict['set_vals_stmt'] = '>>> %s = %s' % (self.shapes, vals)
        else:
            tempdict['set_vals_stmt'] = ''

        if self.shapes is None:
            # remove shapes from call parameters if there are none
            for item in ['default', 'before_notes']:
                tempdict[item] = tempdict[item].replace(
                    "\n%(shapes)s : array_like\n    shape parameters", "")
        # two passes: docformat may itself leave %(shapes)s placeholders
        for i in range(2):
            if self.shapes is None:
                # necessary because we use %(shapes)s in two forms (w w/o ", ")
                self.__doc__ = self.__doc__.replace("%(shapes)s, ", "")
            try:
                self.__doc__ = doccer.docformat(self.__doc__, tempdict)
            except TypeError as e:
                raise Exception("Unable to construct docstring for "
                                "distribution \"%s\": %s" %
                                (self.name, repr(e))) from e

        # correct for empty shapes
        self.__doc__ = self.__doc__.replace('(, ', '(').replace(', )', ')')
def _construct_default_doc(self, longname=None, extradoc=None,
docdict=None, discrete='continuous'):
"""Construct instance docstring from the default template."""
if longname is None:
longname = 'A'
if extradoc is None:
extradoc = ''
if extradoc.startswith('\n\n'):
extradoc = extradoc[2:]
self.__doc__ = ''.join(['%s %s random variable.' % (longname, discrete),
'\n\n%(before_notes)s\n', docheaders['notes'],
extradoc, '\n%(example)s'])
self._construct_doc(docdict)
    def freeze(self, *args, **kwds):
        """Freeze the distribution for the given arguments.

        Parameters
        ----------
        arg1, arg2, arg3,... : array_like
            The shape parameter(s) for the distribution.  Should include all
            the non-optional arguments, may include ``loc`` and ``scale``.

        Returns
        -------
        rv_frozen : rv_frozen instance
            The frozen distribution.
        """
        return rv_frozen(self, *args, **kwds)

    def __call__(self, *args, **kwds):
        # Calling a distribution instance is a shorthand for freeze().
        return self.freeze(*args, **kwds)
    __call__.__doc__ = freeze.__doc__
# The actual calculation functions (no basic checking need be done)
# If these are defined, the others won't be looked at.
# Otherwise, the other set can be defined.
def _stats(self, *args, **kwds):
return None, None, None, None
    # Noncentral moments (also known as the moment about the origin).
    # Expressed in LaTeX, munp would be $\mu'_{n}$, i.e. "mu-sub-n-prime".
    # The primed mu is a widely used notation for the noncentral moment.
    def _munp(self, n, *args):
        # n-th noncentral moment, computed by generic (numerical) means.
        # Silence floating point warnings from integration.
        with np.errstate(all='ignore'):
            vals = self.generic_moment(n, *args)
        return vals
    def _argcheck_rvs(self, *args, **kwargs):
        """Broadcast the parameters of rvs() and validate `size`.

        Returns the broadcast shape parameters, loc, scale and the
        normalized output shape ``size_``.
        """
        # Handle broadcasting and size validation of the rvs method.
        # Subclasses should not have to override this method.
        # The rule is that if `size` is not None, then `size` gives the
        # shape of the result (integer values of `size` are treated as
        # tuples with length 1; i.e. `size=3` is the same as `size=(3,)`.)
        #
        # `args` is expected to contain the shape parameters (if any), the
        # location and the scale in a flat tuple (e.g. if there are two
        # shape parameters `a` and `b`, `args` will be `(a, b, loc, scale)`).
        # The only keyword argument expected is 'size'.
        size = kwargs.get('size', None)
        all_bcast = np.broadcast_arrays(*args)

        def squeeze_left(a):
            # strip leading length-1 dimensions, one at a time
            while a.ndim > 0 and a.shape[0] == 1:
                a = a[0]
            return a

        # Eliminate trivial leading dimensions.  In the convention
        # used by numpy's random variate generators, trivial leading
        # dimensions are effectively ignored.  In other words, when `size`
        # is given, trivial leading dimensions of the broadcast parameters
        # in excess of the number of dimensions in size are ignored, e.g.
        #   >>> np.random.normal([[1, 3, 5]], [[[[0.01]]]], size=3)
        #   array([ 1.00104267,  3.00422496,  4.99799278])
        # If `size` is not given, the exact broadcast shape is preserved:
        #   >>> np.random.normal([[1, 3, 5]], [[[[0.01]]]])
        #   array([[[[ 1.00862899,  3.00061431,  4.99867122]]]])
        #
        all_bcast = [squeeze_left(a) for a in all_bcast]
        bcast_shape = all_bcast[0].shape
        bcast_ndim = all_bcast[0].ndim

        if size is None:
            size_ = bcast_shape
        else:
            size_ = tuple(np.atleast_1d(size))

        # Check compatibility of size_ with the broadcast shape of all
        # the parameters.  This check is intended to be consistent with
        # how the numpy random variate generators (e.g. np.random.normal,
        # np.random.beta) handle their arguments.  The rule is that, if size
        # is given, it determines the shape of the output.  Broadcasting
        # can't change the output size.

        # This is the standard broadcasting convention of extending the
        # shape with fewer dimensions with enough dimensions of length 1
        # so that the two shapes have the same number of dimensions.
        ndiff = bcast_ndim - len(size_)
        if ndiff < 0:
            bcast_shape = (1,)*(-ndiff) + bcast_shape
        elif ndiff > 0:
            size_ = (1,)*ndiff + size_

        # This compatibility test is not standard.  In "regular" broadcasting,
        # two shapes are compatible if for each dimension, the lengths are the
        # same or one of the lengths is 1.  Here, the length of a dimension in
        # size_ must not be less than the corresponding length in bcast_shape.
        ok = all([bcdim == 1 or bcdim == szdim
                  for (bcdim, szdim) in zip(bcast_shape, size_)])
        if not ok:
            raise ValueError("size does not match the broadcast shape of "
                             "the parameters. %s, %s, %s" % (size, size_,
                                                             bcast_shape))

        param_bcast = all_bcast[:-2]
        loc_bcast = all_bcast[-2]
        scale_bcast = all_bcast[-1]

        return param_bcast, loc_bcast, scale_bcast, size_
# These are the methods you must define (standard form functions)
# NB: generic _pdf, _logpdf, _cdf are different for
# rv_continuous and rv_discrete hence are defined in there
def _argcheck(self, *args):
"""Default check for correct values on args and keywords.
Returns condition array of 1's where arguments are correct and
0's where they are not.
"""
cond = 1
for arg in args:
cond = logical_and(cond, (asarray(arg) > 0))
return cond
    def _get_support(self, *args, **kwargs):
        """Return the support of the (unscaled, unshifted) distribution.

        *Must* be overridden by distributions which have support dependent
        upon the shape parameters of the distribution.  Any such override
        *must not* set or change any of the class members, as these members
        are shared amongst all instances of the distribution.

        Parameters
        ----------
        arg1, arg2, ... : array_like
            The shape parameter(s) for the distribution (see docstring of the
            instance object for more information).

        Returns
        -------
        a, b : numeric (float, or int or +/-np.inf)
            end-points of the distribution's support for the specified
            shape parameters.
        """
        return self.a, self.b
def _support_mask(self, x, *args):
a, b = self._get_support(*args)
with np.errstate(invalid='ignore'):
return (a <= x) & (x <= b)
def _open_support_mask(self, x, *args):
a, b = self._get_support(*args)
with np.errstate(invalid='ignore'):
return (a < x) & (x < b)
    def _rvs(self, *args, size=None, random_state=None):
        # This method must handle size being a tuple, and it must
        # properly broadcast *args and size.  size might be
        # an empty tuple, which means a scalar random variate is to be
        # generated.

        # Use basic inverse cdf algorithm for RV generation as default:
        # if U ~ Uniform(0, 1), then ppf(U) follows this distribution.
        U = random_state.uniform(size=size)
        Y = self._ppf(U, *args)
        return Y
    def _logcdf(self, x, *args):
        # Generic log-CDF; divide='ignore' silences the log(0) -> -inf
        # warning where the CDF is exactly zero.
        with np.errstate(divide='ignore'):
            return log(self._cdf(x, *args))
def _sf(self, x, *args):
return 1.0-self._cdf(x, *args)
    def _logsf(self, x, *args):
        # Generic log-survival function; divide='ignore' silences the
        # log(0) -> -inf warning where the SF is exactly zero.
        with np.errstate(divide='ignore'):
            return log(self._sf(x, *args))
    def _ppf(self, q, *args):
        # Generic percent-point function via the vectorized numerical
        # inverse of the CDF (self._ppfvec).
        return self._ppfvec(q, *args)
def _isf(self, q, *args):
return self._ppf(1.0-q, *args) # use correct _ppf for subclasses
    # These are actually called, and should not be overwritten if you
    # want to keep error checking.
    def rvs(self, *args, **kwds):
        """Random variates of given type.

        Parameters
        ----------
        arg1, arg2, arg3,... : array_like
            The shape parameter(s) for the distribution (see docstring of the
            instance object for more information).
        loc : array_like, optional
            Location parameter (default=0).
        scale : array_like, optional
            Scale parameter (default=1).
        size : int or tuple of ints, optional
            Defining number of random variates (default is 1).
        random_state : {None, int, `numpy.random.Generator`,
                        `numpy.random.RandomState`}, optional

            If `seed` is None (or `np.random`), the `numpy.random.RandomState`
            singleton is used.
            If `seed` is an int, a new ``RandomState`` instance is used,
            seeded with `seed`.
            If `seed` is already a ``Generator`` or ``RandomState`` instance
            then that instance is used.

        Returns
        -------
        rvs : ndarray or scalar
            Random variates of given `size`.
        """
        discrete = kwds.pop('discrete', None)
        rndm = kwds.pop('random_state', None)
        args, loc, scale, size = self._parse_args_rvs(*args, **kwds)
        cond = logical_and(self._argcheck(*args), (scale >= 0))
        if not np.all(cond):
            message = ("Domain error in arguments. The `scale` parameter must "
                       "be positive for all distributions; see the "
                       "distribution documentation for other restrictions.")
            raise ValueError(message)

        if np.all(scale == 0):
            # degenerate (point-mass) case: every variate equals loc
            return loc*ones(size, 'd')

        # extra gymnastics needed for a custom random_state
        if rndm is not None:
            random_state_saved = self._random_state
            random_state = check_random_state(rndm)
        else:
            random_state = self._random_state

        # Maintain backwards compatibility by setting self._size
        # for distributions that still need it.
        if self._rvs_uses_size_attribute:
            if not self._rvs_size_warned:
                warnings.warn(
                    f'The signature of {self._rvs} does not contain '
                    f'a "size" keyword. Such signatures are deprecated.',
                    np.VisibleDeprecationWarning)
                self._rvs_size_warned = True
            self._size = size
            self._random_state = random_state
            vals = self._rvs(*args)
        else:
            vals = self._rvs(*args, size=size, random_state=random_state)

        # transform the standard variates: X = loc + scale * Y
        vals = vals * scale + loc

        # do not forget to restore the _random_state
        if rndm is not None:
            self._random_state = random_state_saved

        # Cast to int if discrete
        if discrete:
            if size == ():
                vals = int(vals)
            else:
                vals = vals.astype(int)

        return vals
    def stats(self, *args, **kwds):
        """Some statistics of the given RV.

        Parameters
        ----------
        arg1, arg2, arg3,... : array_like
            The shape parameter(s) for the distribution (see docstring of the
            instance object for more information)
        loc : array_like, optional
            location parameter (default=0)
        scale : array_like, optional (continuous RVs only)
            scale parameter (default=1)
        moments : str, optional
            composed of letters ['mvsk'] defining which moments to compute:
            'm' = mean,
            'v' = variance,
            's' = (Fisher's) skew,
            'k' = (Fisher's) kurtosis.
            (default is 'mv')

        Returns
        -------
        stats : sequence
            of requested moments.
        """
        args, loc, scale, moments = self._parse_args_stats(*args, **kwds)
        # scale = 1 by construction for discrete RVs
        loc, scale = map(asarray, (loc, scale))
        args = tuple(map(asarray, args))
        # loc == loc filters out nan locations
        cond = self._argcheck(*args) & (scale > 0) & (loc == loc)
        output = []
        default = np.full(shape(cond), fill_value=self.badvalue)

        # Use only entries that are valid in calculation
        if np.any(cond):
            goodargs = argsreduce(cond, *(args+(scale, loc)))
            scale, loc, goodargs = goodargs[-2], goodargs[-1], goodargs[:-2]

            if self._stats_has_moments:
                mu, mu2, g1, g2 = self._stats(*goodargs,
                                              **{'moments': moments})
            else:
                mu, mu2, g1, g2 = self._stats(*goodargs)

            # For each requested moment, fall back to noncentral moments
            # (_munp) when _stats did not supply the value.
            if 'm' in moments:
                if mu is None:
                    mu = self._munp(1, *goodargs)
                out0 = default.copy()
                place(out0, cond, mu * scale + loc)
                output.append(out0)

            if 'v' in moments:
                if mu2 is None:
                    mu2p = self._munp(2, *goodargs)
                    if mu is None:
                        mu = self._munp(1, *goodargs)
                    # if mean is inf then var is also inf
                    with np.errstate(invalid='ignore'):
                        mu2 = np.where(~np.isinf(mu), mu2p - mu**2, np.inf)
                out0 = default.copy()
                place(out0, cond, mu2 * scale * scale)
                output.append(out0)

            if 's' in moments:
                if g1 is None:
                    mu3p = self._munp(3, *goodargs)
                    if mu is None:
                        mu = self._munp(1, *goodargs)
                    if mu2 is None:
                        mu2p = self._munp(2, *goodargs)
                        mu2 = mu2p - mu * mu
                    with np.errstate(invalid='ignore'):
                        # third central moment from noncentral moments
                        mu3 = (-mu*mu - 3*mu2)*mu + mu3p
                        g1 = mu3 / np.power(mu2, 1.5)
                out0 = default.copy()
                place(out0, cond, g1)
                output.append(out0)

            if 'k' in moments:
                if g2 is None:
                    mu4p = self._munp(4, *goodargs)
                    if mu is None:
                        mu = self._munp(1, *goodargs)
                    if mu2 is None:
                        mu2p = self._munp(2, *goodargs)
                        mu2 = mu2p - mu * mu
                    if g1 is None:
                        mu3 = None
                    else:
                        # (mu2**1.5) breaks down for nan and inf
                        mu3 = g1 * np.power(mu2, 1.5)
                    if mu3 is None:
                        mu3p = self._munp(3, *goodargs)
                        with np.errstate(invalid='ignore'):
                            mu3 = (-mu * mu - 3 * mu2) * mu + mu3p
                    with np.errstate(invalid='ignore'):
                        # fourth central moment; g2 is the excess kurtosis
                        mu4 = ((-mu**2 - 6*mu2) * mu - 4*mu3)*mu + mu4p
                        g2 = mu4 / mu2**2.0 - 3.0
                out0 = default.copy()
                place(out0, cond, g2)
                output.append(out0)
        else:  # no valid args
            output = [default.copy() for _ in moments]

        if len(output) == 1:
            return output[0]
        else:
            return tuple(output)
    def entropy(self, *args, **kwds):
        """Differential entropy of the RV.

        Parameters
        ----------
        arg1, arg2, arg3,... : array_like
            The shape parameter(s) for the distribution (see docstring of the
            instance object for more information).
        loc : array_like, optional
            Location parameter (default=0).
        scale : array_like, optional  (continuous distributions only).
            Scale parameter (default=1).

        Notes
        -----
        Entropy is defined base `e`:

        >>> drv = rv_discrete(values=((0, 1), (0.5, 0.5)))
        >>> np.allclose(drv.entropy(), np.log(2.0))
        True

        """
        args, loc, scale = self._parse_args(*args, **kwds)
        # NB: for discrete distributions scale=1 by construction in _parse_args
        loc, scale = map(asarray, (loc, scale))
        args = tuple(map(asarray, args))
        # loc == loc filters out nan locations
        cond0 = self._argcheck(*args) & (scale > 0) & (loc == loc)
        output = zeros(shape(cond0), 'd')
        place(output, (1-cond0), self.badvalue)
        goodargs = argsreduce(cond0, scale, *args)
        goodscale = goodargs[0]
        goodargs = goodargs[1:]
        # entropy of scale*X + loc is entropy(X) + log(scale); loc drops out
        place(output, cond0, self.vecentropy(*goodargs) + log(goodscale))
        return output
def moment(self, order=None, *args, **kwds):
"""non-central moment of distribution of specified order.
.. deprecated:: 1.9.0
Parameter `n` is replaced by parameter `order` to avoid name
collisions with the shape parameter `n` of several distributions.
Parameter `n` will be removed in the second release after 1.9.0.
Parameters
----------
order : int, order >= 1
Order of moment.
arg1, arg2, arg3,... : float
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
"""
# This function was originally written with parameter `n`, but `n`
# is also the name of many distribution shape parameters.
# This block allows the function to accept both `n` and its
# replacement `order` during a deprecation period; it can be removed
# in the second release after 1.9.0.
# The logic to provide a DeprecationWarning only when `n` is passed
# as a keyword, accept the new keyword `order`, and otherwise be
# backward-compatible deserves explanation. We need to look out for
# the following:
# * Does the distribution have a shape named `n`?
# * Is `order` provided? It doesn't matter whether it is provided as a
# positional or keyword argument; it will be used as the order of the
# moment rather than a distribution shape parameter because:
# - The first positional argument of `moment` has always been the
# order of the moment.
# - The keyword `order` is new, so it's unambiguous that it refers to
# the order of the moment.
# * Is `n` provided as a keyword argument? It _does_ matter whether it
# is provided as a positional or keyword argument.
# - The first positional argument of `moment` has always been the
# order of moment, but
# - if `n` is provided as a keyword argument, its meaning depends
# on whether the distribution accepts `n` as a shape parameter.
has_shape_n = (self.shapes is not None
and "n" in (self.shapes.split(", ")))
got_order = order is not None
got_keyword_n = kwds.get("n", None) is not None
# These lead to the following cases.
# Case A: If the distribution _does_ accept `n` as a shape
# 1. If both `order` and `n` are provided, this is now OK:
# it is unambiguous that `order` is the order of the moment and `n`
# is the shape parameter. Previously, this would have caused an
# error because `n` was provided both as a keyword argument and
# as the first positional argument. I don't think it is credible for
# users to rely on this error in their code, though, so I don't see
# this as a backward compatibility break.
# 2. If only `n` is provided (as a keyword argument), this would have
# been an error in the past because `n` would have been treated as
# the order of the moment while the shape parameter would be
# missing. It is still the same type of error, but for a different
# reason: now, `n` is treated as the shape parameter while the
# order of the moment is missing.
# 3. If only `order` is provided, no special treament is needed.
# Clearly this value is intended to be the order of the moment,
# and the rest of the function will determine whether `n` is
# available as a shape parameter in `args`.
# 4. If neither `n` nor `order` is provided, this would have been an
# error (order of the moment is not provided) and it is still an
# error for the same reason.
# Case B: the distribution does _not_ accept `n` as a shape
# 1. If both `order` and `n` are provided, this was an error, and it
# still is an error: two values for same parameter.
# 2. If only `n` is provided (as a keyword argument), this was OK and
# is still OK, but there shold now be a `DeprecationWarning`. The
# value of `n` should be removed from `kwds` and stored in `order`.
# 3. If only `order` is provided, there was no problem before providing
# only the first argument of `moment`, and there is no problem with
# that now.
# 4. If neither `n` nor `order` is provided, this would have been an
# error (order of the moment is not provided), and it is still an
# error for the same reason.
if not got_order and ((not got_keyword_n) # A4 and B4
or (got_keyword_n and has_shape_n)): # A2
message = ("moment() missing 1 required "
"positional argument: `order`")
raise TypeError(message)
if got_keyword_n and not has_shape_n:
if got_order: # B1
# this will change to "moment got unexpected argument n"
message = "moment() got multiple values for first argument"
raise TypeError(message)
else: # B2
message = ("Use of keyword argument `n` for method "
"`moment` is deprecated. Use first positional "
"argument or keyword argument `order` instead.")
order = kwds.pop("n")
warnings.warn(message, DeprecationWarning, stacklevel=2)
n = order
# No special treatment of A1, A3, or B3 is needed because the order
# of the moment is now in variable `n` and the shape parameter, if
# needed, will be fished out of `args` or `kwds` by _parse_args
# A3 might still cause an error if the shape parameter called `n`
# is not found in `args`.
shapes, loc, scale = self._parse_args(*args, **kwds)
args = np.broadcast_arrays(*(*shapes, loc, scale))
*shapes, loc, scale = args
i0 = np.logical_and(self._argcheck(*shapes), scale > 0)
i1 = np.logical_and(i0, loc == 0)
i2 = np.logical_and(i0, loc != 0)
args = argsreduce(i0, *shapes, loc, scale)
*shapes, loc, scale = args
if (floor(n) != n):
raise ValueError("Moment must be an integer.")
if (n < 0):
raise ValueError("Moment must be positive.")
mu, mu2, g1, g2 = None, None, None, None
if (n > 0) and (n < 5):
if self._stats_has_moments:
mdict = {'moments': {1: 'm', 2: 'v', 3: 'vs', 4: 'vk'}[n]}
else:
mdict = {}
mu, mu2, g1, g2 = self._stats(*shapes, **mdict)
val = np.empty(loc.shape) # val needs to be indexed by loc
val[...] = _moment_from_stats(n, mu, mu2, g1, g2, self._munp, shapes)
# Convert to transformed X = L + S*Y
# E[X^n] = E[(L+S*Y)^n] = L^n sum(comb(n, k)*(S/L)^k E[Y^k], k=0...n)
result = zeros(i0.shape)
place(result, ~i0, self.badvalue)
if i1.any():
res1 = scale[loc == 0]**n * val[loc == 0]
place(result, i1, res1)
if i2.any():
mom = [mu, mu2, g1, g2]
arrs = [i for i in mom if i is not None]
idx = [i for i in range(4) if mom[i] is not None]
if any(idx):
arrs = argsreduce(loc != 0, *arrs)
j = 0
for i in idx:
mom[i] = arrs[j]
j += 1
mu, mu2, g1, g2 = mom
args = argsreduce(loc != 0, *shapes, loc, scale, val)
*shapes, loc, scale, val = args
res2 = zeros(loc.shape, dtype='d')
fac = scale / loc
for k in range(n):
valk = _moment_from_stats(k, mu, mu2, g1, g2, self._munp,
shapes)
res2 += comb(n, k, exact=True)*fac**k * valk
res2 += fac**n * val
res2 *= loc**n
place(result, i2, res2)
if result.ndim == 0:
return result.item()
return result
def median(self, *args, **kwds):
"""Median of the distribution.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
Location parameter, Default is 0.
scale : array_like, optional
Scale parameter, Default is 1.
Returns
-------
median : float
The median of the distribution.
See Also
--------
rv_discrete.ppf
Inverse of the CDF
"""
return self.ppf(0.5, *args, **kwds)
def mean(self, *args, **kwds):
"""Mean of the distribution.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
mean : float
the mean of the distribution
"""
kwds['moments'] = 'm'
res = self.stats(*args, **kwds)
if isinstance(res, ndarray) and res.ndim == 0:
return res[()]
return res
def var(self, *args, **kwds):
"""Variance of the distribution.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
var : float
the variance of the distribution
"""
kwds['moments'] = 'v'
res = self.stats(*args, **kwds)
if isinstance(res, ndarray) and res.ndim == 0:
return res[()]
return res
def std(self, *args, **kwds):
"""Standard deviation of the distribution.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
std : float
standard deviation of the distribution
"""
kwds['moments'] = 'v'
res = sqrt(self.stats(*args, **kwds))
return res
    def interval(self, confidence=None, *args, **kwds):
        """Confidence interval with equal areas around the median.

        .. deprecated:: 1.9.0
           Parameter `alpha` is replaced by parameter `confidence` to avoid
           name collisions with the shape parameter `alpha` of some
           distributions. Parameter `alpha` will be removed in the second
           release after 1.9.0.

        Parameters
        ----------
        confidence : array_like of float
            Probability that an rv will be drawn from the returned range.
            Each value should be in the range [0, 1].
        arg1, arg2, ... : array_like
            The shape parameter(s) for the distribution (see docstring of the
            instance object for more information).
        loc : array_like, optional
            location parameter, Default is 0.
        scale : array_like, optional
            scale parameter, Default is 1.

        Returns
        -------
        a, b : ndarray of float
            end-points of range that contain ``100 * alpha %`` of the rv's
            possible values.

        """
        # This function was originally written with parameter `alpha`, but
        # `alpha` is also the name of a shape parameter of two distributions.
        # This block allows the function to accept both `alpha` and its
        # replacement `confidence` during a deprecation period; it can be
        # removed in the second release after 1.9.0.
        # See description of logic in `moment` method.
        has_shape_alpha = (self.shapes is not None
                           and "alpha" in (self.shapes.split(", ")))
        got_confidence = confidence is not None
        got_keyword_alpha = kwds.get("alpha", None) is not None

        # confidence is missing when it was not given and keyword `alpha`
        # either is absent or is claimed by the shape parameter
        if not got_confidence and ((not got_keyword_alpha)
                                   or (got_keyword_alpha and has_shape_alpha)):
            message = ("interval() missing 1 required positional argument: "
                       "`confidence`")
            raise TypeError(message)

        if got_keyword_alpha and not has_shape_alpha:
            if got_confidence:
                # this will change to "interval got unexpected argument alpha"
                message = "interval() got multiple values for first argument"
                raise TypeError(message)
            else:
                message = ("Use of keyword argument `alpha` for method "
                           "`interval` is deprecated. Use first positional "
                           "argument or keyword argument `confidence` "
                           "instead.")
                confidence = kwds.pop("alpha")
                warnings.warn(message, DeprecationWarning, stacklevel=2)
        alpha = confidence

        alpha = asarray(alpha)
        if np.any((alpha > 1) | (alpha < 0)):
            raise ValueError("alpha must be between 0 and 1 inclusive")
        # equal tail areas: (1-alpha)/2 below, (1+alpha)/2 above
        q1 = (1.0-alpha)/2
        q2 = (1.0+alpha)/2
        a = self.ppf(q1, *args, **kwds)
        b = self.ppf(q2, *args, **kwds)
        return a, b
    def support(self, *args, **kwargs):
        """Support of the distribution.

        Parameters
        ----------
        arg1, arg2, ... : array_like
            The shape parameter(s) for the distribution (see docstring of the
            instance object for more information).
        loc : array_like, optional
            location parameter, Default is 0.
        scale : array_like, optional
            scale parameter, Default is 1.

        Returns
        -------
        a, b : array_like
            end-points of the distribution's support.

        """
        args, loc, scale = self._parse_args(*args, **kwargs)
        arrs = np.broadcast_arrays(*args, loc, scale)
        args, loc, scale = arrs[:-2], arrs[-2], arrs[-1]
        cond = self._argcheck(*args) & (scale > 0)
        _a, _b = self._get_support(*args)
        if cond.all():
            return _a * scale + loc, _b * scale + loc
        elif cond.ndim == 0:
            # scalar invalid arguments: return scalar badvalues
            return self.badvalue, self.badvalue
        # promote bounds to at least float to fill in the badvalue
        _a, _b = np.asarray(_a).astype('d'), np.asarray(_b).astype('d')
        out_a, out_b = _a * scale + loc, _b * scale + loc
        place(out_a, 1-cond, self.badvalue)
        place(out_b, 1-cond, self.badvalue)
        return out_a, out_b
    def nnlf(self, theta, x):
        """Negative loglikelihood function.

        Notes
        -----
        This is ``-sum(log pdf(x, theta), axis=0)`` where `theta` are the
        parameters (including loc and scale).
        """
        loc, scale, args = self._unpack_loc_scale(theta)
        if not self._argcheck(*args) or scale <= 0:
            # invalid parameters: infinitely unlikely
            return inf
        x = asarray((x-loc) / scale)
        # standardizing by scale contributes len(x)*log(scale) to the NLL
        n_log_scale = len(x) * log(scale)
        if np.any(~self._support_mask(x, *args)):
            # data outside the support: likelihood is zero
            return inf
        return self._nnlf(x, *args) + n_log_scale
    def _nnlf(self, x, *args):
        # Core negative log-likelihood on already-standardized data;
        # _logpxf is the log-pdf (continuous) or log-pmf (discrete).
        return -np.sum(self._logpxf(x, *args), axis=0)
    def _nnlf_and_penalty(self, x, args):
        # Negative log-likelihood with a finite penalty replacing the
        # infinite contribution of out-of-support or non-finite points,
        # which keeps numerical optimizers (fit) away from such regions.
        cond0 = ~self._support_mask(x, *args)
        n_bad = np.count_nonzero(cond0, axis=0)
        if n_bad > 0:
            # keep only the in-support observations
            x = argsreduce(~cond0, x)[0]
        logpxf = self._logpxf(x, *args)
        finite_logpxf = np.isfinite(logpxf)
        n_bad += np.sum(~finite_logpxf, axis=0)
        if n_bad > 0:
            # large but finite penalty per bad observation
            penalty = n_bad * log(_XMAX) * 100
            return -np.sum(logpxf[finite_logpxf], axis=0) + penalty
        return -np.sum(logpxf, axis=0)
    def _penalized_nnlf(self, theta, x):
        """Penalized negative loglikelihood function.

        i.e., - sum (log pdf(x, theta), axis=0) + penalty
        where theta are the parameters (including loc and scale)
        """
        loc, scale, args = self._unpack_loc_scale(theta)
        if not self._argcheck(*args) or scale <= 0:
            # invalid parameters: infinitely unlikely
            return inf
        x = asarray((x-loc) / scale)
        # standardizing by scale contributes len(x)*log(scale) to the NLL
        n_log_scale = len(x) * log(scale)
        return self._nnlf_and_penalty(x, args) + n_log_scale
class _ShapeInfo:
def __init__(self, name, integrality=False, domain=(-np.inf, np.inf),
inclusive=(True, True)):
self.name = name
self.integrality = integrality
domain = list(domain)
if np.isfinite(domain[0]) and not inclusive[0]:
domain[0] = np.nextafter(domain[0], np.inf)
if np.isfinite(domain[1]) and not inclusive[1]:
domain[1] = np.nextafter(domain[1], -np.inf)
self.domain = domain
def _get_fixed_fit_value(kwds, names):
"""
Given names such as `['f0', 'fa', 'fix_a']`, check that there is
at most one non-None value in `kwds` associaed with those names.
Return that value, or None if none of the names occur in `kwds`.
As a side effect, all occurrences of those names in `kwds` are
removed.
"""
vals = [(name, kwds.pop(name)) for name in names if name in kwds]
if len(vals) > 1:
repeated = [name for name, val in vals]
raise ValueError("fit method got multiple keyword arguments to "
"specify the same fixed parameter: " +
', '.join(repeated))
return vals[0][1] if vals else None
# continuous random variables: implement maybe later
#
# hf --- Hazard Function (PDF / SF)
# chf --- Cumulative hazard function (-log(SF))
# psf --- Probability sparsity function (reciprocal of the pdf) in
# units of percent-point-function (as a function of q).
# Also, the derivative of the percent-point function.
class rv_continuous(rv_generic):
"""A generic continuous random variable class meant for subclassing.
`rv_continuous` is a base class to construct specific distribution classes
and instances for continuous random variables. It cannot be used
directly as a distribution.
Parameters
----------
momtype : int, optional
The type of generic moment calculation to use: 0 for pdf, 1 (default)
for ppf.
a : float, optional
Lower bound of the support of the distribution, default is minus
infinity.
b : float, optional
Upper bound of the support of the distribution, default is plus
infinity.
xtol : float, optional
The tolerance for fixed point calculation for generic ppf.
badvalue : float, optional
The value in a result arrays that indicates a value that for which
some argument restriction is violated, default is np.nan.
name : str, optional
The name of the instance. This string is used to construct the default
example for distributions.
longname : str, optional
This string is used as part of the first line of the docstring returned
when a subclass has no docstring of its own. Note: `longname` exists
for backwards compatibility, do not use for new subclasses.
shapes : str, optional
The shape of the distribution. For example ``"m, n"`` for a
distribution that takes two integers as the two shape arguments for all
its methods. If not provided, shape parameters will be inferred from
the signature of the private methods, ``_pdf`` and ``_cdf`` of the
instance.
extradoc : str, optional, deprecated
This string is used as the last part of the docstring returned when a
subclass has no docstring of its own. Note: `extradoc` exists for
backwards compatibility, do not use for new subclasses.
seed : {None, int, `numpy.random.Generator`,
`numpy.random.RandomState`}, optional
If `seed` is None (or `np.random`), the `numpy.random.RandomState`
singleton is used.
If `seed` is an int, a new ``RandomState`` instance is used,
seeded with `seed`.
If `seed` is already a ``Generator`` or ``RandomState`` instance then
that instance is used.
Methods
-------
rvs
pdf
logpdf
cdf
logcdf
sf
logsf
ppf
isf
moment
stats
entropy
expect
median
mean
std
var
interval
__call__
fit
fit_loc_scale
nnlf
support
Notes
-----
Public methods of an instance of a distribution class (e.g., ``pdf``,
``cdf``) check their arguments and pass valid arguments to private,
computational methods (``_pdf``, ``_cdf``). For ``pdf(x)``, ``x`` is valid
if it is within the support of the distribution.
Whether a shape parameter is valid is decided by an ``_argcheck`` method
(which defaults to checking that its arguments are strictly positive.)
**Subclassing**
New random variables can be defined by subclassing the `rv_continuous` class
and re-defining at least the ``_pdf`` or the ``_cdf`` method (normalized
to location 0 and scale 1).
If positive argument checking is not correct for your RV
then you will also need to re-define the ``_argcheck`` method.
For most of the scipy.stats distributions, the support interval doesn't
depend on the shape parameters. ``x`` being in the support interval is
equivalent to ``self.a <= x <= self.b``. If either of the endpoints of
the support do depend on the shape parameters, then
i) the distribution must implement the ``_get_support`` method; and
ii) those dependent endpoints must be omitted from the distribution's
call to the ``rv_continuous`` initializer.
Correct, but potentially slow defaults exist for the remaining
methods but for speed and/or accuracy you can over-ride::
_logpdf, _cdf, _logcdf, _ppf, _rvs, _isf, _sf, _logsf
The default method ``_rvs`` relies on the inverse of the cdf, ``_ppf``,
applied to a uniform random variate. In order to generate random variates
efficiently, either the default ``_ppf`` needs to be overwritten (e.g.
if the inverse cdf can expressed in an explicit form) or a sampling
method needs to be implemented in a custom ``_rvs`` method.
If possible, you should override ``_isf``, ``_sf`` or ``_logsf``.
The main reason would be to improve numerical accuracy: for example,
the survival function ``_sf`` is computed as ``1 - _cdf`` which can
result in loss of precision if ``_cdf(x)`` is close to one.
**Methods that can be overwritten by subclasses**
::
_rvs
_pdf
_cdf
_sf
_ppf
_isf
_stats
_munp
_entropy
_argcheck
_get_support
There are additional (internal and private) generic methods that can
be useful for cross-checking and for debugging, but might not work in
all cases when directly called.
A note on ``shapes``: subclasses need not specify them explicitly. In this
case, `shapes` will be automatically deduced from the signatures of the
overridden methods (`pdf`, `cdf` etc).
If, for some reason, you prefer to avoid relying on introspection, you can
specify ``shapes`` explicitly as an argument to the instance constructor.
**Frozen Distributions**
Normally, you must provide shape parameters (and, optionally, location and
scale parameters) to each call of a method of a distribution.
Alternatively, the object may be called (as a function) to fix the shape,
location, and scale parameters returning a "frozen" continuous RV object:
rv = generic(<shape(s)>, loc=0, scale=1)
`rv_frozen` object with the same methods but holding the given shape,
location, and scale fixed
**Statistics**
Statistics are computed using numerical integration by default.
For speed you can redefine this using ``_stats``:
- take shape parameters and return mu, mu2, g1, g2
- If you can't compute one of these, return it as None
- Can also be defined with a keyword argument ``moments``, which is a
string composed of "m", "v", "s", and/or "k".
Only the components appearing in string should be computed and
returned in the order "m", "v", "s", or "k" with missing values
returned as None.
Alternatively, you can override ``_munp``, which takes ``n`` and shape
parameters and returns the n-th non-central moment of the distribution.
Examples
--------
To create a new Gaussian distribution, we would do the following:
>>> from scipy.stats import rv_continuous
>>> class gaussian_gen(rv_continuous):
... "Gaussian distribution"
... def _pdf(self, x):
... return np.exp(-x**2 / 2.) / np.sqrt(2.0 * np.pi)
>>> gaussian = gaussian_gen(name='gaussian')
``scipy.stats`` distributions are *instances*, so here we subclass
`rv_continuous` and create an instance. With this, we now have
a fully functional distribution with all relevant methods automagically
generated by the framework.
Note that above we defined a standard normal distribution, with zero mean
and unit variance. Shifting and scaling of the distribution can be done
by using ``loc`` and ``scale`` parameters: ``gaussian.pdf(x, loc, scale)``
essentially computes ``y = (x - loc) / scale`` and
``gaussian._pdf(y) / scale``.
"""
def __init__(self, momtype=1, a=None, b=None, xtol=1e-14,
             badvalue=None, name=None, longname=None,
             shapes=None, extradoc=None, seed=None):
    """Set up the distribution instance: record constructor parameters,
    fill in defaults, build the argument parsers / vectorized helpers,
    and construct the instance docstring."""
    super().__init__(seed)
    # save the ctor parameters, cf generic freeze
    self._ctor_param = dict(
        momtype=momtype, a=a, b=b, xtol=xtol,
        badvalue=badvalue, name=name, longname=longname,
        shapes=shapes, extradoc=extradoc, seed=seed)
    if badvalue is None:
        badvalue = nan
    if name is None:
        name = 'Distribution'
    self.badvalue = badvalue
    self.name = name
    self.a = a
    self.b = b
    # a None bound means an unbounded support on that side
    if a is None:
        self.a = -inf
    if b is None:
        self.b = inf
    self.xtol = xtol
    self.moment_type = momtype
    self.shapes = shapes
    self.extradoc = extradoc
    self._construct_argparser(meths_to_inspect=[self._pdf, self._cdf],
                              locscale_in='loc=0, scale=1',
                              locscale_out='loc, scale')
    self._attach_methods()
    if longname is None:
        # BUG FIX: the original tested ``name[0] in ['aeiouAEIOU']`` --
        # membership in a one-element *list* -- which is False for any
        # single character, so "An " was never selected.  Test membership
        # in the string of vowels instead.
        if name[0] in 'aeiouAEIOU':
            hstr = "An "
        else:
            hstr = "A "
        longname = hstr + name
    if sys.flags.optimize < 2:
        # Skip adding docstrings if interpreter is run with -OO
        if self.__doc__ is None:
            self._construct_default_doc(longname=longname,
                                        extradoc=extradoc,
                                        docdict=docdict,
                                        discrete='continuous')
        else:
            dct = dict(distcont)
            self._construct_doc(docdict, dct.get(self.name))
def __getstate__(self):
dct = self.__dict__.copy()
# these methods will be remade in __setstate__
# _random_state attribute is taken care of by rv_generic
attrs = ["_parse_args", "_parse_args_stats", "_parse_args_rvs",
"_cdfvec", "_ppfvec", "vecentropy", "generic_moment"]
[dct.pop(attr, None) for attr in attrs]
return dct
def _attach_methods(self):
    """
    Attaches dynamically created methods to the rv_continuous instance.
    """
    # _attach_methods is responsible for calling _attach_argparser_methods
    self._attach_argparser_methods()
    # nin correction
    self._ppfvec = vectorize(self._ppf_single, otypes='d')
    self._ppfvec.nin = self.numargs + 1
    self.vecentropy = vectorize(self._entropy, otypes='d')
    self._cdfvec = vectorize(self._cdf_single, otypes='d')
    self._cdfvec.nin = self.numargs + 1
    # generic moment depends on momtype chosen at construction:
    # momtype 0 integrates the pdf, momtype 1 integrates the ppf
    if self.moment_type == 0:
        self.generic_moment = vectorize(self._mom0_sc, otypes='d')
    else:
        self.generic_moment = vectorize(self._mom1_sc, otypes='d')
    # Because of the *args argument of _mom0_sc, vectorize cannot count the
    # number of arguments correctly.
    self.generic_moment.nin = self.numargs + 1
def _updated_ctor_param(self):
"""Return the current version of _ctor_param, possibly updated by user.
Used by freezing.
Keep this in sync with the signature of __init__.
"""
dct = self._ctor_param.copy()
dct['a'] = self.a
dct['b'] = self.b
dct['xtol'] = self.xtol
dct['badvalue'] = self.badvalue
dct['name'] = self.name
dct['shapes'] = self.shapes
dct['extradoc'] = self.extradoc
return dct
def _ppf_to_solve(self, x, q, *args):
return self.cdf(*(x, )+args)-q
def _ppf_single(self, q, *args):
    # Generic scalar ppf: bracket the root of cdf(x) - q, expanding any
    # infinite support endpoint geometrically, then solve with Brent's
    # method.
    factor = 10.
    left, right = self._get_support(*args)
    if np.isinf(left):
        left = min(-factor, right)
        while self._ppf_to_solve(left, q, *args) > 0.:
            left, right = left * factor, left
        # left is now such that cdf(left) <= q
        # if right has changed, then cdf(right) > q
    if np.isinf(right):
        right = max(factor, left)
        while self._ppf_to_solve(right, q, *args) < 0.:
            left, right = right, right * factor
        # right is now such that cdf(right) >= q
    return optimize.brentq(self._ppf_to_solve,
                           left, right, args=(q,)+args, xtol=self.xtol)
# moment from definition
def _mom_integ0(self, x, m, *args):
return x**m * self.pdf(x, *args)
def _mom0_sc(self, m, *args):
_a, _b = self._get_support(*args)
return integrate.quad(self._mom_integ0, _a, _b,
args=(m,)+args)[0]
# moment calculated using ppf
def _mom_integ1(self, q, m, *args):
return (self.ppf(q, *args))**m
def _mom1_sc(self, m, *args):
return integrate.quad(self._mom_integ1, 0, 1, args=(m,)+args)[0]
def _pdf(self, x, *args):
    # Default pdf: 5-point numerical derivative of the cdf.
    # Subclasses normally override this with an analytic form.
    return derivative(self._cdf, x, dx=1e-5, args=args, order=5)
# Could also define any of these
def _logpdf(self, x, *args):
p = self._pdf(x, *args)
with np.errstate(divide='ignore'):
return log(p)
def _logpxf(self, x, *args):
# continuous distributions have PDF, discrete have PMF, but sometimes
# the distinction doesn't matter. This lets us use `_logpxf` for both
# discrete and continuous distributions.
return self._logpdf(x, *args)
def _cdf_single(self, x, *args):
_a, _b = self._get_support(*args)
return integrate.quad(self._pdf, _a, x, args=args)[0]
def _cdf(self, x, *args):
    # Default vectorized cdf: delegates to the np.vectorize wrapper of
    # _cdf_single that _attach_methods created.
    return self._cdfvec(x, *args)
# generic _argcheck, _logcdf, _sf, _logsf, _ppf, _isf, _rvs are defined
# in rv_generic
def pdf(self, x, *args, **kwds):
    """Probability density function at x of the given RV.
    Parameters
    ----------
    x : array_like
        quantiles
    arg1, arg2, arg3,... : array_like
        The shape parameter(s) for the distribution (see docstring of the
        instance object for more information)
    loc : array_like, optional
        location parameter (default=0)
    scale : array_like, optional
        scale parameter (default=1)
    Returns
    -------
    pdf : ndarray
        Probability density function evaluated at x
    """
    args, loc, scale = self._parse_args(*args, **kwds)
    x, loc, scale = map(asarray, (x, loc, scale))
    args = tuple(map(asarray, args))
    # promote to at least float64 before standardizing x
    dtyp = np.find_common_type([x.dtype, np.float64], [])
    x = np.asarray((x - loc)/scale, dtype=dtyp)
    cond0 = self._argcheck(*args) & (scale > 0)          # parameters valid
    cond1 = self._support_mask(x, *args) & (scale > 0)   # x inside support
    cond = cond0 & cond1
    output = zeros(shape(cond), dtyp)
    # invalid parameters or nan input -> badvalue
    putmask(output, (1-cond0)+np.isnan(x), self.badvalue)
    if np.any(cond):
        # evaluate _pdf only on the valid entries
        goodargs = argsreduce(cond, *((x,)+args+(scale,)))
        scale, goodargs = goodargs[-1], goodargs[:-1]
        # density of the scaled variable: pdf((x-loc)/scale) / scale
        place(output, cond, self._pdf(*goodargs) / scale)
    if output.ndim == 0:
        # 0-d array -> return a scalar
        return output[()]
    return output
def logpdf(self, x, *args, **kwds):
    """Log of the probability density function at x of the given RV.
    This uses a more numerically accurate calculation if available.
    Parameters
    ----------
    x : array_like
        quantiles
    arg1, arg2, arg3,... : array_like
        The shape parameter(s) for the distribution (see docstring of the
        instance object for more information)
    loc : array_like, optional
        location parameter (default=0)
    scale : array_like, optional
        scale parameter (default=1)
    Returns
    -------
    logpdf : array_like
        Log of the probability density function evaluated at x
    """
    args, loc, scale = self._parse_args(*args, **kwds)
    x, loc, scale = map(asarray, (x, loc, scale))
    args = tuple(map(asarray, args))
    # promote to at least float64 before standardizing x
    dtyp = np.find_common_type([x.dtype, np.float64], [])
    x = np.asarray((x - loc)/scale, dtype=dtyp)
    cond0 = self._argcheck(*args) & (scale > 0)          # parameters valid
    cond1 = self._support_mask(x, *args) & (scale > 0)   # x inside support
    cond = cond0 & cond1
    # outside the support the log-density is -inf
    output = empty(shape(cond), dtyp)
    output.fill(NINF)
    # invalid parameters or nan input -> badvalue
    putmask(output, (1-cond0)+np.isnan(x), self.badvalue)
    if np.any(cond):
        goodargs = argsreduce(cond, *((x,)+args+(scale,)))
        scale, goodargs = goodargs[-1], goodargs[:-1]
        # log(pdf(x)/scale) == _logpdf(x) - log(scale)
        place(output, cond, self._logpdf(*goodargs) - log(scale))
    if output.ndim == 0:
        return output[()]
    return output
def cdf(self, x, *args, **kwds):
    """
    Cumulative distribution function of the given RV.
    Parameters
    ----------
    x : array_like
        quantiles
    arg1, arg2, arg3,... : array_like
        The shape parameter(s) for the distribution (see docstring of the
        instance object for more information)
    loc : array_like, optional
        location parameter (default=0)
    scale : array_like, optional
        scale parameter (default=1)
    Returns
    -------
    cdf : ndarray
        Cumulative distribution function evaluated at `x`
    """
    args, loc, scale = self._parse_args(*args, **kwds)
    x, loc, scale = map(asarray, (x, loc, scale))
    args = tuple(map(asarray, args))
    _a, _b = self._get_support(*args)
    # promote to at least float64 before standardizing x
    dtyp = np.find_common_type([x.dtype, np.float64], [])
    x = np.asarray((x - loc)/scale, dtype=dtyp)
    cond0 = self._argcheck(*args) & (scale > 0)              # params valid
    cond1 = self._open_support_mask(x, *args) & (scale > 0)  # interior x
    cond2 = (x >= np.asarray(_b)) & cond0                    # above support
    cond = cond0 & cond1
    output = zeros(shape(cond), dtyp)
    # invalid parameters or nan input -> badvalue
    place(output, (1-cond0)+np.isnan(x), self.badvalue)
    # from the upper support bound on, the cdf is exactly 1
    place(output, cond2, 1.0)
    if np.any(cond):  # call only if at least 1 entry
        goodargs = argsreduce(cond, *((x,)+args))
        place(output, cond, self._cdf(*goodargs))
    if output.ndim == 0:
        return output[()]
    return output
def logcdf(self, x, *args, **kwds):
    """Log of the cumulative distribution function at x of the given RV.
    Parameters
    ----------
    x : array_like
        quantiles
    arg1, arg2, arg3,... : array_like
        The shape parameter(s) for the distribution (see docstring of the
        instance object for more information)
    loc : array_like, optional
        location parameter (default=0)
    scale : array_like, optional
        scale parameter (default=1)
    Returns
    -------
    logcdf : array_like
        Log of the cumulative distribution function evaluated at x
    """
    args, loc, scale = self._parse_args(*args, **kwds)
    x, loc, scale = map(asarray, (x, loc, scale))
    args = tuple(map(asarray, args))
    _a, _b = self._get_support(*args)
    # promote to at least float64 before standardizing x
    dtyp = np.find_common_type([x.dtype, np.float64], [])
    x = np.asarray((x - loc)/scale, dtype=dtyp)
    cond0 = self._argcheck(*args) & (scale > 0)              # params valid
    cond1 = self._open_support_mask(x, *args) & (scale > 0)  # interior x
    cond2 = (x >= _b) & cond0                                # above support
    cond = cond0 & cond1
    # below/outside the support the log-cdf is -inf
    output = empty(shape(cond), dtyp)
    output.fill(NINF)
    # invalid parameters or nan input -> badvalue.
    # NOTE(review): ``(cond1 == cond1)`` is an all-True array; presumably
    # used here to broadcast the mask to the output shape -- this line
    # differs from the sibling methods (cdf/sf), confirm intent.
    place(output, (1-cond0)*(cond1 == cond1)+np.isnan(x), self.badvalue)
    # at/above the upper support bound, cdf == 1 so log-cdf == 0
    place(output, cond2, 0.0)
    if np.any(cond):  # call only if at least 1 entry
        goodargs = argsreduce(cond, *((x,)+args))
        place(output, cond, self._logcdf(*goodargs))
    if output.ndim == 0:
        return output[()]
    return output
def sf(self, x, *args, **kwds):
    """Survival function (1 - `cdf`) at x of the given RV.
    Parameters
    ----------
    x : array_like
        quantiles
    arg1, arg2, arg3,... : array_like
        The shape parameter(s) for the distribution (see docstring of the
        instance object for more information)
    loc : array_like, optional
        location parameter (default=0)
    scale : array_like, optional
        scale parameter (default=1)
    Returns
    -------
    sf : array_like
        Survival function evaluated at x
    """
    args, loc, scale = self._parse_args(*args, **kwds)
    x, loc, scale = map(asarray, (x, loc, scale))
    args = tuple(map(asarray, args))
    _a, _b = self._get_support(*args)
    # promote to at least float64 before standardizing x
    dtyp = np.find_common_type([x.dtype, np.float64], [])
    x = np.asarray((x - loc)/scale, dtype=dtyp)
    cond0 = self._argcheck(*args) & (scale > 0)              # params valid
    cond1 = self._open_support_mask(x, *args) & (scale > 0)  # interior x
    cond2 = cond0 & (x <= _a)                                # below support
    cond = cond0 & cond1
    output = zeros(shape(cond), dtyp)
    # invalid parameters or nan input -> badvalue
    place(output, (1-cond0)+np.isnan(x), self.badvalue)
    # at/below the lower support bound the survival function is 1
    place(output, cond2, 1.0)
    if np.any(cond):
        goodargs = argsreduce(cond, *((x,)+args))
        place(output, cond, self._sf(*goodargs))
    if output.ndim == 0:
        return output[()]
    return output
def logsf(self, x, *args, **kwds):
    """Log of the survival function of the given RV.
    Returns the log of the "survival function," defined as (1 - `cdf`),
    evaluated at `x`.
    Parameters
    ----------
    x : array_like
        quantiles
    arg1, arg2, arg3,... : array_like
        The shape parameter(s) for the distribution (see docstring of the
        instance object for more information)
    loc : array_like, optional
        location parameter (default=0)
    scale : array_like, optional
        scale parameter (default=1)
    Returns
    -------
    logsf : ndarray
        Log of the survival function evaluated at `x`.
    """
    args, loc, scale = self._parse_args(*args, **kwds)
    x, loc, scale = map(asarray, (x, loc, scale))
    args = tuple(map(asarray, args))
    _a, _b = self._get_support(*args)
    # promote to at least float64 before standardizing x
    dtyp = np.find_common_type([x.dtype, np.float64], [])
    x = np.asarray((x - loc)/scale, dtype=dtyp)
    cond0 = self._argcheck(*args) & (scale > 0)              # params valid
    cond1 = self._open_support_mask(x, *args) & (scale > 0)  # interior x
    cond2 = cond0 & (x <= _a)                                # below support
    cond = cond0 & cond1
    # above the support the log-sf is -inf
    output = empty(shape(cond), dtyp)
    output.fill(NINF)
    # invalid parameters or nan input -> badvalue
    place(output, (1-cond0)+np.isnan(x), self.badvalue)
    # at/below the lower support bound, sf == 1 so log-sf == 0
    place(output, cond2, 0.0)
    if np.any(cond):
        goodargs = argsreduce(cond, *((x,)+args))
        place(output, cond, self._logsf(*goodargs))
    if output.ndim == 0:
        return output[()]
    return output
def ppf(self, q, *args, **kwds):
    """Percent point function (inverse of `cdf`) at q of the given RV.
    Parameters
    ----------
    q : array_like
        lower tail probability
    arg1, arg2, arg3,... : array_like
        The shape parameter(s) for the distribution (see docstring of the
        instance object for more information)
    loc : array_like, optional
        location parameter (default=0)
    scale : array_like, optional
        scale parameter (default=1)
    Returns
    -------
    x : array_like
        quantile corresponding to the lower tail probability q.
    """
    args, loc, scale = self._parse_args(*args, **kwds)
    q, loc, scale = map(asarray, (q, loc, scale))
    args = tuple(map(asarray, args))
    _a, _b = self._get_support(*args)
    # ``loc == loc`` filters out nan locations
    cond0 = self._argcheck(*args) & (scale > 0) & (loc == loc)
    cond1 = (0 < q) & (q < 1)
    cond2 = cond0 & (q == 0)   # q == 0 -> lower support endpoint
    cond3 = cond0 & (q == 1)   # q == 1 -> upper support endpoint
    cond = cond0 & cond1
    # default: badvalue everywhere we cannot compute
    output = np.full(shape(cond), fill_value=self.badvalue)
    lower_bound = _a * scale + loc
    upper_bound = _b * scale + loc
    place(output, cond2, argsreduce(cond2, lower_bound)[0])
    place(output, cond3, argsreduce(cond3, upper_bound)[0])
    if np.any(cond):  # call only if at least 1 entry
        goodargs = argsreduce(cond, *((q,)+args+(scale, loc)))
        scale, loc, goodargs = goodargs[-2], goodargs[-1], goodargs[:-2]
        # un-standardize the standard quantile
        place(output, cond, self._ppf(*goodargs) * scale + loc)
    if output.ndim == 0:
        return output[()]
    return output
def isf(self, q, *args, **kwds):
    """Inverse survival function (inverse of `sf`) at q of the given RV.
    Parameters
    ----------
    q : array_like
        upper tail probability
    arg1, arg2, arg3,... : array_like
        The shape parameter(s) for the distribution (see docstring of the
        instance object for more information)
    loc : array_like, optional
        location parameter (default=0)
    scale : array_like, optional
        scale parameter (default=1)
    Returns
    -------
    x : ndarray or scalar
        Quantile corresponding to the upper tail probability q.
    """
    args, loc, scale = self._parse_args(*args, **kwds)
    q, loc, scale = map(asarray, (q, loc, scale))
    args = tuple(map(asarray, args))
    _a, _b = self._get_support(*args)
    # ``loc == loc`` filters out nan locations
    cond0 = self._argcheck(*args) & (scale > 0) & (loc == loc)
    cond1 = (0 < q) & (q < 1)
    cond2 = cond0 & (q == 1)   # q == 1 -> lower support endpoint
    cond3 = cond0 & (q == 0)   # q == 0 -> upper support endpoint
    cond = cond0 & cond1
    # default: badvalue everywhere we cannot compute
    output = np.full(shape(cond), fill_value=self.badvalue)
    lower_bound = _a * scale + loc
    upper_bound = _b * scale + loc
    place(output, cond2, argsreduce(cond2, lower_bound)[0])
    place(output, cond3, argsreduce(cond3, upper_bound)[0])
    if np.any(cond):
        goodargs = argsreduce(cond, *((q,)+args+(scale, loc)))
        scale, loc, goodargs = goodargs[-2], goodargs[-1], goodargs[:-2]
        # un-standardize the standard quantile
        place(output, cond, self._isf(*goodargs) * scale + loc)
    if output.ndim == 0:
        return output[()]
    return output
def _unpack_loc_scale(self, theta):
try:
loc = theta[-2]
scale = theta[-1]
args = tuple(theta[:-2])
except IndexError as e:
raise ValueError("Not enough input arguments.") from e
return loc, scale, args
def _fitstart(self, data, args=None):
"""Starting point for fit (shape arguments + loc + scale)."""
if args is None:
args = (1.0,)*self.numargs
loc, scale = self._fit_loc_scale_support(data, *args)
return args + (loc, scale)
def _reduce_func(self, args, kwds, data=None):
    """
    Return the (possibly reduced) function to optimize in order to find MLE
    estimates for the .fit method.
    """
    # Convert fixed shape parameters to the standard numeric form: e.g. for
    # stats.beta, shapes='a, b'. To fix `a`, the caller can give a value
    # for `f0`, `fa` or 'fix_a'. The following converts the latter two
    # into the first (numeric) form.
    shapes = []
    if self.shapes:
        shapes = self.shapes.replace(',', ' ').split()
        for j, s in enumerate(shapes):
            key = 'f' + str(j)
            names = [key, 'f' + s, 'fix_' + s]
            val = _get_fixed_fit_value(kwds, names)
            if val is not None:
                kwds[key] = val
    args = list(args)
    Nargs = len(args)
    fixedn = []
    names = ['f%d' % n for n in range(Nargs - 2)] + ['floc', 'fscale']
    x0 = []
    # Split the parameter vector into fixed entries (values taken from
    # kwds and pinned in ``args``) and free entries (their starting
    # values collected in ``x0`` for the optimizer).
    for n, key in enumerate(names):
        if key in kwds:
            fixedn.append(n)
            args[n] = kwds.pop(key)
        else:
            x0.append(args[n])
    methods = {"mle", "mm"}
    method = kwds.pop('method', "mle").lower()
    if method == "mm":
        # Method of moments: match as many raw data moments as there are
        # free parameters.
        n_params = len(shapes) + 2 - len(fixedn)
        exponents = (np.arange(1, n_params+1))[:, np.newaxis]
        data_moments = np.sum(data[None, :]**exponents/len(data), axis=1)
        def objective(theta, x):
            return self._moment_error(theta, x, data_moments)
    elif method == "mle":
        objective = self._penalized_nnlf
    else:
        raise ValueError("Method '{0}' not available; must be one of {1}"
                         .format(method, methods))
    if len(fixedn) == 0:
        # nothing fixed: optimize the objective directly
        func = objective
        restore = None
    else:
        if len(fixedn) == Nargs:
            raise ValueError(
                "All parameters fixed. There is nothing to optimize.")
        def restore(args, theta):
            # Replace with theta for all numbers not in fixedn
            # This allows the non-fixed values to vary, but
            # we still call self.nnlf with all parameters.
            i = 0
            for n in range(Nargs):
                if n not in fixedn:
                    args[n] = theta[i]
                    i += 1
            return args
        def func(theta, x):
            # rebuild the full parameter vector before evaluating
            newtheta = restore(args[:], theta)
            return objective(newtheta, x)
    return x0, func, restore, args
def _moment_error(self, theta, x, data_moments):
loc, scale, args = self._unpack_loc_scale(theta)
if not self._argcheck(*args) or scale <= 0:
return inf
dist_moments = np.array([self.moment(i+1, *args, loc=loc, scale=scale)
for i in range(len(data_moments))])
if np.any(np.isnan(dist_moments)):
raise ValueError("Method of moments encountered a non-finite "
"distribution moment and cannot continue. "
"Consider trying method='MLE'.")
return (((data_moments - dist_moments) /
np.maximum(np.abs(data_moments), 1e-8))**2).sum()
def fit(self, data, *args, **kwds):
    """
    Return estimates of shape (if applicable), location, and scale
    parameters from data. The default estimation method is Maximum
    Likelihood Estimation (MLE), but Method of Moments (MM)
    is also available.
    Starting estimates for
    the fit are given by input arguments; for any arguments not provided
    with starting estimates, ``self._fitstart(data)`` is called to generate
    such.
    One can hold some parameters fixed to specific values by passing in
    keyword arguments ``f0``, ``f1``, ..., ``fn`` (for shape parameters)
    and ``floc`` and ``fscale`` (for location and scale parameters,
    respectively).
    Parameters
    ----------
    data : array_like
        Data to use in estimating the distribution parameters.
    arg1, arg2, arg3,... : floats, optional
        Starting value(s) for any shape-characterizing arguments (those not
        provided will be determined by a call to ``_fitstart(data)``).
        No default value.
    **kwds : floats, optional
        - `loc`: initial guess of the distribution's location parameter.
        - `scale`: initial guess of the distribution's scale parameter.
        Special keyword arguments are recognized as holding certain
        parameters fixed:
        - f0...fn : hold respective shape parameters fixed.
          Alternatively, shape parameters to fix can be specified by name.
          For example, if ``self.shapes == "a, b"``, ``fa`` and ``fix_a``
          are equivalent to ``f0``, and ``fb`` and ``fix_b`` are
          equivalent to ``f1``.
        - floc : hold location parameter fixed to specified value.
        - fscale : hold scale parameter fixed to specified value.
        - optimizer : The optimizer to use.
          The optimizer must take ``func``,
          and starting position as the first two arguments,
          plus ``args`` (for extra arguments to pass to the
          function to be optimized) and ``disp=0`` to suppress
          output as keyword arguments.
        - method : The method to use. The default is "MLE" (Maximum
          Likelihood Estimate); "MM" (Method of Moments)
          is also available.
    Returns
    -------
    parameter_tuple : tuple of floats
        Estimates for any shape parameters (if applicable),
        followed by those for location and scale.
        For most random variables, shape statistics
        will be returned, but there are exceptions (e.g. ``norm``).
    Notes
    -----
    With ``method="MLE"`` (default), the fit is computed by minimizing
    the negative log-likelihood function. A large, finite penalty
    (rather than infinite negative log-likelihood) is applied for
    observations beyond the support of the distribution.
    With ``method="MM"``, the fit is computed by minimizing the L2 norm
    of the relative errors between the first *k* raw (about zero) data
    moments and the corresponding distribution moments, where *k* is the
    number of non-fixed parameters.
    More precisely, the objective function is::
        (((data_moments - dist_moments)
          / np.maximum(np.abs(data_moments), 1e-8))**2).sum()
    where the constant ``1e-8`` avoids division by zero in case of
    vanishing data moments. Typically, this error norm can be reduced to
    zero.
    Note that the standard method of moments can produce parameters for
    which some data are outside the support of the fitted distribution;
    this implementation does nothing to prevent this.
    For either method,
    the returned answer is not guaranteed to be globally optimal; it
    may only be locally optimal, or the optimization may fail altogether.
    If the data contain any of ``np.nan``, ``np.inf``, or ``-np.inf``,
    the `fit` method will raise a ``RuntimeError``.
    Examples
    --------
    Generate some data to fit: draw random variates from the `beta`
    distribution
    >>> from scipy.stats import beta
    >>> a, b = 1., 2.
    >>> x = beta.rvs(a, b, size=1000)
    Now we can fit all four parameters (``a``, ``b``, ``loc``
    and ``scale``):
    >>> a1, b1, loc1, scale1 = beta.fit(x)
    We can also use some prior knowledge about the dataset: let's keep
    ``loc`` and ``scale`` fixed:
    >>> a1, b1, loc1, scale1 = beta.fit(x, floc=0, fscale=1)
    >>> loc1, scale1
    (0, 1)
    We can also keep shape parameters fixed by using ``f``-keywords. To
    keep the zero-th shape parameter ``a`` equal 1, use ``f0=1`` or,
    equivalently, ``fa=1``:
    >>> a1, b1, loc1, scale1 = beta.fit(x, fa=1, floc=0, fscale=1)
    >>> a1
    1
    Not all distributions return estimates for the shape parameters.
    ``norm`` for example just returns estimates for location and scale:
    >>> from scipy.stats import norm
    >>> x = norm.rvs(a, b, size=1000, random_state=123)
    >>> loc1, scale1 = norm.fit(x)
    >>> loc1, scale1
    (0.92087172783841631, 2.0015750750324668)
    """
    data = np.asarray(data)
    method = kwds.get('method', "mle").lower()
    # memory for method of moments
    Narg = len(args)
    if Narg > self.numargs:
        raise TypeError("Too many input arguments.")
    if not np.isfinite(data).all():
        raise RuntimeError("The data contains non-finite values.")
    start = [None]*2
    if (Narg < self.numargs) or not ('loc' in kwds and
                                     'scale' in kwds):
        # get distribution specific starting locations
        start = self._fitstart(data)
        args += start[Narg:-2]
    loc = kwds.pop('loc', start[-2])
    scale = kwds.pop('scale', start[-1])
    args += (loc, scale)
    # build the objective, reduced to the non-fixed parameters
    x0, func, restore, args = self._reduce_func(args, kwds, data=data)
    optimizer = kwds.pop('optimizer', optimize.fmin)
    # convert string to function in scipy.optimize
    optimizer = _fit_determine_optimizer(optimizer)
    # by now kwds must be empty, since everybody took what they needed
    if kwds:
        raise TypeError("Unknown arguments: %s." % kwds)
    # In some cases, method of moments can be done with fsolve/root
    # instead of an optimizer, but sometimes no solution exists,
    # especially when the user fixes parameters. Minimizing the sum
    # of squares of the error generalizes to these cases.
    vals = optimizer(func, x0, args=(ravel(data),), disp=0)
    obj = func(vals, data)
    if restore is not None:
        # reinsert the fixed parameters at their original positions
        vals = restore(args, vals)
    vals = tuple(vals)
    loc, scale, shapes = self._unpack_loc_scale(vals)
    if not (np.all(self._argcheck(*shapes)) and scale > 0):
        raise Exception("Optimization converged to parameters that are "
                        "outside the range allowed by the distribution.")
    if method == 'mm':
        if not np.isfinite(obj):
            raise Exception("Optimization failed: either a data moment "
                            "or fitted distribution moment is "
                            "non-finite.")
    return vals
def _fit_loc_scale_support(self, data, *args):
    """Estimate loc and scale parameters from data accounting for support.
    Parameters
    ----------
    data : array_like
        Data to fit.
    arg1, arg2, arg3,... : array_like
        The shape parameter(s) for the distribution (see docstring of the
        instance object for more information).
    Returns
    -------
    Lhat : float
        Estimated location parameter for the data.
    Shat : float
        Estimated scale parameter for the data.
    """
    data = np.asarray(data)
    # Estimate location and scale according to the method of moments.
    loc_hat, scale_hat = self.fit_loc_scale(data, *args)
    # Compute the support according to the shape parameters.
    self._argcheck(*args)
    _a, _b = self._get_support(*args)
    a, b = _a, _b
    support_width = b - a
    # If the support is empty then return the moment-based estimates.
    if support_width <= 0:
        return loc_hat, scale_hat
    # Compute the proposed support according to the loc and scale
    # estimates.
    a_hat = loc_hat + a * scale_hat
    b_hat = loc_hat + b * scale_hat
    # Use the moment-based estimates if they are compatible with the data.
    data_a = np.min(data)
    data_b = np.max(data)
    if a_hat < data_a and data_b < b_hat:
        return loc_hat, scale_hat
    # Otherwise find other estimates that are compatible with the data.
    data_width = data_b - data_a
    rel_margin = 0.1
    margin = data_width * rel_margin
    # For a finite interval, both the location and scale
    # should have interesting values.
    if support_width < np.inf:
        # map the support onto the data range (plus margin)
        loc_hat = (data_a - a) - margin
        scale_hat = (data_width + 2 * margin) / support_width
        return loc_hat, scale_hat
    # For a one-sided interval, use only an interesting location parameter.
    if a > -np.inf:
        # finite lower endpoint: shift it just below the data minimum
        return (data_a - a) - margin, 1
    elif b < np.inf:
        # finite upper endpoint: shift it just above the data maximum
        return (data_b - b) + margin, 1
    else:
        # unbounded on both sides, yet the moment estimates were deemed
        # incompatible -- should be unreachable
        raise RuntimeError
def fit_loc_scale(self, data, *args):
"""
Estimate loc and scale parameters from data using 1st and 2nd moments.
Parameters
----------
data : array_like
Data to fit.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
Returns
-------
Lhat : float
Estimated location parameter for the data.
Shat : float
Estimated scale parameter for the data.
"""
mu, mu2 = self.stats(*args, **{'moments': 'mv'})
tmp = asarray(data)
muhat = tmp.mean()
mu2hat = tmp.var()
Shat = sqrt(mu2hat / mu2)
Lhat = muhat - Shat*mu
if not np.isfinite(Lhat):
Lhat = 0
if not (np.isfinite(Shat) and (0 < Shat)):
Shat = 1
return Lhat, Shat
def _entropy(self, *args):
def integ(x):
val = self._pdf(x, *args)
return entr(val)
# upper limit is often inf, so suppress warnings when integrating
_a, _b = self._get_support(*args)
with np.errstate(over='ignore'):
h = integrate.quad(integ, _a, _b)[0]
if not np.isnan(h):
return h
else:
# try with different limits if integration problems
low, upp = self.ppf([1e-10, 1. - 1e-10], *args)
if np.isinf(_b):
upper = upp
else:
upper = _b
if np.isinf(_a):
lower = low
else:
lower = _a
return integrate.quad(integ, lower, upper)[0]
def expect(self, func=None, args=(), loc=0, scale=1, lb=None, ub=None,
conditional=False, **kwds):
"""Calculate expected value of a function with respect to the
distribution by numerical integration.
The expected value of a function ``f(x)`` with respect to a
distribution ``dist`` is defined as::
ub
E[f(x)] = Integral(f(x) * dist.pdf(x)),
lb
where ``ub`` and ``lb`` are arguments and ``x`` has the ``dist.pdf(x)``
distribution. If the bounds ``lb`` and ``ub`` correspond to the
support of the distribution, e.g. ``[-inf, inf]`` in the default
case, then the integral is the unrestricted expectation of ``f(x)``.
Also, the function ``f(x)`` may be defined such that ``f(x)`` is ``0``
outside a finite interval in which case the expectation is
calculated within the finite range ``[lb, ub]``.
Parameters
----------
func : callable, optional
Function for which integral is calculated. Takes only one argument.
The default is the identity mapping f(x) = x.
args : tuple, optional
Shape parameters of the distribution.
loc : float, optional
Location parameter (default=0).
scale : float, optional
Scale parameter (default=1).
lb, ub : scalar, optional
Lower and upper bound for integration. Default is set to the
support of the distribution.
conditional : bool, optional
If True, the integral is corrected by the conditional probability
of the integration interval. The return value is the expectation
of the function, conditional on being in the given interval.
Default is False.
Additional keyword arguments are passed to the integration routine.
Returns
-------
expect : float
The calculated expected value.
Notes
-----
The integration behavior of this function is inherited from
`scipy.integrate.quad`. Neither this function nor
`scipy.integrate.quad` can verify whether the integral exists or is
finite. For example ``cauchy(0).mean()`` returns ``np.nan`` and
``cauchy(0).expect()`` returns ``0.0``.
The function is not vectorized.
Examples
--------
To understand the effect of the bounds of integration consider
>>> from scipy.stats import expon
>>> expon(1).expect(lambda x: 1, lb=0.0, ub=2.0)
0.6321205588285578
This is close to
>>> expon(1).cdf(2.0) - expon(1).cdf(0.0)
0.6321205588285577
If ``conditional=True``
>>> expon(1).expect(lambda x: 1, lb=0.0, ub=2.0, conditional=True)
1.0000000000000002
The slight deviation from 1 is due to numerical integration.
"""
lockwds = {'loc': loc,
'scale': scale}
self._argcheck(*args)
_a, _b = self._get_support(*args)
if func is None:
def fun(x, *args):
return x * self.pdf(x, *args, **lockwds)
else:
def fun(x, *args):
return func(x) * self.pdf(x, *args, **lockwds)
if lb is None:
lb = loc + _a * scale
if ub is None:
ub = loc + _b * scale
if conditional:
invfac = (self.sf(lb, *args, **lockwds)
- self.sf(ub, *args, **lockwds))
else:
invfac = 1.0
kwds['args'] = args
# Silence floating point warnings from integration.
with np.errstate(all='ignore'):
vals = integrate.quad(fun, lb, ub, **kwds)[0] / invfac
return vals
def _param_info(self):
shape_info = self._shape_info()
loc_info = _ShapeInfo("loc", False, (-np.inf, np.inf), (False, False))
scale_info = _ShapeInfo("scale", False, (0, np.inf), (False, False))
param_info = shape_info + [loc_info, scale_info]
return param_info
# Helpers for the discrete distributions
def _drv2_moment(self, n, *args):
    """Non-central moment of discrete distribution."""
    _a, _b = self._get_support(*args)

    def summand(x):
        return np.power(x, n) * self._pmf(x, *args)

    # Sum x**n * pmf(x) over the support, expanding outwards from the median.
    return _expect(summand, _a, _b, self.ppf(0.5, *args), self.inc)
def _drv2_ppfsingle(self, q, *args):  # Use basic bisection algorithm
    """Compute one ppf (inverse cdf) value of a discrete distribution.

    First brackets the quantile ``q`` between finite integers ``a < b``
    (expanding outwards in steps of 10 when the support is infinite), then
    bisects on integers until the cdf crosses ``q``.
    """
    _a, _b = self._get_support(*args)
    b = _b
    a = _a
    if isinf(b):            # Be sure ending point is > q
        b = int(max(100*q, 10))
        while 1:
            if b >= _b:
                qb = 1.0
                break
            qb = self._cdf(b, *args)
            if (qb < q):
                b += 10
            else:
                break
    else:
        qb = 1.0
    if isinf(a):    # be sure starting point < q
        a = int(min(-100*q, -10))
        while 1:
            if a <= _a:
                # NOTE(review): this assigns qb, not qa; if this branch were
                # ever taken, qa could be unbound below.  It looks
                # unreachable (a is finite here while _a is -inf), but
                # confirm before relying on it.
                qb = 0.0
                break
            qa = self._cdf(a, *args)
            if (qa > q):
                a -= 10
            else:
                break
    else:
        qa = self._cdf(a, *args)

    # Bisect [a, b] on integers until cdf crosses q exactly or the
    # interval shrinks to adjacent integers.
    while 1:
        if (qa == q):
            return a
        if (qb == q):
            return b
        if b <= a+1:
            # Adjacent integers: return the side whose cdf brackets q
            # from above.
            if qa > q:
                return a
            else:
                return b
        c = int((a+b)/2.0)
        qc = self._cdf(c, *args)
        if (qc < q):
            if a != c:
                a = c
            else:
                raise RuntimeError('updating stopped, endless loop')
            qa = qc
        elif (qc > q):
            if b != c:
                b = c
            else:
                raise RuntimeError('updating stopped, endless loop')
            qb = qc
        else:
            return c
# Must over-ride one of _pmf or _cdf or pass in
# x_k, p(x_k) lists in initialization
class rv_discrete(rv_generic):
    """A generic discrete random variable class meant for subclassing.

    `rv_discrete` is a base class to construct specific distribution classes
    and instances for discrete random variables. It can also be used
    to construct an arbitrary distribution defined by a list of support
    points and corresponding probabilities.

    Parameters
    ----------
    a : float, optional
        Lower bound of the support of the distribution, default: 0
    b : float, optional
        Upper bound of the support of the distribution, default: plus infinity
    moment_tol : float, optional
        The tolerance for the generic calculation of moments.
    values : tuple of two array_like, optional
        ``(xk, pk)`` where ``xk`` are integers and ``pk`` are the non-zero
        probabilities between 0 and 1 with ``sum(pk) = 1``. ``xk``
        and ``pk`` must have the same shape.
    inc : integer, optional
        Increment for the support of the distribution.
        Default is 1. (other values have not been tested)
    badvalue : float, optional
        The value in a result arrays that indicates a value that for which
        some argument restriction is violated, default is np.nan.
    name : str, optional
        The name of the instance. This string is used to construct the default
        example for distributions.
    longname : str, optional
        This string is used as part of the first line of the docstring returned
        when a subclass has no docstring of its own. Note: `longname` exists
        for backwards compatibility, do not use for new subclasses.
    shapes : str, optional
        The shape of the distribution. For example "m, n" for a distribution
        that takes two integers as the two shape arguments for all its methods
        If not provided, shape parameters will be inferred from
        the signatures of the private methods, ``_pmf`` and ``_cdf`` of
        the instance.
    extradoc : str, optional
        This string is used as the last part of the docstring returned when a
        subclass has no docstring of its own. Note: `extradoc` exists for
        backwards compatibility, do not use for new subclasses.
    seed : {None, int, `numpy.random.Generator`,
            `numpy.random.RandomState`}, optional
        If `seed` is None (or `np.random`), the `numpy.random.RandomState`
        singleton is used.
        If `seed` is an int, a new ``RandomState`` instance is used,
        seeded with `seed`.
        If `seed` is already a ``Generator`` or ``RandomState`` instance then
        that instance is used.

    Methods
    -------
    rvs
    pmf
    logpmf
    cdf
    logcdf
    sf
    logsf
    ppf
    isf
    moment
    stats
    entropy
    expect
    median
    mean
    std
    var
    interval
    __call__
    support

    Notes
    -----
    This class is similar to `rv_continuous`. Whether a shape parameter is
    valid is decided by an ``_argcheck`` method (which defaults to checking
    that its arguments are strictly positive.)
    The main differences are:

    - the support of the distribution is a set of integers
    - instead of the probability density function, ``pdf`` (and the
      corresponding private ``_pdf``), this class defines the
      *probability mass function*, `pmf` (and the corresponding
      private ``_pmf``.)
    - scale parameter is not defined.

    To create a new discrete distribution, we would do the following:

    >>> from scipy.stats import rv_discrete
    >>> class poisson_gen(rv_discrete):
    ...     "Poisson distribution"
    ...     def _pmf(self, k, mu):
    ...         return exp(-mu) * mu**k / factorial(k)

    and create an instance::

    >>> poisson = poisson_gen(name="poisson")

    Note that above we defined the Poisson distribution in the standard form.
    Shifting the distribution can be done by providing the ``loc`` parameter
    to the methods of the instance. For example, ``poisson.pmf(x, mu, loc)``
    delegates the work to ``poisson._pmf(x-loc, mu)``.

    **Discrete distributions from a list of probabilities**

    Alternatively, you can construct an arbitrary discrete rv defined
    on a finite set of values ``xk`` with ``Prob{X=xk} = pk`` by using the
    ``values`` keyword argument to the `rv_discrete` constructor.

    Examples
    --------
    Custom made discrete distribution:

    >>> from scipy import stats
    >>> xk = np.arange(7)
    >>> pk = (0.1, 0.2, 0.3, 0.1, 0.1, 0.0, 0.2)
    >>> custm = stats.rv_discrete(name='custm', values=(xk, pk))
    >>>
    >>> import matplotlib.pyplot as plt
    >>> fig, ax = plt.subplots(1, 1)
    >>> ax.plot(xk, custm.pmf(xk), 'ro', ms=12, mec='r')
    >>> ax.vlines(xk, 0, custm.pmf(xk), colors='r', lw=4)
    >>> plt.show()

    Random number generation:

    >>> R = custm.rvs(size=100)

    """

    def __new__(cls, a=0, b=inf, name=None, badvalue=None,
                moment_tol=1e-8, values=None, inc=1, longname=None,
                shapes=None, extradoc=None, seed=None):
        if values is not None:
            # dispatch to a subclass that handles explicit (xk, pk) pairs
            return super(rv_discrete, cls).__new__(rv_sample)
        else:
            # business as usual
            return super(rv_discrete, cls).__new__(cls)

    def __init__(self, a=0, b=inf, name=None, badvalue=None,
                 moment_tol=1e-8, values=None, inc=1, longname=None,
                 shapes=None, extradoc=None, seed=None):
        super().__init__(seed)

        # cf generic freeze
        self._ctor_param = dict(
            a=a, b=b, name=name, badvalue=badvalue,
            moment_tol=moment_tol, values=values, inc=inc,
            longname=longname, shapes=shapes, extradoc=extradoc, seed=seed)

        if badvalue is None:
            badvalue = nan
        self.badvalue = badvalue
        self.a = a
        self.b = b
        self.moment_tol = moment_tol
        self.inc = inc
        self.shapes = shapes

        if values is not None:
            # __new__ dispatches values-based construction to rv_sample,
            # so landing here with values set is a usage error.
            raise ValueError("rv_discrete.__init__(..., values != None, ...)")

        self._construct_argparser(meths_to_inspect=[self._pmf, self._cdf],
                                  locscale_in='loc=0',
                                  # scale=1 for discrete RVs
                                  locscale_out='loc, 1')
        self._attach_methods()
        self._construct_docstrings(name, longname, extradoc)

    def __getstate__(self):
        dct = self.__dict__.copy()
        # these methods will be remade in __setstate__
        attrs = ["_parse_args", "_parse_args_stats", "_parse_args_rvs",
                 "_cdfvec", "_ppfvec", "generic_moment"]
        # use a plain loop instead of a side-effect list comprehension
        for attr in attrs:
            dct.pop(attr, None)
        return dct

    def _attach_methods(self):
        """Attaches dynamically created methods to the rv_discrete instance."""
        self._cdfvec = vectorize(self._cdf_single, otypes='d')
        self.vecentropy = vectorize(self._entropy)

        # _attach_methods is responsible for calling _attach_argparser_methods
        self._attach_argparser_methods()

        # nin correction needs to be after we know numargs
        # correct nin for generic moment vectorization
        _vec_generic_moment = vectorize(_drv2_moment, otypes='d')
        _vec_generic_moment.nin = self.numargs + 2
        self.generic_moment = types.MethodType(_vec_generic_moment, self)

        # correct nin for ppf vectorization
        _vppf = vectorize(_drv2_ppfsingle, otypes='d')
        _vppf.nin = self.numargs + 2
        self._ppfvec = types.MethodType(_vppf, self)

        # now that self.numargs is defined, we can adjust nin
        self._cdfvec.nin = self.numargs + 1

    def _construct_docstrings(self, name, longname, extradoc):
        if name is None:
            name = 'Distribution'
        self.name = name
        self.extradoc = extradoc

        # generate docstring for subclass instances
        if longname is None:
            # BUG FIX: previously this tested ``name[0] in ['aeiouAEIOU']``,
            # i.e. membership in a one-element *list*, which is always
            # False; test membership in the string of vowels instead.
            if name[0] in 'aeiouAEIOU':
                hstr = "An "
            else:
                hstr = "A "
            longname = hstr + name

        if sys.flags.optimize < 2:
            # Skip adding docstrings if interpreter is run with -OO
            if self.__doc__ is None:
                self._construct_default_doc(longname=longname,
                                            extradoc=extradoc,
                                            docdict=docdict_discrete,
                                            discrete='discrete')
            else:
                dct = dict(distdiscrete)
                self._construct_doc(docdict_discrete, dct.get(self.name))

            # discrete RV do not have the scale parameter, remove it
            self.__doc__ = self.__doc__.replace(
                '\n    scale : array_like, '
                'optional\n        scale parameter (default=1)', '')

    def _updated_ctor_param(self):
        """Return the current version of _ctor_param, possibly updated by user.

        Used by freezing.
        Keep this in sync with the signature of __init__.
        """
        dct = self._ctor_param.copy()
        dct['a'] = self.a
        dct['b'] = self.b
        dct['badvalue'] = self.badvalue
        dct['moment_tol'] = self.moment_tol
        dct['inc'] = self.inc
        dct['name'] = self.name
        dct['shapes'] = self.shapes
        dct['extradoc'] = self.extradoc
        return dct

    def _nonzero(self, k, *args):
        # the pmf can only be nonzero at integer k
        return floor(k) == k

    def _pmf(self, k, *args):
        # generic pmf as a difference of consecutive cdf values
        return self._cdf(k, *args) - self._cdf(k-1, *args)

    def _logpmf(self, k, *args):
        return log(self._pmf(k, *args))

    def _logpxf(self, k, *args):
        # continuous distributions have PDF, discrete have PMF, but sometimes
        # the distinction doesn't matter. This lets us use `_logpxf` for both
        # discrete and continuous distributions.
        return self._logpmf(k, *args)

    def _unpack_loc_scale(self, theta):
        try:
            loc = theta[-1]
            scale = 1
            args = tuple(theta[:-1])
        except IndexError as e:
            raise ValueError("Not enough input arguments.") from e
        return loc, scale, args

    def _cdf_single(self, k, *args):
        # brute-force cdf: sum the pmf over the support up to k
        _a, _b = self._get_support(*args)
        m = arange(int(_a), k+1)
        return np.sum(self._pmf(m, *args), axis=0)

    def _cdf(self, x, *args):
        k = floor(x)
        return self._cdfvec(k, *args)

    # generic _logcdf, _sf, _logsf, _ppf, _isf, _rvs defined in rv_generic

    def rvs(self, *args, **kwargs):
        """Random variates of given type.

        Parameters
        ----------
        arg1, arg2, arg3,... : array_like
            The shape parameter(s) for the distribution (see docstring of the
            instance object for more information).
        loc : array_like, optional
            Location parameter (default=0).
        size : int or tuple of ints, optional
            Defining number of random variates (Default is 1). Note that `size`
            has to be given as keyword, not as positional argument.
        random_state : {None, int, `numpy.random.Generator`,
                        `numpy.random.RandomState`}, optional
            If `seed` is None (or `np.random`), the `numpy.random.RandomState`
            singleton is used.
            If `seed` is an int, a new ``RandomState`` instance is used,
            seeded with `seed`.
            If `seed` is already a ``Generator`` or ``RandomState`` instance
            then that instance is used.

        Returns
        -------
        rvs : ndarray or scalar
            Random variates of given `size`.

        """
        kwargs['discrete'] = True
        return super().rvs(*args, **kwargs)

    def pmf(self, k, *args, **kwds):
        """Probability mass function at k of the given RV.

        Parameters
        ----------
        k : array_like
            Quantiles.
        arg1, arg2, arg3,... : array_like
            The shape parameter(s) for the distribution (see docstring of the
            instance object for more information)
        loc : array_like, optional
            Location parameter (default=0).

        Returns
        -------
        pmf : array_like
            Probability mass function evaluated at k

        """
        args, loc, _ = self._parse_args(*args, **kwds)
        k, loc = map(asarray, (k, loc))
        args = tuple(map(asarray, args))
        _a, _b = self._get_support(*args)
        k = asarray((k-loc))
        # cond0: valid shape parameters; cond1: k inside the support and
        # on the integer lattice
        cond0 = self._argcheck(*args)
        cond1 = (k >= _a) & (k <= _b) & self._nonzero(k, *args)
        cond = cond0 & cond1
        output = zeros(shape(cond), 'd')
        place(output, (1-cond0) + np.isnan(k), self.badvalue)
        if np.any(cond):
            goodargs = argsreduce(cond, *((k,)+args))
            place(output, cond, np.clip(self._pmf(*goodargs), 0, 1))
        if output.ndim == 0:
            return output[()]
        return output

    def logpmf(self, k, *args, **kwds):
        """Log of the probability mass function at k of the given RV.

        Parameters
        ----------
        k : array_like
            Quantiles.
        arg1, arg2, arg3,... : array_like
            The shape parameter(s) for the distribution (see docstring of the
            instance object for more information).
        loc : array_like, optional
            Location parameter. Default is 0.

        Returns
        -------
        logpmf : array_like
            Log of the probability mass function evaluated at k.

        """
        args, loc, _ = self._parse_args(*args, **kwds)
        k, loc = map(asarray, (k, loc))
        args = tuple(map(asarray, args))
        _a, _b = self._get_support(*args)
        k = asarray((k-loc))
        cond0 = self._argcheck(*args)
        cond1 = (k >= _a) & (k <= _b) & self._nonzero(k, *args)
        cond = cond0 & cond1
        # default to log(0) = -inf outside the support
        output = empty(shape(cond), 'd')
        output.fill(NINF)
        place(output, (1-cond0) + np.isnan(k), self.badvalue)
        if np.any(cond):
            goodargs = argsreduce(cond, *((k,)+args))
            place(output, cond, self._logpmf(*goodargs))
        if output.ndim == 0:
            return output[()]
        return output

    def cdf(self, k, *args, **kwds):
        """Cumulative distribution function of the given RV.

        Parameters
        ----------
        k : array_like, int
            Quantiles.
        arg1, arg2, arg3,... : array_like
            The shape parameter(s) for the distribution (see docstring of the
            instance object for more information).
        loc : array_like, optional
            Location parameter (default=0).

        Returns
        -------
        cdf : ndarray
            Cumulative distribution function evaluated at `k`.

        """
        args, loc, _ = self._parse_args(*args, **kwds)
        k, loc = map(asarray, (k, loc))
        args = tuple(map(asarray, args))
        _a, _b = self._get_support(*args)
        k = asarray((k-loc))
        cond0 = self._argcheck(*args)
        cond1 = (k >= _a) & (k < _b)
        cond2 = (k >= _b)   # at or past the upper endpoint the cdf is 1
        cond = cond0 & cond1
        output = zeros(shape(cond), 'd')
        place(output, cond2*(cond0 == cond0), 1.0)
        place(output, (1-cond0) + np.isnan(k), self.badvalue)
        if np.any(cond):
            goodargs = argsreduce(cond, *((k,)+args))
            place(output, cond, np.clip(self._cdf(*goodargs), 0, 1))
        if output.ndim == 0:
            return output[()]
        return output

    def logcdf(self, k, *args, **kwds):
        """Log of the cumulative distribution function at k of the given RV.

        Parameters
        ----------
        k : array_like, int
            Quantiles.
        arg1, arg2, arg3,... : array_like
            The shape parameter(s) for the distribution (see docstring of the
            instance object for more information).
        loc : array_like, optional
            Location parameter (default=0).

        Returns
        -------
        logcdf : array_like
            Log of the cumulative distribution function evaluated at k.

        """
        args, loc, _ = self._parse_args(*args, **kwds)
        k, loc = map(asarray, (k, loc))
        args = tuple(map(asarray, args))
        _a, _b = self._get_support(*args)
        k = asarray((k-loc))
        cond0 = self._argcheck(*args)
        cond1 = (k >= _a) & (k < _b)
        cond2 = (k >= _b)
        cond = cond0 & cond1
        output = empty(shape(cond), 'd')
        output.fill(NINF)
        place(output, (1-cond0) + np.isnan(k), self.badvalue)
        place(output, cond2*(cond0 == cond0), 0.0)
        if np.any(cond):
            goodargs = argsreduce(cond, *((k,)+args))
            place(output, cond, self._logcdf(*goodargs))
        if output.ndim == 0:
            return output[()]
        return output

    def sf(self, k, *args, **kwds):
        """Survival function (1 - `cdf`) at k of the given RV.

        Parameters
        ----------
        k : array_like
            Quantiles.
        arg1, arg2, arg3,... : array_like
            The shape parameter(s) for the distribution (see docstring of the
            instance object for more information).
        loc : array_like, optional
            Location parameter (default=0).

        Returns
        -------
        sf : array_like
            Survival function evaluated at k.

        """
        args, loc, _ = self._parse_args(*args, **kwds)
        k, loc = map(asarray, (k, loc))
        args = tuple(map(asarray, args))
        _a, _b = self._get_support(*args)
        k = asarray(k-loc)
        cond0 = self._argcheck(*args)
        cond1 = (k >= _a) & (k < _b)
        cond2 = (k < _a) & cond0   # below the support the sf is 1
        cond = cond0 & cond1
        output = zeros(shape(cond), 'd')
        place(output, (1-cond0) + np.isnan(k), self.badvalue)
        place(output, cond2, 1.0)
        if np.any(cond):
            goodargs = argsreduce(cond, *((k,)+args))
            place(output, cond, np.clip(self._sf(*goodargs), 0, 1))
        if output.ndim == 0:
            return output[()]
        return output

    def logsf(self, k, *args, **kwds):
        """Log of the survival function of the given RV.

        Returns the log of the "survival function," defined as 1 - `cdf`,
        evaluated at `k`.

        Parameters
        ----------
        k : array_like
            Quantiles.
        arg1, arg2, arg3,... : array_like
            The shape parameter(s) for the distribution (see docstring of the
            instance object for more information).
        loc : array_like, optional
            Location parameter (default=0).

        Returns
        -------
        logsf : ndarray
            Log of the survival function evaluated at `k`.

        """
        args, loc, _ = self._parse_args(*args, **kwds)
        k, loc = map(asarray, (k, loc))
        args = tuple(map(asarray, args))
        _a, _b = self._get_support(*args)
        k = asarray(k-loc)
        cond0 = self._argcheck(*args)
        cond1 = (k >= _a) & (k < _b)
        cond2 = (k < _a) & cond0
        cond = cond0 & cond1
        output = empty(shape(cond), 'd')
        output.fill(NINF)
        place(output, (1-cond0) + np.isnan(k), self.badvalue)
        place(output, cond2, 0.0)
        if np.any(cond):
            goodargs = argsreduce(cond, *((k,)+args))
            place(output, cond, self._logsf(*goodargs))
        if output.ndim == 0:
            return output[()]
        return output

    def ppf(self, q, *args, **kwds):
        """Percent point function (inverse of `cdf`) at q of the given RV.

        Parameters
        ----------
        q : array_like
            Lower tail probability.
        arg1, arg2, arg3,... : array_like
            The shape parameter(s) for the distribution (see docstring of the
            instance object for more information).
        loc : array_like, optional
            Location parameter (default=0).

        Returns
        -------
        k : array_like
            Quantile corresponding to the lower tail probability, q.

        """
        args, loc, _ = self._parse_args(*args, **kwds)
        q, loc = map(asarray, (q, loc))
        args = tuple(map(asarray, args))
        _a, _b = self._get_support(*args)
        cond0 = self._argcheck(*args) & (loc == loc)
        cond1 = (q > 0) & (q < 1)
        cond2 = (q == 1) & cond0
        cond = cond0 & cond1

        # output type 'd' to handle nin and inf
        output = np.full(shape(cond), fill_value=self.badvalue, dtype='d')
        place(output, (q == 0)*(cond == cond), _a-1 + loc)
        place(output, cond2, _b + loc)
        if np.any(cond):
            goodargs = argsreduce(cond, *((q,)+args+(loc,)))
            loc, goodargs = goodargs[-1], goodargs[:-1]
            place(output, cond, self._ppf(*goodargs) + loc)

        if output.ndim == 0:
            return output[()]
        return output

    def isf(self, q, *args, **kwds):
        """Inverse survival function (inverse of `sf`) at q of the given RV.

        Parameters
        ----------
        q : array_like
            Upper tail probability.
        arg1, arg2, arg3,... : array_like
            The shape parameter(s) for the distribution (see docstring of the
            instance object for more information).
        loc : array_like, optional
            Location parameter (default=0).

        Returns
        -------
        k : ndarray or scalar
            Quantile corresponding to the upper tail probability, q.

        """
        args, loc, _ = self._parse_args(*args, **kwds)
        q, loc = map(asarray, (q, loc))
        args = tuple(map(asarray, args))
        _a, _b = self._get_support(*args)
        cond0 = self._argcheck(*args) & (loc == loc)
        cond1 = (q > 0) & (q < 1)
        cond2 = (q == 1) & cond0
        cond3 = (q == 0) & cond0
        cond = cond0 & cond1

        # same problem as with ppf; copied from ppf and changed
        # output type 'd' to handle nin and inf
        output = np.full(shape(cond), fill_value=self.badvalue, dtype='d')
        lower_bound = _a - 1 + loc
        upper_bound = _b + loc
        place(output, cond2*(cond == cond), lower_bound)
        place(output, cond3*(cond == cond), upper_bound)

        # call place only if at least 1 valid argument
        if np.any(cond):
            goodargs = argsreduce(cond, *((q,)+args+(loc,)))
            loc, goodargs = goodargs[-1], goodargs[:-1]
            # PB same as ticket 766
            place(output, cond, self._isf(*goodargs) + loc)

        if output.ndim == 0:
            return output[()]
        return output

    def _entropy(self, *args):
        # rv_sample instances carry exact probabilities; otherwise sum
        # entr(pmf) over the support starting from the median.
        if hasattr(self, 'pk'):
            return stats.entropy(self.pk)
        else:
            _a, _b = self._get_support(*args)
            return _expect(lambda x: entr(self.pmf(x, *args)),
                           _a, _b, self.ppf(0.5, *args), self.inc)

    def expect(self, func=None, args=(), loc=0, lb=None, ub=None,
               conditional=False, maxcount=1000, tolerance=1e-10,
               chunksize=32):
        """
        Calculate expected value of a function with respect to the distribution
        for discrete distribution by numerical summation.

        Parameters
        ----------
        func : callable, optional
            Function for which the expectation value is calculated.
            Takes only one argument.
            The default is the identity mapping f(k) = k.
        args : tuple, optional
            Shape parameters of the distribution.
        loc : float, optional
            Location parameter.
            Default is 0.
        lb, ub : int, optional
            Lower and upper bound for the summation, default is set to the
            support of the distribution, inclusive (``lb <= k <= ub``).
        conditional : bool, optional
            If true then the expectation is corrected by the conditional
            probability of the summation interval. The return value is the
            expectation of the function, `func`, conditional on being in
            the given interval (k such that ``lb <= k <= ub``).
            Default is False.
        maxcount : int, optional
            Maximal number of terms to evaluate (to avoid an endless loop for
            an infinite sum). Default is 1000.
        tolerance : float, optional
            Absolute tolerance for the summation. Default is 1e-10.
        chunksize : int, optional
            Iterate over the support of a distributions in chunks of this size.
            Default is 32.

        Returns
        -------
        expect : float
            Expected value.

        Notes
        -----
        For heavy-tailed distributions, the expected value may or may not
        exist, depending on the function, `func`. If it does exist, but the
        sum converges slowly, the accuracy of the result may be rather low.
        For instance, for ``zipf(4)``, accuracy for mean, variance in
        example is only 1e-5. increasing `maxcount` and/or `chunksize` may
        improve the result, but may also make zipf very slow.

        The function is not vectorized.

        """
        if func is None:
            def fun(x):
                # loc and args from outer scope
                return (x+loc)*self._pmf(x, *args)
        else:
            def fun(x):
                # loc and args from outer scope
                return func(x+loc)*self._pmf(x, *args)

        # used pmf because _pmf does not check support in randint and there
        # might be problems(?) with correct self.a, self.b at this stage maybe
        # not anymore, seems to work now with _pmf
        _a, _b = self._get_support(*args)
        if lb is None:
            lb = _a
        else:
            lb = lb - loc   # convert bound for standardized distribution
        if ub is None:
            ub = _b
        else:
            ub = ub - loc   # convert bound for standardized distribution
        if conditional:
            invfac = self.sf(lb-1, *args) - self.sf(ub, *args)
        else:
            invfac = 1.0

        if isinstance(self, rv_sample):
            res = self._expect(fun, lb, ub)
            return res / invfac

        # iterate over the support, starting from the median
        x0 = self.ppf(0.5, *args)
        res = _expect(fun, lb, ub, x0, self.inc, maxcount, tolerance,
                      chunksize)
        return res / invfac

    def _param_info(self):
        # discrete distributions have no scale parameter
        shape_info = self._shape_info()
        loc_info = _ShapeInfo("loc", True, (-np.inf, np.inf), (False, False))
        param_info = shape_info + [loc_info]
        return param_info
def _expect(fun, lb, ub, x0, inc, maxcount=1000, tolerance=1e-10,
chunksize=32):
"""Helper for computing the expectation value of `fun`."""
# short-circuit if the support size is small enough
if (ub - lb) <= chunksize:
supp = np.arange(lb, ub+1, inc)
vals = fun(supp)
return np.sum(vals)
# otherwise, iterate starting from x0
if x0 < lb:
x0 = lb
if x0 > ub:
x0 = ub
count, tot = 0, 0.
# iterate over [x0, ub] inclusive
for x in _iter_chunked(x0, ub+1, chunksize=chunksize, inc=inc):
count += x.size
delta = np.sum(fun(x))
tot += delta
if abs(delta) < tolerance * x.size:
break
if count > maxcount:
warnings.warn('expect(): sum did not converge', RuntimeWarning)
return tot
# iterate over [lb, x0)
for x in _iter_chunked(x0-1, lb-1, chunksize=chunksize, inc=-inc):
count += x.size
delta = np.sum(fun(x))
tot += delta
if abs(delta) < tolerance * x.size:
break
if count > maxcount:
warnings.warn('expect(): sum did not converge', RuntimeWarning)
break
return tot
def _iter_chunked(x0, x1, chunksize=4, inc=1):
"""Iterate from x0 to x1 in chunks of chunksize and steps inc.
x0 must be finite, x1 need not be. In the latter case, the iterator is
infinite.
Handles both x0 < x1 and x0 > x1. In the latter case, iterates downwards
(make sure to set inc < 0.)
>>> [x for x in _iter_chunked(2, 5, inc=2)]
[array([2, 4])]
>>> [x for x in _iter_chunked(2, 11, inc=2)]
[array([2, 4, 6, 8]), array([10])]
>>> [x for x in _iter_chunked(2, -5, inc=-2)]
[array([ 2, 0, -2, -4])]
>>> [x for x in _iter_chunked(2, -9, inc=-2)]
[array([ 2, 0, -2, -4]), array([-6, -8])]
"""
if inc == 0:
raise ValueError('Cannot increment by zero.')
if chunksize <= 0:
raise ValueError('Chunk size must be positive; got %s.' % chunksize)
s = 1 if inc > 0 else -1
stepsize = abs(chunksize * inc)
x = x0
while (x - x1) * inc < 0:
delta = min(stepsize, abs(x - x1))
step = delta * s
supp = np.arange(x, x + step, inc)
x += step
yield supp
class rv_sample(rv_discrete):
    """A 'sample' discrete distribution defined by the support and values.

    The ctor ignores most of the arguments, only needs the `values` argument.
    """

    def __init__(self, a=0, b=inf, name=None, badvalue=None,
                 moment_tol=1e-8, values=None, inc=1, longname=None,
                 shapes=None, extradoc=None, seed=None):
        # Skip rv_discrete.__init__ (which rejects values != None) and go
        # straight to the rv_generic initializer.
        super(rv_discrete, self).__init__(seed)

        if values is None:
            raise ValueError("rv_sample.__init__(..., values=None,...)")

        # cf generic freeze
        self._ctor_param = dict(
            a=a, b=b, name=name, badvalue=badvalue,
            moment_tol=moment_tol, values=values, inc=inc,
            longname=longname, shapes=shapes, extradoc=extradoc, seed=seed)

        if badvalue is None:
            badvalue = nan
        self.badvalue = badvalue
        self.moment_tol = moment_tol
        self.inc = inc
        self.shapes = shapes
        self.vecentropy = self._entropy

        xk, pk = values

        if np.shape(xk) != np.shape(pk):
            raise ValueError("xk and pk must have the same shape.")
        if np.less(pk, 0.0).any():
            raise ValueError("All elements of pk must be non-negative.")
        if not np.allclose(np.sum(pk), 1):
            raise ValueError("The sum of provided pk is not 1.")

        # Store the support sorted ascending; pk is reordered to match.
        indx = np.argsort(np.ravel(xk))
        self.xk = np.take(np.ravel(xk), indx, 0)
        self.pk = np.take(np.ravel(pk), indx, 0)
        self.a = self.xk[0]
        self.b = self.xk[-1]

        # Cumulative probabilities at the support points, used by _cdf/_ppf.
        self.qvals = np.cumsum(self.pk, axis=0)

        self.shapes = ' '   # bypass inspection

        self._construct_argparser(meths_to_inspect=[self._pmf],
                                  locscale_in='loc=0',
                                  # scale=1 for discrete RVs
                                  locscale_out='loc, 1')

        self._attach_methods()

        self._construct_docstrings(name, longname, extradoc)

    def __getstate__(self):
        dct = self.__dict__.copy()

        # these methods will be remade in rv_generic.__setstate__,
        # which calls rv_generic._attach_methods
        attrs = ["_parse_args", "_parse_args_stats", "_parse_args_rvs"]
        # NOTE: list comprehension used purely for its pop() side effects
        [dct.pop(attr, None) for attr in attrs]

        return dct

    def _attach_methods(self):
        """Attaches dynamically created argparser methods."""
        self._attach_argparser_methods()

    def _get_support(self, *args):
        """Return the support of the (unscaled, unshifted) distribution.

        Parameters
        ----------
        arg1, arg2, ... : array_like
            The shape parameter(s) for the distribution (see docstring of the
            instance object for more information).

        Returns
        -------
        a, b : numeric (float, or int or +/-np.inf)
            end-points of the distribution's support.
        """
        return self.a, self.b

    def _pmf(self, x):
        # Vectorized table lookup: for each support point select the matching
        # probability; x values not in the support get 0.
        return np.select([x == k for k in self.xk],
                         [np.broadcast_arrays(p, x)[0] for p in self.pk], 0)

    def _cdf(self, x):
        # Index of the largest xk <= x: argmax finds the first xk > x, then
        # -1 steps back.  When no xk exceeds x, argmax of the all-False mask
        # is 0 and the -1 wraps to qvals[-1] (the total probability).
        xx, xxk = np.broadcast_arrays(x[:, None], self.xk)
        indx = np.argmax(xxk > xx, axis=-1) - 1
        return self.qvals[indx]

    def _ppf(self, q):
        # Smallest support point whose cumulative probability reaches q.
        qq, sqq = np.broadcast_arrays(q[..., None], self.qvals)
        indx = argmax(sqq >= qq, axis=-1)
        return self.xk[indx]

    def _rvs(self, size=None, random_state=None):
        # Need to define it explicitly, otherwise .rvs() with size=None
        # fails due to explicit broadcasting in _ppf
        U = random_state.uniform(size=size)
        if size is None:
            U = np.array(U, ndmin=1)
            Y = self._ppf(U)[0]
        else:
            Y = self._ppf(U)
        return Y

    def _entropy(self):
        # Exact entropy computed from the stored probabilities.
        return stats.entropy(self.pk)

    def generic_moment(self, n):
        # E[X**n] as a finite sum over the support; broadcasting allows n
        # to be an array of moment orders.
        n = asarray(n)
        return np.sum(self.xk**n[np.newaxis, ...] * self.pk, axis=0)

    def _expect(self, fun, lb, ub, *args, **kwds):
        # ignore all args, just do a brute force summation
        supp = self.xk[(lb <= self.xk) & (self.xk <= ub)]
        vals = fun(supp)
        return np.sum(vals)
def _check_shape(argshape, size):
"""
This is a utility function used by `_rvs()` in the class geninvgauss_gen.
It compares the tuple argshape to the tuple size.
Parameters
----------
argshape : tuple of integers
Shape of the arguments.
size : tuple of integers or integer
Size argument of rvs().
Returns
-------
The function returns two tuples, scalar_shape and bc.
scalar_shape : tuple
Shape to which the 1-d array of random variates returned by
_rvs_scalar() is converted when it is copied into the
output array of _rvs().
bc : tuple of booleans
bc is an tuple the same length as size. bc[j] is True if the data
associated with that index is generated in one call of _rvs_scalar().
"""
scalar_shape = []
bc = []
for argdim, sizedim in zip_longest(argshape[::-1], size[::-1],
fillvalue=1):
if sizedim > argdim or (argdim == sizedim == 1):
scalar_shape.append(sizedim)
bc.append(True)
else:
bc.append(False)
return tuple(scalar_shape[::-1]), tuple(bc[::-1])
def get_distribution_names(namespace_pairs, rv_base_class):
    """Collect names of statistical distributions and their generators.

    Parameters
    ----------
    namespace_pairs : sequence
        A snapshot of (name, value) pairs in the namespace of a module.
    rv_base_class : class
        The base class of random variable generator classes in a module.

    Returns
    -------
    distn_names : list of strings
        Names of the statistical distributions.
    distn_gen_names : list of strings
        Names of the generators of the statistical distributions.  Note
        that these are not simply the distribution names with a ``_gen``
        suffix added.
    """
    # Guard order matters: ``issubclass`` is only reached for ``_gen``
    # names, mirroring the original loop (it raises on non-classes).
    distn_gen_names = [name for name, value in namespace_pairs
                       if not name.startswith('_')
                       and name.endswith('_gen')
                       and issubclass(value, rv_base_class)]
    distn_names = [name for name, value in namespace_pairs
                   if not name.startswith('_')
                   and isinstance(value, rv_base_class)]
    return distn_names, distn_gen_names
| mdhaber/scipy | scipy/stats/_distn_infrastructure.py | Python | bsd-3-clause | 146,733 |
# (c) Nelen & Schuurmans. MIT licensed, see LICENSE.rst.
from __future__ import division
import ast
from django.contrib.auth.decorators import permission_required
from django.contrib.contenttypes.models import ContentType
from django.db import transaction
from django.db.models import Q
from django.http import HttpResponse
from django.utils.decorators import method_decorator
from django.views.generic import View
import networkx as nx
from lizard_security.models import DataSet
from ddsc_core.models import LogicalGroup
from ddsc_core.models import LogicalGroupEdge
from ddsc_core.models import Timeseries
from ddsc_core.models import TimeseriesSelectionRule
def timeseries_from_rules(rules):
    """Return the timeseries selected by a sequence of selection rules.

    Rules are expected to be ordered by entry order (see class Meta of
    the TimeseriesSelectionRule model).  The first rule should not have
    an operator; later rules combine with "&" (AND) or "|" (OR).

    Returns an empty list when no rules are given.
    """
    if not rules:
        return []
    q = None
    for rule in rules:
        key, raw_value = rule.criterion.split("=", 1)
        term = Q(**{key: ast.literal_eval(raw_value)})
        if rule.operator is None or q is None:
            # First rule, or a malformed leading operator: start a fresh
            # query instead of raising NameError on an unbound ``q``
            # (the original code crashed if rule #1 carried an operator).
            q = term
        elif rule.operator == "&":
            q = q & term
        elif rule.operator == "|":
            q = q | term
    return Timeseries.objects.filter(q).only("name")
class LogicalGroupGraph(View):
    """View rendering the graph of LogicalGroups as a PNG image."""

    def get(self, request, *args, **kwargs):
        """Return the full group graph with the requested node highlighted."""
        current = LogicalGroup.objects.get(pk=kwargs['pk'])
        G = nx.DiGraph()  # NetworkX directed graph
        G.add_nodes_from(LogicalGroup.objects.all())
        G.add_edges_from(LogicalGroupEdge.edges())
        A = nx.to_agraph(G)  # Graphviz agraph (requires pygraphviz)
        A.graph_attr.update(rankdir="BT")  # lay the graph out bottom-to-top
        A.node_attr.update(fontsize=9)
        # Mark the group whose pk was requested.
        A.get_node(current).attr['color'] = "red"
        A.layout(prog="dot")
        # NOTE(review): ``mimetype`` is the pre-Django-1.7 keyword; newer
        # Django expects ``content_type`` -- confirm the project's version.
        return HttpResponse(A.draw(format="png"), mimetype="image/png")
class LogicalGroupRulesView(View):
    """Expand a logical group with every timeseries that meets its rules."""

    # TODO: use more fine-grained permissions (allow owners to POST too).
    @method_decorator(permission_required('is_superuser'))
    def dispatch(self, *args, **kwargs):
        """Allow only superusers to POST."""
        parent = super(LogicalGroupRulesView, self)
        return parent.dispatch(*args, **kwargs)

    @transaction.commit_on_success
    def post(self, request, *args, **kwargs):
        # ``kwargs`` carries the pk of the group to expand.
        group = LogicalGroup.objects.get(**kwargs)
        ctype = ContentType.objects.get_for_model(group)
        selection_rules = TimeseriesSelectionRule.objects.filter(
            content_type_id=ctype.pk,
            object_id=group.pk
        )
        group.timeseries.add(*timeseries_from_rules(selection_rules))
        return HttpResponse()
class DataSetRulesView(View):
    """Expand a data set with every timeseries that meets its rules."""

    # TODO: use more fine-grained permissions (allow owners to POST too).
    @method_decorator(permission_required('is_superuser'))
    def dispatch(self, *args, **kwargs):
        """Allow only superusers to POST."""
        parent = super(DataSetRulesView, self)
        return parent.dispatch(*args, **kwargs)

    @transaction.commit_on_success
    def post(self, request, *args, **kwargs):
        # ``kwargs`` carries the pk of the data set to expand.
        dataset = DataSet.objects.get(**kwargs)
        ctype = ContentType.objects.get_for_model(dataset)
        selection_rules = TimeseriesSelectionRule.objects.filter(
            content_type_id=ctype.pk,
            object_id=dataset.pk
        )
        dataset.timeseries.add(*timeseries_from_rules(selection_rules))
        return HttpResponse()
| ddsc/ddsc-core | ddsc_core/views.py | Python | mit | 3,718 |
# -*-coding:Utf-8 -*
# Copyright (c) 2010-2017 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Fichier définissant la classe NoeudMasque détaillée plus bas;"""
import re
from primaires.interpreteur.masque.fonctions import *
from primaires.interpreteur.masque.noeuds.base_noeud import BaseNoeud
from primaires.interpreteur.masque.noeuds.embranchement import Embranchement
from primaires.interpreteur.masque.exceptions.erreur_validation \
import ErreurValidation
# Constants: regular expressions used to parse a command schema.
# RE_MASQUE captures "<name:type>"-style mask tokens (optional chevrons,
# optional "name:" prefix, then the mask type).
RE_MASQUE = re.compile(r"^(<?)((.*):)?([^ >]*)(>?)$")
# RE_MOT_CLE matches a keyword token of the form "french/english".
RE_MOT_CLE = re.compile(r"^[A-Za-z]*/[A-Za-z]*$")
class NoeudMasque(BaseNoeud):
    """Mask node: a node containing one or more masks.

    Its validation depends on the validation of its masks.
    Given its role, if it has a following node there are two cases:
    -   either the next node is optional and the mask node it contains is
        optional;
    -   or the next node is also a mask node and the mask it contains is
        mandatory.
    """

    def __init__(self, parente, commande):
        """Constructor of the mask node."""
        BaseNoeud.__init__(self)
        self.nom = ""
        self.parente = parente  # parent command
        self.commande = commande
        self.masques = []  # an empty list of masks
        self.defaut = None  # default value

    def construire_depuis_schema(self, lst_schema):
        """Build the mask from the schema."""
        # Convert the character list into a string
        schema = liste_vers_chaine(lst_schema)
        delimiteurs = ('>', ' ', ')')
        fins = [schema.index(dl) for dl in delimiteurs if dl in schema]
        pos_fin = min(fins)
        fin = schema[pos_fin]
        schema = schema[:pos_fin]
        # Consume the parsed token (and its delimiter) from the schema list
        lst_schema[:] = lst_schema[pos_fin + 1:]
        # Extract the type from the schema.
        # First, check whether this token is a keyword
        res = RE_MOT_CLE.search(schema)
        if res:
            self.construire_mot_cle(schema)
            return
        if fin in ('>', ):
            schema += fin
        res = RE_MASQUE.search(schema)
        if not res:
            raise ValueError("le schéma {} n'a pas pu être interprété".format(
                    schema))
        groupes = res.groups()
        # The groups are a tuple made of:
        # 1. the '<' sign if present (otherwise None)
        # 2. the name including the ':' sign (unused)
        # 3. the name without the ':' sign if present, otherwise None
        # 4. the mask type
        # 5. the '>' sign if present (otherwise None)
        inf, none, nom, liste_types_masques, sup = groupes
        liste_types_masques = liste_types_masques.split("|")
        # Normalize inf and sup to empty strings
        if not inf: inf = ""
        if not sup: sup = ""
        # Look the mask type up in the interpreter:
        # replace each str in liste_types_masques with its mask instance.
        # Note: if chevrons is False, the lookup is done in the command,
        # not in the interpreter.
        # If the mask does not exist, an exception is raised.
        for i, str_type_masque in enumerate(liste_types_masques):
            proprietes = ""
            # Split off the "{...}" property suffix, if any
            if str_type_masque.count("{"):
                proprietes = "{" + "{".join(str_type_masque.split("{")[1:])
                str_type_masque = str_type_masque.split("{")[0]
            type_masque = type(self).importeur.interpreteur.get_masque(
                    str_type_masque)
            type_masque.construire(str_type_masque + proprietes)
            liste_types_masques[i] = type_masque
        # If the mask name is not given, deduce it from the first
        # mask type
        if not nom:
            nom = liste_types_masques[0].nom
        self.nom = nom
        self.masques = liste_types_masques
        for masque in self.masques:
            masque.nom = self.nom

    def construire_mot_cle(self, schema):
        """Build the keyword from the schema.

        A keyword has a fairly simple form:
            french/english
        Example: depuis/from
        """
        francais, anglais = schema.split("/")
        self.masques = [type(self).importeur.interpreteur.creer_mot_cle(
                francais, anglais)]

    @property
    def masque(self):
        """Return the first mask."""
        return self.masques[0]

    def est_parametre(self):
        """Return True if the first mask is a parameter."""
        return self.masque.est_parametre()

    def __str__(self):
        """Display method."""
        msg = "msg "
        msg += self.nom + "["
        msg += ", ".join([str(masque) for masque in self.masques])
        msg += "]"
        if self.suivant:
            msg += " : " + str(self.suivant)
        return msg

    def get_masque(self, nom_masque):
        """Return the mask named 'nom_masque', searching recursively."""
        if self.nom == nom_masque:
            return self.masque
        elif self.suivant:
            return self.suivant.get_masque(nom_masque)
        else:
            return None

    def repartir(self, personnage, masques, commande, tester_fils=True):
        """Distribute the command string into the mask if possible.

        NOTE(review): ``valide`` is unbound if ``self.masques`` is empty --
        presumably construction guarantees at least one mask; confirm.
        """
        lstrip(commande)
        for masque in self.masques:
            masque.init()
            valide = masque.repartir(personnage, masques, commande)
            if valide:
                break
        if valide and self.suivant:
            valide = self.suivant.repartir(personnage, masques, commande)
        return valide

    def valider(self, personnage, dic_masques, tester_fils=True):
        """Validate a mask node.

        Each possible mask is tried in turn.  If none validates, the
        first validation error raised (if any) is re-raised.
        """
        valide = False
        premiere_erreur = None
        for masque in self.masques:
            try:
                valide = masque.valider(personnage, dic_masques)
            except ErreurValidation as err:
                # Remember only the first error; later masks may still match
                if not premiere_erreur:
                    premiere_erreur = err
                valide = False
        if not valide:
            if premiere_erreur:
                raise premiere_erreur
        elif self.suivant and tester_fils:
            valide = self.suivant.valider(personnage, dic_masques)
        return valide

    def afficher(self, personnage=None):
        """Return a player-facing display of the mask."""
        noms_masques = []
        for masque in self.masques:
            noms_masques.append(masque.nom_complet_pour(personnage))
        msg = ""
        # Keywords are shown bare; parameters are wrapped in chevrons
        if self.masque.est_mot_cle():
            msg += " / ".join(noms_masques)
        else:
            msg += "<" + " / ".join(noms_masques) + ">"
        if self.suivant:
            msg += " " + self.suivant.afficher(personnage)
        return msg

    def extraire_masques(self, masques=None):
        """Extract the command's masks into the ``masques`` dict.

        NOTE(review): the ``None`` default is never replaced before the
        dict assignment below -- callers apparently always pass a dict;
        confirm.
        """
        for masque in self.masques:
            masques[masque.nom] = masque
        for fils in self.fils:
            if fils:
                fils.extraire_masques(masques)
| vlegoff/tsunami | src/primaires/interpreteur/masque/noeuds/noeud_masque.py | Python | bsd-3-clause | 8,636 |
# Dataview.py
#
# Copyright 2019 OSIsoft, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# <http://www.apache.org/licenses/LICENSE-2.0>
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from .DataviewQuery import DataviewQuery
from .DataviewMapping import DataviewMapping
from .DataviewIndexConfig import DataviewIndexConfig
from .DataviewGroupRule import DataviewGroupRule
class Dataview(object):
    """Sds dataview definition.

    Wraps a Dataview resource: identity, queries, mappings, index
    configuration and group rules, with JSON (de)serialization helpers.
    """

    def __init__(self, id=None, name=None, description=None, queries=None,
                 mappings=None, indexConfig=None, indexDataType=None,
                 groupRules=None):
        # BUG FIX: ``queries`` and ``groupRules`` previously defaulted to
        # mutable ``[]`` literals, which Python evaluates once and shares
        # across every instance -- appending to one dataview's Queries
        # mutated all others.  ``None`` sentinels give each instance its
        # own fresh list; passing an explicit list behaves as before.
        self.__id = id
        self.__name = name
        self.__description = description
        self.__queries = [] if queries is None else queries
        if mappings:
            self.__mappings = mappings
        else:
            self.__mappings = DataviewMapping()
        self.__indexConfig = indexConfig
        self.__indexDataType = indexDataType
        self.__groupRules = [] if groupRules is None else groupRules

    @property
    def Id(self):
        return self.__id

    @Id.setter
    def Id(self, id):
        self.__id = id

    @property
    def Name(self):
        return self.__name

    @Name.setter
    def Name(self, name):
        self.__name = name

    @property
    def Description(self):
        return self.__description

    @Description.setter
    def Description(self, description):
        self.__description = description

    @property
    def Queries(self):
        return self.__queries

    @Queries.setter
    def Queries(self, queries):
        self.__queries = queries

    @property
    def Mappings(self):
        return self.__mappings

    @Mappings.setter
    def Mappings(self, mappings):
        self.__mappings = mappings

    @property
    def IndexConfig(self):
        return self.__indexConfig

    @IndexConfig.setter
    def IndexConfig(self, indexConfig):
        self.__indexConfig = indexConfig

    @property
    def IndexDataType(self):
        return self.__indexDataType

    @IndexDataType.setter
    def IndexDataType(self, indexDataType):
        self.__indexDataType = indexDataType

    @property
    def GroupRules(self):
        return self.__groupRules

    @GroupRules.setter
    def GroupRules(self, groupRules):
        self.__groupRules = groupRules

    def toJson(self):
        """Serialize this dataview to a JSON string."""
        return json.dumps(self.toDictionary())

    def toDictionary(self):
        """Serialize this dataview to a plain dict for the REST API."""
        # required properties
        dictionary = {'Id': self.Id}
        dictionary['Queries'] = []
        for value in self.Queries:
            dictionary['Queries'].append(value.toDictionary())
        # optional properties (hasattr checks kept from the original)
        if hasattr(self, 'Name'):
            dictionary['Name'] = self.Name
        if hasattr(self, 'Description'):
            dictionary['Description'] = self.Description
        if hasattr(self, 'Mappings') and self.Mappings is not None:
            dictionary['Mappings'] = self.Mappings.toDictionary()
        if hasattr(self, 'IndexConfig') and self.IndexConfig is not None:
            dictionary['IndexConfig'] = self.IndexConfig.toDictionary()
        if hasattr(self, 'IndexDataType'):
            dictionary['IndexDataType'] = self.IndexDataType
        if hasattr(self, 'GroupRules'):
            dictionary['GroupRules'] = []
            for value in self.GroupRules:
                dictionary['GroupRules'].append(value.toDictionary())
        return dictionary

    @staticmethod
    def fromJson(jsonObj):
        """Build a Dataview from already-parsed JSON content."""
        return Dataview.fromDictionary(jsonObj)

    @staticmethod
    def fromDictionary(content):
        """Build a Dataview from a dict, tolerating missing keys."""
        dataview = Dataview()
        if len(content) == 0:
            return dataview
        if 'Id' in content:
            dataview.Id = content['Id']
        if 'Name' in content:
            dataview.Name = content['Name']
        if 'Description' in content:
            dataview.Description = content['Description']
        if 'Queries' in content:
            queries = content['Queries']
            if queries is not None and len(queries) > 0:
                dataview.Queries = []
                for value in queries:
                    dataview.Queries.append(DataviewQuery.fromDictionary(value))
        if 'Mappings' in content:
            dataview.Mappings = DataviewMapping.fromDictionary(content['Mappings'])
        if 'IndexConfig' in content:
            dataview.IndexConfig = DataviewIndexConfig.fromDictionary(content['IndexConfig'])
        if 'IndexDataType' in content:
            dataview.IndexDataType = content['IndexDataType']
        if 'GroupRules' in content:
            groupRules = content['GroupRules']
            if groupRules is not None and len(groupRules) > 0:
                dataview.GroupRules = []
                for value in groupRules:
                    dataview.GroupRules.append(DataviewGroupRule.fromDictionary(value))
        return dataview
| osisoft/Qi-Samples | ocs_samples/library_samples/Python3/ocs_sample_library_preview/Dataview/Dataview.py | Python | apache-2.0 | 5,312 |
"""Support for Dyson Pure Cool Link Sensors."""
import logging
from libpurecool.dyson_pure_cool import DysonPureCool
from libpurecool.dyson_pure_cool_link import DysonPureCoolLink
from homeassistant.const import PERCENTAGE, STATE_OFF, TEMP_CELSIUS, TIME_HOURS
from homeassistant.helpers.entity import Entity
from . import DYSON_DEVICES
# Native unit of measurement per sensor type; ``None`` marks a unitless
# index.  Temperature is absent on purpose: its unit comes from the Home
# Assistant configuration (see DysonTemperatureSensor.unit_of_measurement).
SENSOR_UNITS = {
    "air_quality": None,
    "dust": None,
    "filter_life": TIME_HOURS,
    "humidity": PERCENTAGE,
}
# Material Design icon shown for each sensor type.
SENSOR_ICONS = {
    "air_quality": "mdi:fan",
    "dust": "mdi:cloud",
    "filter_life": "mdi:filter-outline",
    "humidity": "mdi:water-percent",
    "temperature": "mdi:thermometer",
}
# Key under hass.data where created sensor entities are tracked.
DYSON_SENSOR_DEVICES = "dyson_sensor_devices"
_LOGGER = logging.getLogger(__name__)
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the Dyson Sensors."""
    if discovery_info is None:
        return

    hass.data.setdefault(DYSON_SENSOR_DEVICES, [])
    temperature_unit = hass.config.units.temperature_unit
    known_sensors = hass.data[DYSON_SENSOR_DEVICES]
    # Unique ids of sensors created on a previous discovery pass.
    existing_ids = {sensor.unique_id for sensor in known_sensors}

    additions = []
    # Walk the Dyson devices registered by the parent component.
    for fan in hass.data[DYSON_DEVICES]:
        if isinstance(fan, DysonPureCool):
            if f"{fan.serial}-temperature" not in existing_ids:
                additions.append(DysonTemperatureSensor(fan, temperature_unit))
            if f"{fan.serial}-humidity" not in existing_ids:
                additions.append(DysonHumiditySensor(fan))
        elif isinstance(fan, DysonPureCoolLink):
            additions.extend(
                [
                    DysonFilterLifeSensor(fan),
                    DysonDustSensor(fan),
                    DysonHumiditySensor(fan),
                    DysonTemperatureSensor(fan, temperature_unit),
                    DysonAirQualitySensor(fan),
                ]
            )

    if not additions:
        return
    known_sensors.extend(additions)
    add_entities(known_sensors)
class DysonSensor(Entity):
    """Representation of a generic Dyson sensor."""

    def __init__(self, device, sensor_type):
        """Create a new generic Dyson sensor."""
        self._device = device
        self._old_value = None
        self._name = None
        self._sensor_type = sensor_type

    async def async_added_to_hass(self):
        """Call when entity is added to hass."""
        self._device.add_message_listener(self.on_message)

    def on_message(self, message):
        """Handle new messages which are received from the fan."""
        # Skip the state-machine refresh when the value did not change.
        current = self.state
        if self._old_value is not None and self._old_value == current:
            return
        _LOGGER.debug("Message received for %s device: %s", self.name, message)
        self._old_value = current
        self.schedule_update_ha_state()

    @property
    def should_poll(self):
        """No polling needed."""
        return False

    @property
    def name(self):
        """Return the name of the Dyson sensor name."""
        return self._name

    @property
    def unit_of_measurement(self):
        """Return the unit the value is expressed in."""
        return SENSOR_UNITS[self._sensor_type]

    @property
    def icon(self):
        """Return the icon for this sensor."""
        return SENSOR_ICONS[self._sensor_type]

    @property
    def unique_id(self):
        """Return the sensor's unique id."""
        return f"{self._device.serial}-{self._sensor_type}"
class DysonFilterLifeSensor(DysonSensor):
    """Representation of Dyson Filter Life sensor (in hours)."""

    def __init__(self, device):
        """Create a new Dyson Filter Life sensor."""
        super().__init__(device, "filter_life")
        self._name = f"{self._device.name} Filter Life"

    @property
    def state(self):
        """Return filter life in hours, or None before the first update."""
        device_state = self._device.state
        if not device_state:
            return None
        return int(device_state.filter_life)
class DysonDustSensor(DysonSensor):
    """Representation of Dyson Dust sensor (lower is better)."""

    def __init__(self, device):
        """Create a new Dyson Dust sensor."""
        super().__init__(device, "dust")
        self._name = f"{self._device.name} Dust"

    @property
    def state(self):
        """Return the dust level, or None before the first update."""
        env = self._device.environmental_state
        return env.dust if env else None
class DysonHumiditySensor(DysonSensor):
    """Representation of Dyson Humidity sensor."""

    def __init__(self, device):
        """Create a new Dyson Humidity sensor."""
        super().__init__(device, "humidity")
        self._name = f"{self._device.name} Humidity"

    @property
    def state(self):
        """Return the humidity, STATE_OFF when the sensor reports 0."""
        env = self._device.environmental_state
        if not env:
            return None
        if env.humidity == 0:
            return STATE_OFF
        return env.humidity
class DysonTemperatureSensor(DysonSensor):
    """Representation of Dyson Temperature sensor."""

    def __init__(self, device, unit):
        """Create a new Dyson Temperature sensor."""
        super().__init__(device, "temperature")
        self._name = f"{self._device.name} Temperature"
        self._unit = unit

    @property
    def state(self):
        """Return the temperature converted to the configured unit."""
        env = self._device.environmental_state
        if not env:
            return None
        # The device reports Kelvin; 0 means the sensor is off.
        kelvin = env.temperature
        if kelvin == 0:
            return STATE_OFF
        if self._unit == TEMP_CELSIUS:
            celsius = kelvin - 273.15
            return float(f"{celsius:.1f}")
        fahrenheit = kelvin * 9 / 5 - 459.67
        return float(f"{fahrenheit:.1f}")

    @property
    def unit_of_measurement(self):
        """Return the unit the value is expressed in."""
        return self._unit
class DysonAirQualitySensor(DysonSensor):
    """Representation of Dyson Air Quality sensor (lower is better)."""

    def __init__(self, device):
        """Create a new Dyson Air Quality sensor."""
        super().__init__(device, "air_quality")
        self._name = f"{self._device.name} AQI"

    @property
    def state(self):
        """Return the VOC-based air quality index as an int."""
        env = self._device.environmental_state
        return int(env.volatil_organic_compounds) if env else None
| sdague/home-assistant | homeassistant/components/dyson/sensor.py | Python | apache-2.0 | 6,457 |
from django.db import models
class ReportHasCategories(models.Model):
    """Join table linking a Report to one of its Categories."""

    report = models.ForeignKey('Report')
    category = models.ForeignKey('Category')

    def __str__(self):
        # Human-readable identification used in the admin and the shell.
        label = 'ReportHasCategories with id - {}'
        return label.format(self.id)
| slavi104/cashtracker | cashtracker_project/app_cashtracker/models/ReportHasCategories.py | Python | lgpl-3.0 | 248 |
# Lint as: python2, python3
# Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Genotypes used by NAS."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib import slim as contrib_slim
from third_party.deeplab.core import nas_cell
slim = contrib_slim
class PNASCell(nas_cell.NASBaseCell):
  """Configuration and construction of the PNASNet-5 Cell."""

  def __init__(self, num_conv_filters, drop_path_keep_prob, total_num_cells,
               total_training_steps, batch_norm_fn=slim.batch_norm):
    # Operation naming scheme: op_kernel-size_num-layers.
    cell_operations = [
        'separable_5x5_2', 'max_pool_3x3', 'separable_7x7_2',
        'max_pool_3x3', 'separable_5x5_2', 'separable_3x3_2',
        'separable_3x3_2', 'max_pool_3x3', 'separable_3x3_2', 'none'
    ]
    # Which hidden states are consumed by later steps, and which hidden
    # state feeds each operation above.
    reused_states = [1, 1, 0, 0, 0, 0, 0]
    state_indices = [1, 1, 0, 0, 0, 0, 4, 0, 1, 0]
    super(PNASCell, self).__init__(
        num_conv_filters, cell_operations, reused_states, state_indices,
        drop_path_keep_prob, total_num_cells, total_training_steps,
        batch_norm_fn)
| googleinterns/wss | third_party/deeplab/core/nas_genotypes.py | Python | apache-2.0 | 1,804 |
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
import base64
import datetime
import mimetypes
import six
from parse_rest.connection import API_ROOT, ParseBase
from parse_rest.query import QueryManager
from parse_rest.core import ParseError
def complex_type(name=None):
    """Class decorator registering a ParseType subclass for deserialization.

    When ``name`` is omitted, the decorated class's own name is used as
    the key in ``ParseType.type_mapping``.
    """
    def register(cls):
        key = name if name else cls.__name__
        ParseType.type_mapping[key] = cls
        return cls
    return register
class ParseType(object):
    """Base class for Parse's "complex" value types.

    Handles conversion between native Python values and the JSON
    structures used by the Parse REST API.
    """

    # Maps Parse ``__type`` names to the registered ParseType subclass.
    type_mapping = {}

    @staticmethod
    def convert_from_parse(parse_key, parse_data):
        """Convert a value received from Parse into its native object."""
        parse_type = None
        if isinstance(parse_data, dict):
            if '__type' in parse_data:
                parse_type = parse_data.pop('__type')
            elif parse_key == 'ACL':
                parse_type = 'ACL'

        # Not a Parse "special class": return the raw value unchanged.
        if not parse_type:
            return parse_data

        native = ParseType.type_mapping.get(parse_type)
        return native.from_native(**parse_data) if native else parse_data

    @staticmethod
    def convert_to_parse(python_object, as_pointer=False):
        """Convert a native Python value into its Parse representation."""
        # User is derived from ParseResource, not Object; check against
        # ParseResource.
        is_object = isinstance(python_object, ParseResource)
        if is_object and not as_pointer:
            return dict([(k, ParseType.convert_to_parse(v, as_pointer=True))
                         for k, v in python_object._editable_attrs.items()
                         ])

        python_type = ParseResource if is_object else type(python_object)

        # Classes that need to be cast to a different type before
        # serialization.
        transformation_map = {
            datetime.datetime: Date,
            ParseResource: Pointer
        }

        if (hasattr(python_object, '__iter__') and
                not isinstance(python_object, (six.string_types[0], ParseType))):
            # It's an iterable? Repeat this whole process on each element.
            if isinstance(python_object, dict):
                # BUG FIX: the original called ``dict.iteritems``, which
                # does not exist on Python 3.  Iterate a snapshot of
                # ``items()`` instead (works on both major versions and
                # is safe while reassigning values).
                for key, value in list(python_object.items()):
                    python_object[key] = ParseType.convert_to_parse(
                        value, as_pointer=as_pointer)
                return python_object
            else:
                return [ParseType.convert_to_parse(o, as_pointer=as_pointer)
                        for o in python_object]

        if python_type in transformation_map:
            klass = transformation_map.get(python_type)
            return klass(python_object)._to_native()

        if isinstance(python_object, ParseType):
            return python_object._to_native()

        return python_object

    @classmethod
    def from_native(cls, **kw):
        """Default deserializer; subclasses override where needed."""
        return cls(**kw)

    def _to_native(self):
        raise NotImplementedError("_to_native must be overridden")
@complex_type('Pointer')
class Pointer(ParseType):
    """Parse pointer: a lightweight reference to another object."""

    @classmethod
    def from_native(cls, **kw):
        # Build a stub carrying only objectId; the full object is loaded
        # lazily the first time any other field is accessed.
        target_class = Object.factory(kw.get('className'))
        return target_class(objectId=kw.get('objectId'), _is_loaded=False)

    def __init__(self, obj):
        self._object = obj

    def _to_native(self):
        referenced = self._object
        return {
            '__type': 'Pointer',
            'className': referenced.className,
            'objectId': referenced.objectId
        }
@complex_type('Object')
class EmbeddedObject(ParseType):
    """Full Parse object embedded inline in a response payload."""

    @classmethod
    def from_native(cls, **kw):
        # Instantiate the registered Object subclass for this className,
        # forwarding the remaining fields to its constructor.
        klass = Object.factory(kw.pop('className'))
        return klass(**kw)
@complex_type()
class Relation(ParseType):
    """Parse Relation type.

    NOTE(review): deserialization is not implemented; ``from_native``
    silently returns None, so relations currently round-trip as nothing.
    """

    @classmethod
    def from_native(cls, **kw):
        pass
@complex_type()
class Date(ParseType):
    """Parse Date type, bridging to ``datetime.datetime``."""

    # strptime pattern for Parse's ISO-8601 timestamps.
    FORMAT = '%Y-%m-%dT%H:%M:%S.%f%Z'

    @classmethod
    def from_native(cls, **kw):
        return cls._from_str(kw.get('iso', ''))

    @staticmethod
    def _from_str(date_str):
        """turn a ISO 8601 string into a datetime object"""
        # Swap the trailing 'Z' for 'UTC' so the %Z directive consumes it.
        return datetime.datetime.strptime(date_str[:-1] + 'UTC', Date.FORMAT)

    def __init__(self, date):
        """Can be initialized either with a string or a datetime"""
        if isinstance(date, six.string_types):
            self._date = Date._from_str(date)
        elif isinstance(date, datetime.datetime):
            self._date = date

    def _to_native(self):
        # Parse expects ISO-8601 with exactly 3 millisecond digits, not
        # the 6 digits %f produces -- hence the [:-3] trim.
        stamp = self._date.strftime('%Y-%m-%dT%H:%M:%S.%f')[:-3]
        return {'__type': 'Date', 'iso': '{0}Z'.format(stamp)}
@complex_type('Bytes')
class Binary(ParseType):
    """Parse Bytes type: base64-encoded binary data."""

    @classmethod
    def from_native(cls, **kw):
        return cls(kw.get('base64', ''))

    def __init__(self, encoded_string):
        self._encoded = encoded_string
        # NOTE(review): on Python 3, ``str(bytes)`` yields the "b'...'"
        # repr rather than decoded text -- confirm the intended behavior
        # (an explicit ``.decode()`` may have been meant).
        self._decoded = str(base64.b64decode(self._encoded))

    def _to_native(self):
        return {'__type': 'Bytes', 'base64': self._encoded}
@complex_type()
class GeoPoint(ParseType):
    """Parse GeoPoint type: a latitude/longitude pair."""

    @classmethod
    def from_native(cls, **kw):
        return cls(kw.get('latitude'), kw.get('longitude'))

    def __init__(self, latitude, longitude):
        self.latitude = latitude
        self.longitude = longitude

    def _to_native(self):
        payload = {'__type': 'GeoPoint'}
        payload['latitude'] = self.latitude
        payload['longitude'] = self.longitude
        return payload
@complex_type()
class File(ParseType, ParseBase):
    """Parse File type: uploadable content with a hosted URL."""

    ENDPOINT_ROOT = '/'.join([API_ROOT, 'files'])

    @classmethod
    def from_native(cls, **kw):
        return cls(**kw)

    def __init__(self, name, content=None, mimetype=None, url=None):
        self._name = name
        self._file_url = url
        self._api_url = '/'.join([API_ROOT, 'files', name])
        self._content = content
        # BUG FIX: ``mimetypes.guess_type`` returns a (type, encoding)
        # tuple; only the type string is a valid Content-type header.
        self._mimetype = mimetype or mimetypes.guess_type(name)[0]
        if not content and not url:
            # Neither content nor a hosted URL was supplied: read the
            # file from disk using ``name`` as the path.
            with open(name) as f:
                content = f.read()
            self._content = content

    def __repr__(self):
        return '<File:%s>' % (getattr(self, '_name', None))

    def _to_native(self):
        return {
            '__type': 'File',
            'name': self._name,
            'url': self._file_url
        }

    def save(self, batch=False):
        """Upload the file; already-hosted files cannot be overwritten."""
        if self.url is not None:
            raise ParseError("Files can't be overwritten")
        uri = '/'.join([self.__class__.ENDPOINT_ROOT, self.name])
        headers = {'Content-type': self.mimetype}
        response = self.__class__.POST(uri, extra_headers=headers,
                                       batch=batch, body=self._content)
        self._file_url = response['url']
        self._name = response['name']
        self._api_url = '/'.join([API_ROOT, 'files', self._name])
        if batch:
            return response, lambda response_dict: None

    def delete(self, batch=False):
        """Delete the hosted file."""
        # BUG FIX: ``str.join`` takes a single iterable; the original
        # passed two positional arguments, raising TypeError at runtime.
        uri = '/'.join([self.__class__.ENDPOINT_ROOT, self.name])
        response = self.__class__.DELETE(uri, batch=batch)
        if batch:
            return response, lambda response_dict: None

    mimetype = property(lambda self: self._mimetype)
    url = property(lambda self: self._file_url)
    name = property(lambda self: self._name)
    _absolute_url = property(lambda self: self._api_url)
@complex_type()
class ACL(ParseType):
    """Parse ACL: per-user, per-role and public read/write permissions."""

    @classmethod
    def from_native(cls, **kw):
        return cls(kw)

    def __init__(self, acl=None):
        self._acl = acl or {}

    def _to_native(self):
        return self._acl

    def __repr__(self):
        return '%s(%s)' % (type(self).__name__, repr(self._acl))

    def set_default(self, read=False, write=False):
        """Set permissions for the public ("*") entry."""
        self._set_permission("*", read, write)

    def set_role(self, role, read=False, write=False):
        """Set permissions for a role, given as an object or a role name."""
        role_name = role.name if isinstance(role, ParseResource) else role
        self._set_permission("role:%s" % role_name, read, write)

    def set_user(self, user, read=False, write=False):
        """Set permissions for a user, given as an object or an objectId."""
        user_id = user.objectId if isinstance(user, ParseResource) else user
        self._set_permission(user_id, read, write)

    def set_all(self, permissions):
        """Replace the whole ACL from {name: {read, write}} mappings."""
        self._acl.clear()
        for entry_name, perm in permissions.items():
            self._set_permission(entry_name, **perm)

    def _set_permission(self, name, read=False, write=False):
        # Only a literal ``True`` (identity-checked, as in the original)
        # grants a permission; an entry granting nothing is removed.
        granted = {}
        if read is True:
            granted["read"] = True
        if write is True:
            granted["write"] = True
        if granted:
            self._acl[name] = granted
        else:
            self._acl.pop(name, None)
class Function(ParseBase):
    """Callable wrapper around a Parse cloud function."""

    ENDPOINT_ROOT = '/'.join((API_ROOT, 'functions'))

    def __init__(self, name):
        self.name = name

    def __call__(self, **kwargs):
        """Invoke the cloud function with keyword arguments as payload."""
        endpoint = '/' + self.name
        return self.POST(endpoint, **kwargs)
class ParseResource(ParseBase):
    """Base class for REST-backed Parse resources.

    Handles create/update/delete, lazy loading of pointer targets, and
    the server-managed createdAt/updatedAt timestamps.
    """
    # Attributes owned by the server; never sent back on updates.
    PROTECTED_ATTRIBUTES = ['objectId', 'createdAt', 'updatedAt']
    @property
    def _editable_attrs(self):
        """Public, non-protected attributes eligible for serialization."""
        protected_attrs = self.__class__.PROTECTED_ATTRIBUTES
        allowed = lambda a: a not in protected_attrs and not a.startswith('_')
        return dict([(k, v) for k, v in self.__dict__.items() if allowed(k)])
    def __init__(self, **kw):
        self.objectId = None
        self._init_attrs(kw)
    def __getattr__(self, attr):
        # if object is not loaded and attribute is missing, try to load it.
        # _is_loaded is deleted first so a failed GET cannot loop forever.
        if not self.__dict__.get('_is_loaded', True):
            del self._is_loaded
            self._init_attrs(self.GET(self._absolute_url))
        return object.__getattribute__(self, attr) #preserve default if attr not exists
    def _init_attrs(self, args):
        # Convert every raw Parse value to its Python-side representation.
        for key, value in six.iteritems(args):
            setattr(self, key, ParseType.convert_from_parse(key, value))
    def _to_native(self):
        return ParseType.convert_to_parse(self)
    def _get_updated_datetime(self):
        # Returns None when _updated_at has never been set.
        return self.__dict__.get('_updated_at') and self._updated_at._date
    def _set_updated_datetime(self, value):
        self._updated_at = Date(value)
    def _get_created_datetime(self):
        # Returns None when _created_at has never been set.
        return self.__dict__.get('_created_at') and self._created_at._date
    def _set_created_datetime(self, value):
        self._created_at = Date(value)
    def save(self, batch=False):
        """Create the resource if it has no objectId, otherwise update it."""
        if self.objectId:
            return self._update(batch=batch)
        else:
            return self._create(batch=batch)
    def _create(self, batch=False):
        """POST a new object; in batch mode return (response, callback)."""
        uri = self.__class__.ENDPOINT_ROOT
        response = self.__class__.POST(uri, batch=batch, **self._to_native())
        def call_back(response_dict):
            self.createdAt = self.updatedAt = response_dict['createdAt']
            self.objectId = response_dict['objectId']
        if batch:
            return response, call_back
        else:
            call_back(response)
    def _update(self, batch=False):
        """PUT the current state; in batch mode return (response, callback)."""
        response = self.__class__.PUT(self._absolute_url, batch=batch, **self._to_native())
        def call_back(response_dict):
            self.updatedAt = response_dict['updatedAt']
        if batch:
            return response, call_back
        else:
            call_back(response)
    def delete(self, batch=False):
        """DELETE the resource on the server."""
        response = self.__class__.DELETE(self._absolute_url, batch=batch)
        if batch:
            return response, lambda response_dict: None
    @property
    def className(self):
        return self.__class__.__name__
    @property
    def _absolute_url(self):
        return '%s/%s' % (self.__class__.ENDPOINT_ROOT, self.objectId)
    # Timestamps exposed as datetimes while stored internally as Date wrappers.
    createdAt = property(_get_created_datetime, _set_created_datetime)
    updatedAt = property(_get_updated_datetime, _set_updated_datetime)
    def __repr__(self):
        return '<%s:%s>' % (self.__class__.__name__, self.objectId)
class ObjectMetaclass(type):
    """Metaclass that wires each Object subclass to its own REST endpoint
    and attaches a class-specific Query manager."""
    def __new__(mcs, name, bases, dct):
        cls = super(ObjectMetaclass, mcs).__new__(mcs, name, bases, dct)
        # The attr check must be here because of the specific
        # six.with_metaclass implementation, where the metaclass is also
        # applied to an internal NewBase that has no set_endpoint_root method.
        if hasattr(cls, 'set_endpoint_root'):
            cls.set_endpoint_root()
        cls.Query = QueryManager(cls)
        return cls
class Object(six.with_metaclass(ObjectMetaclass, ParseResource)):
    """Base class for user-defined Parse classes; one REST 'class' each."""
    ENDPOINT_ROOT = '/'.join([API_ROOT, 'classes'])
    @classmethod
    def factory(cls, class_name):
        """find proper Object subclass matching class_name
        system types like _User are mapped to types without underscore (parse_resr.user.User)
        If user don't declare matching type, class is created on the fly
        """
        class_name = str(class_name.lstrip('_'))
        # Depth-first walk over the ParseResource subclass tree.
        types = ParseResource.__subclasses__()
        while types:
            t = types.pop()
            if t.__name__ == class_name:
                return t
            types.extend(t.__subclasses__())
        else:
            # while/else: reached only when the loop exhausts without a match.
            return type(class_name, (Object,), {})
    @classmethod
    def set_endpoint_root(cls):
        """Point ENDPOINT_ROOT at .../classes/<ClassName> for this subclass."""
        root = '/'.join([API_ROOT, 'classes', cls.__name__])
        if cls.ENDPOINT_ROOT != root:
            cls.ENDPOINT_ROOT = root
        return cls.ENDPOINT_ROOT
    @property
    def _absolute_url(self):
        # Unsaved objects have no server-side URL yet.
        if not self.objectId:
            return None
        return '/'.join([self.__class__.ENDPOINT_ROOT, self.objectId])
    @property
    def as_pointer(self):
        return Pointer(self)
    def increment(self, key, amount=1):
        """
        Increment one value in the object. Note that this happens immediately:
        it does not wait for save() to be called
        """
        payload = {
            key: {
                '__op': 'Increment',
                'amount': amount
            }
        }
        self.__class__.PUT(self._absolute_url, **payload)
        # NOTE(review): assumes the attribute already exists locally and is
        # numeric; raises KeyError/TypeError otherwise — confirm callers.
        self.__dict__[key] += amount
    def removeRelation(self, key, className, objectsId):
        self.manageRelation('RemoveRelation', key, className, objectsId)
    def addRelation(self, key, className, objectsId):
        self.manageRelation('AddRelation', key, className, objectsId)
    def manageRelation(self, action, key, className, objectsId):
        """Apply an Add/RemoveRelation op over the given object ids."""
        objects = [{
                       "__type": "Pointer",
                       "className": className,
                       "objectId": objectId
                   } for objectId in objectsId]
        payload = {
            key: {
                "__op": action,
                "objects": objects
            }
        }
        self.__class__.PUT(self._absolute_url, **payload)
        # Local value is reset; the relation contents live server-side only.
        self.__dict__[key] = ''
| GbalsaC/bitnamiP | ParsePy/parse_rest/datatypes.py | Python | agpl-3.0 | 15,251 |
import csv
import os
from django.core.management.base import BaseCommand
import resultsbot
from resultsbot.importers.modgov import ModGovElectionMatcher
from uk_results.helpers import read_csv_from_url
class Command(BaseCommand):
    """Discover ModGov install URLs for elections that lack one.

    Loads the known election-id -> URL mapping from
    ``election_id_to_url.csv``, fetches the councils marked as ModGov users
    from a published Google Sheet and, for each unmapped election, tries to
    match it to an election on that install, appending matches to the CSV.
    """
    # NOTE: dead commented-out add_arguments() removed; the command takes
    # no CLI arguments.

    def handle(self, **options):
        # Known mapping of election id -> ModGov URL, one CSV row each.
        id_to_url = {}
        path = os.path.join(
            os.path.dirname(resultsbot.__file__), "election_id_to_url.csv"
        )
        with open(path) as f:
            csv_file = csv.reader(f)
            for line in csv_file:
                try:
                    id_to_url[line[0]] = line[1]
                except IndexError:
                    # Skip short/malformed rows rather than aborting.
                    continue

        sheet_url = "https://docs.google.com/spreadsheets/d/e/2PACX-1vTI29qljbILmtJ9QD0WUAjufnwWo-vb3Yn0kfmv0BbS7rNm-WPZ_sdWZyLY_L4vHmbw41gKkzIl94__/pub?output=csv"
        data = []
        for row in read_csv_from_url(sheet_url):
            uses_mg = row.get("Uses MG?") or ""
            if uses_mg.strip().upper() != "Y":
                continue
            election_id = row["Election ID"]
            # Renamed from `url` to avoid shadowing the sheet URL above.
            modgov_url = row["ModGov Install"]
            if not election_id or not modgov_url:
                continue
            data.append((election_id, modgov_url))

        for election_id, modgov_url in data:
            # Membership test directly on the dict (no .keys() needed).
            if election_id in id_to_url:
                continue
            print(election_id)
            matcher = ModGovElectionMatcher(
                base_domain=modgov_url, election_id=election_id
            )
            try:
                matcher.find_elections()
                election = matcher.match_to_election()
            except KeyboardInterrupt:
                raise
            except Exception as e:
                print("Error on {} ({})".format(election_id, e))
                continue
            if election:
                print("{},{}".format(election_id, election.url))
                with open(path, "a") as f:
                    f.write("\n{},{}".format(election_id, election.url))
            else:
                print("No URL found for {}!".format(election_id))
                print("\tHighest ID was {}".format(matcher.lookahead))
                print("\tTry the following for debugging:")
                print("\t" + matcher.format_elections_html_url())
                print("\t" + matcher.format_elections_api_url())
| DemocracyClub/yournextrepresentative | ynr/apps/resultsbot/management/commands/resultsbot_match_elections_for_mg_url.py | Python | agpl-3.0 | 2,653 |
# -*- coding: utf-8 -*-
import pytest
import random
from random_words import RandomWords
from random_words import RandomNicknames
from random_words import RandomEmails
from random_words import LoremIpsum
class TestsRandomWords:
    """Tests for RandomWords ('x' is deliberately absent from letters)."""

    @classmethod
    def setup_class(cls):
        cls.rw = RandomWords()
        cls.letters = 'qwertyuiopasdfghjklzcvbnm'

    def test_random_word(self):
        for letter in self.letters:
            assert self.rw.random_word(letter)[0] == letter

    def test_random_word_value_error(self):
        for bad_letter in ('x', 0, -1, 9, ['u'], 'fs'):
            pytest.raises(ValueError, self.rw.random_word, bad_letter)

    def test_random_words(self):
        for letter in self.letters:
            for word in self.rw.random_words(letter):
                assert word[0] == letter

    def test_random_words_value_error(self):
        for bad_letter in ('fa', ['fha'], 0, -1):
            pytest.raises(ValueError, self.rw.random_words, bad_letter)
        pytest.raises(ValueError, self.rw.random_words, letter=None,
                      count=1000000)
        for bad_count in (0, None, [8], -5):
            pytest.raises(ValueError, self.rw.random_words, count=bad_count)

    def test_random_words_length_list(self):
        assert len(self.rw.random_words()) == 1
        assert len(self.rw.random_words(count=10)) == 10
        assert len(self.rw.random_words(count=73)) == 73
        for letter in self.letters:
            expected = random.randint(1, 3)
            assert len(self.rw.random_words(letter, count=expected)) == expected
        for letter in self.letters:
            pytest.raises(
                ValueError, self.rw.random_words, letter, count=1000000)
class TestsRandomNicknames:
    """Tests for RandomNicknames (full a-z alphabet, both genders)."""

    @classmethod
    def setup_class(cls):
        cls.rn = RandomNicknames()
        cls.letters = 'qwertyuiopasdfghjklzxcvbnm'

    def test_random_nick(self):
        genders = ('f', 'm')
        for letter in self.letters:
            nick = self.rn.random_nick(letter, random.choice(genders))
            assert nick[0].lower() == letter

    def test_random_nick_value_error(self):
        bad_calls = (
            ('ą',), ('Ź', 'f'), ('ż', 'm'), ('ą', None), ('ż',),
            (0,), (-1, None), (9,), (9, None), (['u'],), ('fs',),
        )
        for args in bad_calls:
            pytest.raises(ValueError, self.rn.random_nick, *args)

    def test_random_nicks(self):
        genders = ('f', 'm')
        for letter in self.letters:
            for nick in self.rn.random_nicks(letter, random.choice(genders)):
                assert nick[0].lower() == letter

    def test_random_nicks_count(self):
        for letter in self.letters:
            pytest.raises(
                ValueError, self.rn.random_nicks, letter, count=1000000)

    def test_random_nicks_letter_must_be_string(self):
        pytest.raises(ValueError, self.rn.random_nicks, 0)

    def test_random_nicks_not_gender(self):
        for bad_gender in ([], (), set(), frozenset(), "", "dż", "wtf"):
            pytest.raises(ValueError, self.rn.random_nicks, gender=bad_gender)

    def test_random_nicks_gender_value_error(self):
        for gender in ("f", "m"):
            pytest.raises(
                ValueError, self.rn.random_nicks, letter=None, gender=gender,
                count=1000000)
class TestsRandomLoremIpsum:
    """Tests for the LoremIpsum sentence generator."""

    @classmethod
    def setup_class(cls):
        cls.li = LoremIpsum()

    def test_lorem_sentence(self):
        assert '.' in self.li.get_sentence()
        assert '.' == self.li.get_sentence()[-1]

    def test_lorem_sentences(self):
        assert '.' in self.li.get_sentences()
        # BUG FIX: the original asserted on get_sentence() here (copy-paste),
        # so get_sentences()'s trailing character was never actually checked.
        assert '.' == self.li.get_sentences()[-1]

    def test_lorem_sentences_list(self):
        # isinstance is the idiomatic type check (type(x) == list also
        # rejects subclasses for no reason).
        assert isinstance(self.li.get_sentences_list(), list)

    def test_lorem_value_error(self):
        for bad_count in (0, -2):
            pytest.raises(ValueError, self.li.get_sentences, sentences=bad_count)
class TestsRandomEmails:
    """Tests for the RandomEmails generator."""

    @classmethod
    def setup_class(cls):
        cls.re = RandomEmails()

    def test_random_mail_type(self):
        assert '@' in self.re.randomMail()

    def test_random_mails_type(self):
        # isinstance is the idiomatic type check (type(x) == list also
        # rejects subclasses for no reason).
        assert isinstance(self.re.randomMails(), list)

    def test_random_mails_count(self):
        for bad_count in (-1, 0, list(), dict(), set()):
            pytest.raises(ValueError, self.re.randomMails, count=bad_count)
| tomislater/RandomWords | random_words/test/test_random_words.py | Python | mit | 6,253 |
# Package version string, exposed for runtime introspection.
__version__ = '1.2.8'
| ricotabor/opendrop | opendrop/vendor/harvesters/__init__.py | Python | gpl-2.0 | 22 |
# -*- Mode: Python; py-indent-offset: 4 -*-
# vim: tabstop=4 shiftwidth=4 expandtab
#
# Copyright (C) 2013 Simon Feltman <sfeltman@gnome.org>
#
# docstring.py: documentation string generator for gi.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301
# USA
from ._gi import \
VFuncInfo, \
FunctionInfo, \
Direction
#: Module storage for currently registered doc string generator function.
#: ``None`` until set_doc_string_generator() installs one.
_generate_doc_string_func = None
def set_doc_string_generator(func):
    """Set doc string generator function used by generate_doc_string().

    :Parameters:
        func : callable
            Function which takes a GIInfoStruct and returns
            documentation for it.
    """
    global _generate_doc_string_func
    _generate_doc_string_func = func
def get_doc_string_generator():
    """Return the currently registered doc string generator function."""
    return _generate_doc_string_func
def generate_doc_string(info):
    """Generate a doc string given a GIInfoStruct.

    This passes the info struct to the currently registered doc string
    generator and returns the result.
    """
    return _generate_doc_string_func(info)
def split_function_info_args(info):
    """Split a function's args into a tuple of two lists.

    Note that args marked as Direction.INOUT appear in both lists.

    :Returns:
        Tuple of (in_args, out_args)
    """
    incoming = []
    outgoing = []
    for arg in info.get_arguments():
        direction = arg.get_direction()
        # INOUT arguments are appended to both lists.
        if direction in (Direction.IN, Direction.INOUT):
            incoming.append(arg)
        if direction in (Direction.OUT, Direction.INOUT):
            outgoing.append(arg)
    return (incoming, outgoing)
def _generate_callable_info_function_signature(info):
    """Default doc string generator: renders a call signature like
    ``name(self, a:int, b=None) -> out:str``."""
    in_args, out_args = split_function_info_args(info)
    in_args_strs = []
    if isinstance(info, VFuncInfo):
        in_args_strs = ['self']
    elif isinstance(info, FunctionInfo):
        if info.is_method():
            in_args_strs = ['self']
        elif info.is_constructor():
            in_args_strs = ['cls']
    # Build lists of indices prior to adding the docs because
    # it is possible the index retrieved comes before the in
    # argument being used.
    ignore_indices = set([arg.get_destroy() for arg in in_args])
    user_data_indices = set([arg.get_closure() for arg in in_args])
    for i, arg in enumerate(in_args):
        if i in ignore_indices:
            continue
        argstr = arg.get_name()
        hint = arg.get_pytype_hint()
        if hint not in ('void',):
            argstr += ':' + hint
        if arg.may_be_null() or i in user_data_indices:
            # allow-none or user_data from a closure
            argstr += '=None'
        elif arg.is_optional():
            argstr += '=<optional>'
        in_args_strs.append(argstr)
    in_args_str = ', '.join(in_args_strs)
    if out_args:
        # With out-arguments, render "f(in...) -> out..." form.
        out_args_str = ', '.join(arg.get_name() + ':' + arg.get_pytype_hint()
                                 for arg in out_args)
        return '%s(%s) -> %s' % (info.get_name(), in_args_str, out_args_str)
    else:
        return '%s(%s)' % (info.get_name(), in_args_str)
# Install the default generator at import time so generate_doc_string()
# works out of the box.
set_doc_string_generator(_generate_callable_info_function_signature)
| onia/pygobject | gi/docstring.py | Python | lgpl-2.1 | 3,853 |
import csv
import io
import sys
from functools import reduce
from retriever.lib.cleanup import correct_invalid_value, Cleanup
class Dataset():
    """Dataset generic properties"""

    def __init__(self, name=None, url=None):
        # Identifying metadata shared by every dataset kind.
        self.name, self.url = name, url
class TabularDataset(Dataset):
    """Tabular database table.

    Fixes over the previous implementation:

    * ``columns``, ``replace_columns`` and ``cleanup`` no longer use shared
      mutable default arguments (``get_insert_columns`` used to clear the
      shared ``columns`` default in place via ``self.columns[:] = []``).
    * extra keyword arguments are always applied with ``setattr``; the old
      code assigned the literal attribute ``self.key`` when the attribute
      already existed, silently dropping the intended override.
    * ``values_from_line`` catches ``IndexError`` instead of a bare
      ``except``.
    """

    def __init__(self,
                 name=None,
                 url=None,
                 pk=True,
                 contains_pk=False,
                 delimiter=None,
                 header_rows=1,
                 column_names_row=1,
                 fixed_width=False,
                 cleanup=None,
                 record_id=0,
                 columns=None,
                 replace_columns=None,
                 missingValues=None,
                 cleaned_columns=False,
                 number_of_records=None,
                 **kwargs):
        self.name = name
        self.url = url
        self.pk = pk
        self.contains_pk = contains_pk
        self.delimiter = delimiter
        self.header_rows = header_rows
        self.column_names_row = column_names_row
        self.fixed_width = fixed_width
        # Fresh per-instance objects instead of shared mutable defaults.
        self.cleanup = cleanup if cleanup is not None else Cleanup()
        self.record_id = record_id
        self.columns = columns if columns is not None else []
        self.replace_columns = replace_columns if replace_columns is not None else []
        self.missingValues = missingValues
        self.cleaned_columns = cleaned_columns
        self.dataset_type = "TabularDataset"
        self.number_of_records = number_of_records
        for key in kwargs:
            # Always setattr: the old branch wrote ``self.key = kwargs[key]``
            # for existing attributes, creating an attribute literally named
            # "key" instead of overriding the intended one.
            setattr(self, key, kwargs[key])
        if hasattr(self, 'schema'):
            self.add_schema()
        if hasattr(self, 'dialect'):
            self.add_dialect()
        Dataset.__init__(self, self.name, self.url)

    def add_dialect(self):
        """Initialize dialect table properties.

        These include a table's null or missing values,
        the delimiter, the function to perform on missing values
        and any values in the dialect's dict.
        """
        for key, _ in self.dialect.items():
            if key == "missingValues":
                if self.dialect["missingValues"]:
                    self.missingValues = self.dialect["missingValues"]
                    self.cleanup = Cleanup(correct_invalid_value,
                                           missingValues=self.missingValues)
            elif key == "delimiter":
                self.delimiter = str(self.dialect["delimiter"])
            else:
                setattr(self, key, self.dialect[key])

    def add_schema(self):
        """Add a schema to the table object.

        Define the data type for the columns in the table.
        """
        spec_data_types = {
            # Dict to map retriever and frictionless data types.
            # spec types not defined, default to char
            "integer": "int",
            "object": "bigint",
            "number": "double",
            "string": "char",
            "boolean": "bool",
            "year": "int",
            # Retriever specific data types
            "auto": "auto",
            "int": "int",
            "bigint": "bigint",
            "double": "double",
            "decimal": "decimal",
            "char": "char",
            "bool": "bool",
            "skip": "skip"
        }
        for key in self.schema:
            if key == "fields":
                column_list = []
                for obj in self.schema["fields"]:
                    # pk-* / ct-* markers pass through untranslated.
                    if str(obj["type"]).startswith("pk-") or str(
                            obj["type"]).startswith("ct-"):
                        type = obj["type"]
                    else:
                        type = spec_data_types.get(obj["type"], "char")
                    if "size" in obj:
                        column_list.append((obj["name"], (type, obj["size"])))
                    else:
                        column_list.append((obj["name"], (type,)))
                self.columns = column_list
            elif key == "ct_column":
                setattr(self, key, "'" + self.schema[key] + "'")
            else:
                setattr(self, key, self.schema[key])

    def auto_get_columns(self, header):
        """Get column names from the header row.

        Identifies the column names from the header row.
        Replaces database keywords with alternatives.
        Replaces special characters and spaces.
        """
        columns = [self.clean_column_name(x) for x in header]
        column_values = {x: [] for x in columns if x}
        self.cleaned_columns = True
        return [[x, None] for x in columns if x], column_values

    def clean_column_name(self, column_name):
        """Clean column names using the expected sql guidelines.

        Remove leading whitespace, replace sql keywords, special
        characters and spaces.
        """
        column_name = column_name.lower().strip().replace("\n", "")
        replace_columns = {old.lower(): new.lower() for old, new in self.replace_columns}
        column_name = str(replace_columns.get(column_name, column_name).strip())
        replace = [
            ("%", "percent"),
            ("&", "and"),
            ("\xb0", "degrees"),
            ("^", "_power_"),
            ("<", "_lthn_"),
            (">", "_gthn_"),
            ("@", "_at_"),
        ]
        replace += [(x, '') for x in (")", "?", "#", ";", "\n", "\r", '"', "'")]
        replace += [(x, '_') for x in (" ", "(", "/", ".", "+", "-", "*", ":", "[", "]")]
        column_name = reduce(lambda x, y: x.replace(*y), replace, column_name)
        while "__" in column_name:
            column_name = column_name.replace("__", "_")
        column_name = column_name.lstrip("0123456789_").rstrip("_")
        # SQL reserved words mapped to safe substitutes.
        replace_dict = {
            "group": "grp",
            "order": "ordered",
            "check": "checked",
            "references": "refs",
            "long": "lon",
            "column": "columns",
            "cursor": "cursors",
            "delete": "deleted",
            "insert": "inserted",
            "join": "joins",
            "select": "selects",
            "table": "tables",
            "update": "updates",
            "date": "record_date",
            "index": "indices",
            "repeat": "repeats",
            "system": "systems",
            "class": "classes",
            "left": "vleft",
            "right": "vright",
            "union": "unions"
        }
        for x in (")", "\n", "\r", '"', "'"):
            replace_dict[x] = ''
        for x in (" ", "(", "/", ".", "-"):
            replace_dict[x] = '_'
        return replace_dict.get(column_name, column_name)

    def combine_on_delimiter(self, line_as_list):
        """Combine a list of values into a line of csv data."""
        dialect = csv.excel
        dialect.escapechar = "\\"
        if sys.version_info >= (3, 0):
            writer_file = io.StringIO()
        else:
            writer_file = io.BytesIO()
        writer = csv.writer(writer_file, dialect=dialect, delimiter=self.delimiter)
        writer.writerow(line_as_list)
        return writer_file.getvalue()

    def values_from_line(self, line):
        """Return expected row values.

        Includes dynamically generated field values like auto pk.
        """
        linevalues = []
        if self.columns[0][1][0] == 'pk-auto':
            column = 1
        else:
            column = 0
        for value in line:
            try:
                this_column = self.columns[column][1][0]
                if this_column == "skip":
                    # Data type "skip": ignore the value.
                    pass
                elif this_column == "combine":
                    # "combine": append value to end of previous column.
                    linevalues[-1] += " " + value
                else:
                    # Otherwise, add new value
                    linevalues.append(value)
            except IndexError:
                # too many values for columns; ignore
                pass
            column += 1
        # make sure we have enough values by padding with None
        keys = self.get_insert_columns(join=False, create=False)
        if len(linevalues) < len(keys):
            linevalues.extend([None for _ in range(len(keys) - len(linevalues))])
        return linevalues

    def get_insert_columns(self, join=True, create=False):
        """Get column names for insert statements.

        `create` should be set to `True` if the returned values are going to be used
        for creating a new table. It includes the `pk_auto` column if present. This
        column is not included by default because it is not used when generating
        insert statements for database management systems.
        """
        columns = []
        if not self.cleaned_columns:
            # Normalize names once; rebind rather than clearing in place so a
            # caller-supplied list is never emptied as a side effect.
            self.columns = [
                (self.clean_column_name(name), definition)
                for name, definition in self.columns
            ]
            self.cleaned_columns = True
        for item in self.columns:
            if not create and item[1][0] == 'pk-auto':
                # don't include this column if create=False
                continue
            thistype = item[1][0]
            if thistype not in ('skip', 'combine'):
                columns.append(item[0])
        if join:
            return ", ".join(columns)
        return columns

    def get_column_datatypes(self):
        """Get the list of data types matching get_insert_columns()."""
        columns = []
        for item in self.get_insert_columns(False):
            for column in self.columns:
                if item == column[0]:
                    columns.append(column[1][0])
        return columns
class RasterDataset(Dataset):
    """Raster table implementation"""

    def __init__(self, name=None, url=None, dataset_type="RasterDataset", **kwargs):
        self.name = name
        self.url = url
        self.dataset_type = dataset_type
        # Raster-specific metadata; left unset unless supplied via kwargs.
        self.group = None
        self.relative_path = 0
        self.resolution = None
        self.resolution_units = None
        self.dimensions = None
        self.noDataValue = None
        self.geoTransform = None
        self.parameter = None
        self.extent = None
        # Extra keyword arguments override any of the defaults above.
        for attr_name, attr_value in kwargs.items():
            setattr(self, attr_name, attr_value)
        Dataset.__init__(self, self.name, self.url)
class VectorDataset(Dataset):
    """Vector table implementation."""

    def __init__(self, name=None, url=None, dataset_type="VectorDataset", **kwargs):
        self.name = name
        self.pk = None
        self.contains_pk = False
        self.feature_count = 0
        self.attributes = []
        self.attributes_dict = {}
        self.fields_dict = {}
        self.extent = {}
        self.saptialref = None
        self.dataset_type = dataset_type
        self.url = url
        for key in kwargs:
            # BUG FIX: the original wrote ``self.key = kwargs[key]`` when the
            # attribute already existed, creating an attribute literally named
            # "key" and silently dropping the intended override.
            setattr(self, key, kwargs[key])
        Dataset.__init__(self, self.name, self.url)
# Registry mapping a dataset-type keyword to its table class.
myTables = {
    "vector": VectorDataset,
    "raster": RasterDataset,
    "tabular": TabularDataset,
}
| henrykironde/deletedret | retriever/lib/table.py | Python | mit | 11,293 |
import json
import pytest
from unittest import mock
from datetime import datetime
from storm.expr import Ne, Select
from stoqlib.domain.overrides import ProductBranchOverride
from stoqlib.domain.payment.card import CreditCardData, CreditProvider
from stoqlib.domain.payment.method import PaymentMethod
from stoqlib.domain.payment.payment import Payment
from stoqlib.domain.person import LoginUser
from stoqlib.domain.sale import Sale
from stoqlib.domain.sellable import Sellable
from stoqlib.domain.station import BranchStation
from stoqlib.domain.till import Till
from stoqlib.lib.formatters import raw_document
from stoqlib.lib.parameters import sysparam
from stoqserver.api.resources.b1food import (_check_if_uuid,
_get_card_name,
_get_card_description,
_get_category_info,
_get_payment_method_with_provider_code,
_get_credit_provider_description,
_get_payment_method_description,
_get_payment_method_name,
_get_person_names,
generate_b1food_token)
@pytest.fixture
def sale(example_creator, current_user, current_station):
    """Confirmed sale on 2020-01-02 with an NF-e invoice key, one item sold
    below base price (10 vs 15), a CPF client, and two inbound payments
    (paid values 10 and 5, the latter on card)."""
    test_sale = example_creator.create_sale()
    test_sale.branch = current_station.branch
    test_sale.open_date = datetime.strptime('2020-01-02', '%Y-%m-%d')
    test_sale.confirm_date = datetime.strptime('2020-01-02', '%Y-%m-%d')
    test_sale.invoice.key = '33200423335270000159650830000000181790066862'
    sale_item = example_creator.create_sale_item(test_sale)
    sale_item.price = 10
    sale_item.base_price = 15
    sellable_category = example_creator.create_sellable_category(description='Category 1')
    sale_item.sellable.category = sellable_category
    sale_item.sellable.code = '1111111111111'
    client = example_creator.create_client()
    client.person.individual.cpf = '737.948.760-40'
    test_sale.client = client
    person = example_creator.create_person()
    person.login_user = current_user
    test_sale.salesperson.person = person
    payment = example_creator.create_payment(group=test_sale.group, payment_type=Payment.TYPE_IN)
    payment.paid_value = 10
    card_payment = example_creator.create_card_payment(payment_type=Payment.TYPE_IN,
                                                       payment_value=5)
    card_payment.group = test_sale.group
    card_payment.paid_value = 5
    # ICMS tax figures for the single item.
    sale_item.icms_info.v_icms = 1
    sale_item.icms_info.p_icms = 18
    return test_sale
@pytest.fixture
def sale_with_cnpj(example_creator, current_user, current_station):
    """Like ``sale``, but the client is a company (CNPJ) instead of an
    individual, and there is a single inbound payment of 10."""
    test_sale = example_creator.create_sale()
    test_sale.branch = current_station.branch
    test_sale.open_date = datetime.strptime('2020-01-02', '%Y-%m-%d')
    test_sale.confirm_date = datetime.strptime('2020-01-02', '%Y-%m-%d')
    sale_item = example_creator.create_sale_item(test_sale)
    sale_item.price = 10
    sale_item.base_price = 15
    sellable_category = example_creator.create_sellable_category(description='Category 1')
    sale_item.sellable.category = sellable_category
    sale_item.sellable.code = '1111111111111'
    client = example_creator.create_client()
    company = example_creator.create_company()
    company.cnpj = '35.600.423/0001-27'
    # Company client: drop the individual record entirely.
    client.person.individual = None
    test_sale.client = client
    test_sale.client.person.company = company
    person = example_creator.create_person()
    person.login_user = current_user
    test_sale.salesperson.person = person
    payment = example_creator.create_payment(group=test_sale.group)
    payment.payment_type = Payment.TYPE_IN
    payment.paid_value = 10
    sale_item.icms_info.v_icms = 1
    sale_item.icms_info.p_icms = 18
    return test_sale
@pytest.fixture
def sale_type_out(example_creator, current_user, current_station):
    """Like ``sale``, but the payment is of TYPE_OUT (outgoing) and there
    is no invoice key or card payment."""
    test_sale = example_creator.create_sale()
    test_sale.branch = current_station.branch
    test_sale.open_date = datetime.strptime('2020-01-02', '%Y-%m-%d')
    test_sale.confirm_date = datetime.strptime('2020-01-02', '%Y-%m-%d')
    sale_item = example_creator.create_sale_item(test_sale)
    sale_item.price = 10
    sale_item.base_price = 15
    sellable_category = example_creator.create_sellable_category(description='Category 1')
    sale_item.sellable.category = sellable_category
    sale_item.sellable.code = '1111111111111'
    client = example_creator.create_client()
    client.person.individual.cpf = '737.948.760-40'
    test_sale.client = client
    person = example_creator.create_person()
    person.login_user = current_user
    test_sale.salesperson.person = person
    payment = example_creator.create_payment(group=test_sale.group)
    payment.payment_type = Payment.TYPE_OUT
    payment.paid_value = 10
    sale_item.icms_info.v_icms = 1
    sale_item.icms_info.p_icms = 18
    return test_sale
@pytest.fixture
def cancelled_sale(example_creator, current_user, current_station):
    """Like ``sale``, but with STATUS_CANCELLED and item code '22222'."""
    sale = example_creator.create_sale()
    sale.status = Sale.STATUS_CANCELLED
    sale.branch = current_station.branch
    sale.open_date = datetime.strptime('2020-01-02', '%Y-%m-%d')
    sale.confirm_date = datetime.strptime('2020-01-02', '%Y-%m-%d')
    sale.invoice.key = '33200423335270000159650830000000181790066862'
    sale_item = example_creator.create_sale_item(sale)
    sale_item.price = 10
    sale_item.base_price = 15
    sellable_category = example_creator.create_sellable_category(description='Category 1')
    sale_item.sellable.category = sellable_category
    sale_item.sellable.code = '22222'
    client = example_creator.create_client()
    client.person.individual.cpf = '737.948.760-40'
    sale.client = client
    person = example_creator.create_person()
    person.login_user = current_user
    sale.salesperson.person = person
    payment = example_creator.create_payment(group=sale.group)
    payment.payment_type = Payment.TYPE_IN
    payment.paid_value = 10
    sale_item.icms_info.v_icms = 1
    sale_item.icms_info.p_icms = 18
    return sale
@pytest.fixture
def sellable(example_creator):
    """A plain sellable with default attributes."""
    return example_creator.create_sellable()
@pytest.fixture
def open_till(current_till, current_user):
    """The current till, guaranteed to be in the OPEN state."""
    already_open = current_till.status == Till.STATUS_OPEN
    if not already_open:
        current_till.open_till(current_user)
    return current_till
@pytest.fixture
def close_till(open_till, current_user):
    """A till closed by ``current_user`` with deterministic timestamps."""
    open_till.close_till(current_user)
    # Overwrite the timestamps after closing so tests can assert on the
    # fixed 2020-01-02 08:00/18:00 window.
    open_till.opening_date = datetime.strptime('2020-01-02 08:00', '%Y-%m-%d %H:%M')
    open_till.closing_date = datetime.strptime('2020-01-02 18:00', '%Y-%m-%d %H:%M')
    return open_till
@pytest.fixture
def network():
    """Static network payload used by the B1Food endpoint tests."""
    return {
        'id': '35868887-3fae-11eb-9f78-40b89ae8d341',
        'name': 'Company name',
    }
@pytest.fixture
def client_category(example_creator):
    """A default client category."""
    return example_creator.create_client_category()
@pytest.fixture
def branch_with_active_station(example_creator):
    # Branch that the active_station fixture attaches to.
    return example_creator.create_branch()
@pytest.fixture
def branch_with_inactive_station(example_creator):
    # Branch that the inactive_station fixture attaches to.
    return example_creator.create_branch()
@pytest.fixture
def active_station(example_creator, branch_with_active_station):
    # An is_active station on the "active" branch.
    return example_creator.create_station(branch=branch_with_active_station,
                                          is_active=True, name='active station')
@pytest.fixture
def inactive_station(example_creator, branch_with_inactive_station):
    # An inactive station on a branch that has no active stations.
    return example_creator.create_station(branch=branch_with_inactive_station,
                                          is_active=False, name='inactive station')
@pytest.fixture
def inactive_station2(example_creator, branch_with_active_station):
    # An inactive station sharing the branch with active_station.
    return example_creator.create_station(branch=branch_with_active_station,
                                          is_active=False, name='inactive station 2')
@mock.patch('stoqserver.api.resources.b1food.UUID')
def test_check_if_uuid_valid(uuid_mock):
    # BUG FIX (naming): the patched object is UUID, but the injected mock
    # parameter was misleadingly called ``abort``. The assertion checks that
    # _check_if_uuid tried to parse the raw id string with UUID.
    _check_if_uuid(['123'])
    assert uuid_mock.call_args_list[0][0][0] == '123'
@mock.patch('stoqserver.api.resources.b1food.abort')
def test_check_if_uuid_invalid(abort):
    """A non-UUID id must abort the request with a 400 and the pt-BR message."""
    _check_if_uuid(['123'])
    expected = (400, 'os IDs das lojas devem ser do tipo UUID')
    assert abort.call_args_list[0][0] == expected
def test_get_credit_provider_description(example_creator):
    """The provider description is simply its short name."""
    provider = example_creator.create_credit_provider(short_name='Test')
    assert _get_credit_provider_description(provider) == 'Test'
@pytest.mark.parametrize('size', (1, 10, 30, 128))
def test_generate_b1food_token(size):
    """Generated tokens must have exactly the requested length."""
    token = generate_b1food_token(size)
    assert len(token) == size
@mock.patch('stoqserver.api.resources.b1food.get_config')
def test_b1food_success_login(get_config_mock, b1food_client):
    """A matching client_id yields a non-expiring Bearer token."""
    get_config_mock.return_value.get.return_value = 'B1FoodClientId'
    params = {
        'response_type': 'token',
        'client_id': 'B1FoodClientId'
    }
    response = b1food_client.get('/b1food/oauth/authenticate',
                                 query_string=params)
    payload = json.loads(response.data.decode('utf-8'))
    assert 'access_token' in payload
    assert payload['token_type'] == 'Bearer'
    assert payload['expires_in'] == -1
@mock.patch('stoqserver.api.resources.b1food.get_config')
def test_b1food_login_without_client_id(get_config_mock, b1food_client):
    """Omitting client_id is rejected with HTTP 400 and a clear message."""
    get_config_mock.return_value.get.return_value = "B1FoodClientId"
    response = b1food_client.get('/b1food/oauth/authenticate',
                                 query_string={'response_type': 'token'})
    body = json.loads(response.data.decode('utf-8'))
    assert response.status_code == 400
    assert body['message'] == 'Missing client_id'
@mock.patch('stoqserver.api.resources.b1food.get_config')
def test_login_with_invalid_client_id(get_config_mock, b1food_client):
    """A client_id different from the configured one is forbidden (403)."""
    get_config_mock.return_value.get.return_value = "B1FoodClientId"
    params = {'response_type': 'token', 'client_id': 'B1FoodInvalidClientId'}
    response = b1food_client.get('/b1food/oauth/authenticate',
                                 query_string=params)
    assert response.status_code == 403
@mock.patch('stoqserver.api.decorators.get_config')
def test_get_income_center(get_config_mock, b1food_client):
    """With a valid token the income-center listing is an empty list."""
    get_config_mock.return_value.get.return_value = "B1FoodClientId"
    response = b1food_client.get(
        'b1food/terceiros/restful/centrosrenda',
        query_string={'Authorization': 'Bearer B1FoodClientId'})
    assert json.loads(response.data.decode('utf-8')) == []
@mock.patch('stoqserver.api.decorators.get_config')
def test_get_income_center_with_wrong_authorization(get_config_mock, b1food_client):
    """A token that does not match the configured one gets HTTP 401."""
    get_config_mock.return_value.get.return_value = "dasdadasded"
    response = b1food_client.get(
        'b1food/terceiros/restful/centrosrenda',
        query_string={'Authorization': 'Bearer B1FoodClientId'})
    assert response.status_code == 401
@mock.patch('stoqserver.api.decorators.get_config')
def test_get_sale_item_without_initial_date_arg(get_config_mock, b1food_client):
    """Missing dtinicio is rejected with HTTP 400 and a specific message."""
    get_config_mock.return_value.get.return_value = "B1FoodClientId"
    response = b1food_client.get(
        'b1food/terceiros/restful/itemvenda',
        query_string={'Authorization': 'Bearer B1FoodClientId',
                      'dtfim': '2020-01-01'})
    body = json.loads(response.data.decode('utf-8'))
    assert response.status_code == 400
    assert body['message'] == "Missing parameter 'dtinicio'"
@mock.patch('stoqserver.api.decorators.get_config')
def test_get_sale_item_without_end_date_arg(get_config_mock, b1food_client):
    """Missing dtfim is rejected with HTTP 400 and a specific message."""
    get_config_mock.return_value.get.return_value = "B1FoodClientId"
    response = b1food_client.get(
        'b1food/terceiros/restful/itemvenda',
        query_string={'Authorization': 'Bearer B1FoodClientId',
                      'dtinicio': '2020-01-01'})
    body = json.loads(response.data.decode('utf-8'))
    assert response.status_code == 400
    assert body['message'] == "Missing parameter 'dtfim'"
@mock.patch('stoqserver.api.decorators.get_config')
def test_get_sale_item_with_no_sales(get_config_mock, b1food_client):
    """When no sale matches the queried period, the listing is empty."""
    get_config_mock.return_value.get.return_value = "B1FoodClientId"
    params = {
        'Authorization': 'Bearer B1FoodClientId',
        'dtinicio': '2020-01-01',
        'dtfim': '2020-01-01',
    }
    response = b1food_client.get('b1food/terceiros/restful/itemvenda',
                                 query_string=params)
    assert json.loads(response.data.decode('utf-8')) == []
@mock.patch('stoqserver.api.decorators.get_config')
@pytest.mark.usefixtures('mock_new_store')
def test_get_sale_item_with_usarDtMov_arg(get_config_mock, b1food_client, sale):
    """usarDtMov toggles which sale date is matched against the period.

    The confirm date is moved outside the queried window, so the sale is
    found with usarDtMov=0 and filtered out with usarDtMov=1.
    """
    get_config_mock.return_value.get.return_value = "B1FoodClientId"
    sale.confirm_date = datetime.strptime('2020-01-04', '%Y-%m-%d')
    results = {}
    for flag in (0, 1):
        params = {
            'Authorization': 'Bearer B1FoodClientId',
            'dtinicio': '2020-01-01',
            'dtfim': '2020-01-03',
            'usarDtMov': flag,
        }
        response = b1food_client.get('b1food/terceiros/restful/itemvenda',
                                     query_string=params)
        results[flag] = json.loads(response.data.decode('utf-8'))
    assert len(results[0]) == 1
    assert len(results[1]) == 0
@mock.patch('stoqserver.api.decorators.get_config')
@pytest.mark.usefixtures('mock_new_store')
def test_get_sale_item_with_lojas_arg(get_config_mock, b1food_client, current_station, sale):
    """A scalar 'lojas' branch id returns the single matching sale item."""
    get_config_mock.return_value.get.return_value = "B1FoodClientId"
    params = {
        'Authorization': 'Bearer B1FoodClientId',
        'dtinicio': '2020-01-01',
        'dtfim': '2020-01-03',
        'lojas': current_station.branch.id,
    }
    response = b1food_client.get('b1food/terceiros/restful/itemvenda',
                                 query_string=params)
    items = json.loads(response.data.decode('utf-8'))
    assert len(items) == 1
@mock.patch('stoqserver.api.decorators.get_config')
@pytest.mark.usefixtures('mock_new_store')
def test_get_sale_item_with_lojas_filter(get_config_mock, b1food_client,
                                         current_station, sale):
    """A list-valued 'lojas' filter returns the single matching sale item."""
    get_config_mock.return_value.get.return_value = "B1FoodClientId"
    params = {
        'Authorization': 'Bearer B1FoodClientId',
        'dtinicio': '2020-01-01',
        'dtfim': '2020-01-03',
        'lojas': [current_station.branch.id],
    }
    response = b1food_client.get('b1food/terceiros/restful/itemvenda',
                                 query_string=params)
    items = json.loads(response.data.decode('utf-8'))
    assert len(items) == 1
@mock.patch('stoqserver.api.decorators.get_config')
def test_get_sale_item_with_consumidores_filter(get_config_mock, b1food_client):
    """Filtering by consumer documents with no matching sale yields []."""
    get_config_mock.return_value.get.return_value = "B1FoodClientId"
    params = {
        'Authorization': 'Bearer B1FoodClientId',
        'dtinicio': '2020-01-01',
        'dtfim': '2020-01-01',
        'consumidores': [97050782033, 70639759000102],
    }
    response = b1food_client.get('b1food/terceiros/restful/itemvenda',
                                 query_string=params)
    assert json.loads(response.data.decode('utf-8')) == []
@mock.patch('stoqserver.api.decorators.get_config')
def test_get_sale_item_with_operacaocupom_filter(get_config_mock, b1food_client):
    """Filtering by an invoice key with no matching sale yields []."""
    get_config_mock.return_value.get.return_value = "B1FoodClientId"
    params = {
        'Authorization': 'Bearer B1FoodClientId',
        'dtinicio': '2020-01-01',
        'dtfim': '2020-01-01',
        'operacaocupom': ['33200423335270000159650830000000181790066862'],
    }
    response = b1food_client.get('b1food/terceiros/restful/itemvenda',
                                 query_string=params)
    assert json.loads(response.data.decode('utf-8')) == []
@mock.patch('stoqserver.api.resources.b1food._get_network_info')
@mock.patch('stoqserver.api.decorators.get_config')
@pytest.mark.usefixtures('mock_new_store')
def test_get_sale_item_successfully(get_config_mock, get_network_info,
                                    b1food_client, store, sale, network):
    """Happy path: GET itemvenda returns the full B1Food payload for one sale.

    The golden dict below pins every field of the serialized sale item,
    derived from the `sale` fixture's single item.
    """
    get_config_mock.return_value.get.return_value = "B1FoodClientId"
    get_network_info.return_value = network
    query_string = {
        'Authorization': 'Bearer B1FoodClientId',
        'dtinicio': '2020-01-01',
        'dtfim': '2020-01-03'
    }
    response = b1food_client.get('b1food/terceiros/restful/itemvenda',
                                 query_string=query_string)
    res = json.loads(response.data.decode('utf-8'))
    # Pull the expected values from the same objects the endpoint serializes.
    item = sale.get_items()[0]
    sellable = item.sellable
    station = store.get(BranchStation, sale.station_id)
    salesperson = sale.salesperson
    document = raw_document(sale.get_client_document())
    assert response.status_code == 200
    assert res == [{
        'acrescimo': 0,
        'atendenteCod': salesperson.person.login_user.username,
        'atendenteId': salesperson.person.login_user.id,
        'atendenteNome': salesperson.person.name,
        'cancelado': False,
        'codMaterial': sellable.code,
        'codOrigem': None,
        'consumidores': [{'documento': document, 'tipo': 'CPF'}],
        'desconto': 5.0,
        'descricao': sellable.description,
        'dtLancamento': '2020-01-02',
        'grupo': {
            'ativo': True,
            'codigo': sellable.category.id,
            'dataAlteracao': sellable.category.te.te_server.strftime('%Y-%m-%d %H:%M:%S -0300'),
            'descricao': sellable.category.description,
            'idGrupo': sellable.category.id,
            'idGrupoPai': sellable.category.category_id
        },
        'horaLancamento': '00:00',
        'idItemVenda': item.id,
        'idMaterial': sellable.id,
        'idOrigem': None,
        'isEntrega': False,
        'isGorjeta': False,
        'isRepique': False,
        'isTaxa': False,
        'lojaId': sale.branch.id,
        'maquinaCod': station.id,
        'maquinaId': station.id,
        'nomeMaquina': station.name,
        'operacaoId': sale.id,
        'quantidade': 1.0,
        'redeId': network['id'],
        'valorBruto': 15.0,
        'valorLiquido': 10.0,
        'valorUnitario': 15.0,
        'valorUnitarioLiquido': 10.0,
        'tipoDescontoId': None,
        'tipoDescontoCod': None,
        'tipoDescontoNome': None
    }]
@mock.patch('stoqserver.api.decorators.get_config')
@pytest.mark.usefixtures('mock_new_store')
def test_get_sale_item_with_cnpj_client_successfully(get_config_mock, b1food_client,
                                                     store, example_creator, sale_with_cnpj):
    """A company client shows up as a CNPJ consumer on the sale item."""
    get_config_mock.return_value.get.return_value = "B1FoodClientId"
    response = b1food_client.get(
        'b1food/terceiros/restful/itemvenda',
        query_string={'Authorization': 'Bearer B1FoodClientId',
                      'dtinicio': '2020-01-01',
                      'dtfim': '2020-01-03'})
    body = json.loads(response.data.decode('utf-8'))
    assert response.status_code == 200
    assert body[0]['consumidores'] == [{'documento': '35600423000127', 'tipo': 'CNPJ'}]
@mock.patch('stoqserver.api.decorators.get_config')
@pytest.mark.usefixtures('mock_new_store')
def test_get_sale_item_with_empty_document(get_config_mock, b1food_client, store, sale):
    """A client without a CPF yields an empty document/type consumer entry."""
    sale.client.person.individual.cpf = ''
    get_config_mock.return_value.get.return_value = "B1FoodClientId"
    response = b1food_client.get(
        'b1food/terceiros/restful/itemvenda',
        query_string={'Authorization': 'Bearer B1FoodClientId',
                      'dtinicio': '2020-01-01',
                      'dtfim': '2020-01-03'})
    body = json.loads(response.data.decode('utf-8'))
    assert response.status_code == 200
    assert body[0]['consumidores'] == [{'documento': '', 'tipo': ''}]
@mock.patch('stoqserver.api.decorators.get_config')
@pytest.mark.usefixtures('mock_new_store')
def test_get_sale_item_cancelled_false(get_config_mock, b1food_client, store, sale):
    """cancelados=0 hides the items of a cancelled sale."""
    get_config_mock.return_value.get.return_value = "B1FoodClientId"
    sale.status = Sale.STATUS_CANCELLED
    params = {
        'Authorization': 'Bearer B1FoodClientId',
        'dtinicio': '2020-01-01',
        'dtfim': '2020-01-03',
        'cancelados': 0,
    }
    response = b1food_client.get('b1food/terceiros/restful/itemvenda',
                                 query_string=params)
    body = json.loads(response.data.decode('utf-8'))
    assert response.status_code == 200
    assert body == []
@mock.patch('stoqserver.api.decorators.get_config')
@pytest.mark.usefixtures('mock_new_store')
def test_get_sale_item_cancelled_true(get_config_mock, b1food_client, store, sale):
    """cancelados=1 includes the cancelled sale, flagged as such."""
    get_config_mock.return_value.get.return_value = "B1FoodClientId"
    sale.status = Sale.STATUS_CANCELLED
    params = {
        'Authorization': 'Bearer B1FoodClientId',
        'dtinicio': '2020-01-01',
        'dtfim': '2020-01-03',
        'cancelados': 1,
    }
    response = b1food_client.get('b1food/terceiros/restful/itemvenda',
                                 query_string=params)
    body = json.loads(response.data.decode('utf-8'))
    assert response.status_code == 200
    assert len(body) == 1
    assert body[0]['cancelado'] is True
@mock.patch('stoqserver.api.resources.b1food._get_network_info')
@mock.patch('stoqserver.api.decorators.get_config')
@pytest.mark.usefixtures('mock_new_store')
def test_get_sellables(get_config_mock, get_network_info, b1food_client,
                       store, sale, sellable, network):
    """GET material lists every sellable except the delivery service,
    and the entry for `sellable` carries the full expected payload."""
    get_config_mock.return_value.get.return_value = "B1FoodClientId"
    get_network_info.return_value = network
    # The delivery-service sellable is excluded from the listing.
    delivery = sysparam.get_object(store, 'DELIVERY_SERVICE')
    sellables = store.find(Sellable, Ne(Sellable.id, delivery.sellable.id))
    query_string = {
        'Authorization': 'Bearer B1FoodClientId',
    }
    response = b1food_client.get('b1food/terceiros/restful/material',
                                 query_string=query_string)
    res = json.loads(response.data.decode('utf-8'))
    assert response.status_code == 200
    assert len(res) == sellables.count()
    res_item = [item for item in res if item['idMaterial'] == sellable.id]
    assert res_item == [{
        'idMaterial': sellable.id,
        'codigo': sellable.code,
        'descricao': sellable.description,
        'unidade': sellable.unit,
        'dataAlteracao': sellable.te.te_server.strftime('%Y-%m-%d %H:%M:%S -0300'),
        'ativo': sellable.status == Sellable.STATUS_AVAILABLE,
        'redeId': network['id'],
        'lojaId': None,
        'isTaxa': False,
        'isRepique': False,
        'isGorjeta': False,
        'isEntrega': False,
        'grupo': _get_category_info(sellable)
    }]
@mock.patch('stoqserver.api.resources.b1food._get_network_info')
@mock.patch('stoqserver.api.decorators.get_config')
@pytest.mark.usefixtures('mock_new_store')
def test_get_sellables_with_lojas_filter(get_config_mock, get_network_info, b1food_client,
                                         store, sale, current_station, sellable, network):
    """With a ProductBranchOverride for the branch, the 'lojas' filter
    narrows the listing to that one sellable and fills its lojaId."""
    get_config_mock.return_value.get.return_value = "B1FoodClientId"
    get_network_info.return_value = network
    branch_id = current_station.branch.id
    # Bind the product to the branch so the branch filter matches it.
    ProductBranchOverride(store=store, product=sellable.product, branch_id=branch_id)
    query_string = {
        'Authorization': 'Bearer B1FoodClientId',
        'lojas': [current_station.branch.id]
    }
    response = b1food_client.get('b1food/terceiros/restful/material',
                                 query_string=query_string)
    res = json.loads(response.data.decode('utf-8'))
    assert response.status_code == 200
    assert len(res) == 1
    res_item = [item for item in res if item['idMaterial'] == sellable.id]
    assert res_item == [{
        'idMaterial': sellable.id,
        'codigo': sellable.code,
        'descricao': sellable.description,
        'unidade': sellable.unit,
        'dataAlteracao': sellable.te.te_server.strftime('%Y-%m-%d %H:%M:%S -0300'),
        'ativo': sellable.status == Sellable.STATUS_AVAILABLE,
        'redeId': network['id'],
        'lojaId': branch_id,
        'isTaxa': False,
        'isRepique': False,
        'isGorjeta': False,
        'isEntrega': False,
        'grupo': _get_category_info(sellable)
    }]
@mock.patch('stoqserver.api.resources.b1food._get_network_info')
@mock.patch('stoqserver.api.decorators.get_config')
@pytest.mark.usefixtures('mock_new_store')
def test_get_sellables_with_lojas_filter_without_pbo(get_config_mock, get_network_info,
                                                     b1food_client, store, sale, current_station,
                                                     sellable, network):
    """Without a ProductBranchOverride, the 'lojas' filter still lists all
    non-delivery sellables, with lojaId set to the requested branch."""
    get_config_mock.return_value.get.return_value = "B1FoodClientId"
    get_network_info.return_value = network
    delivery = sysparam.get_object(store, 'DELIVERY_SERVICE')
    sellables = store.find(Sellable, Ne(Sellable.id, delivery.sellable.id))
    branch_id = current_station.branch.id
    query_string = {
        'Authorization': 'Bearer B1FoodClientId',
        'lojas': [current_station.branch.id]
    }
    response = b1food_client.get('b1food/terceiros/restful/material',
                                 query_string=query_string)
    res = json.loads(response.data.decode('utf-8'))
    assert response.status_code == 200
    assert len(res) == sellables.count()
    res_item = [item for item in res if item['idMaterial'] == sellable.id]
    assert res_item == [{
        'idMaterial': sellable.id,
        'codigo': sellable.code,
        'descricao': sellable.description,
        'unidade': sellable.unit,
        'dataAlteracao': sellable.te.te_server.strftime('%Y-%m-%d %H:%M:%S -0300'),
        'ativo': sellable.status == Sellable.STATUS_AVAILABLE,
        'redeId': network['id'],
        'lojaId': branch_id,
        'isTaxa': False,
        'isRepique': False,
        'isGorjeta': False,
        'isEntrega': False,
        'grupo': _get_category_info(sellable)
    }]
@mock.patch('stoqserver.api.resources.b1food._get_network_info')
@mock.patch('stoqserver.api.decorators.get_config')
@pytest.mark.usefixtures('mock_new_store')
def test_get_sellables_with_lojas_filter_branch_not_found(get_config_mock, get_network_info,
                                                          b1food_client, store, sale,
                                                          current_station, sellable, network):
    """An unknown branch id in 'lojas' yields HTTP 404 echoing the id."""
    get_config_mock.return_value.get.return_value = "B1FoodClientId"
    get_network_info.return_value = network
    branch_id = 'e78a5f80-9b17-4b31-85e9-f3ebbdfc15fa'
    response = b1food_client.get(
        'b1food/terceiros/restful/material',
        query_string={'Authorization': 'Bearer B1FoodClientId',
                      'lojas': [branch_id]})
    body = json.loads(response.data.decode('utf-8'))
    assert response.status_code == 404
    assert "Branch(es) ['{}'] not found".format(branch_id) in body['message']
@mock.patch('stoqserver.api.resources.b1food._get_network_info')
@mock.patch('stoqserver.api.decorators.get_config')
@pytest.mark.usefixtures('mock_new_store')
def test_get_sellables_available(get_config_mock, get_network_info, b1food_client,
                                 store, sale, sellable, network):
    """ativo=1 lists exactly the available sellables — none inactive."""
    get_config_mock.return_value.get.return_value = "B1FoodClientId"
    get_network_info.return_value = network
    sellables = Sellable.get_available_sellables(store)
    # Closing this sellable should remove it from the ativo=1 listing.
    sellable.status = Sellable.STATUS_CLOSED
    response = b1food_client.get(
        'b1food/terceiros/restful/material',
        query_string={'Authorization': 'Bearer B1FoodClientId', 'ativo': 1})
    body = json.loads(response.data.decode('utf-8'))
    assert response.status_code == 200
    assert len(body) == sellables.count()
    assert not any(item['ativo'] is False for item in body)
@mock.patch('stoqserver.api.resources.b1food._get_network_info')
@mock.patch('stoqserver.api.decorators.get_config')
@pytest.mark.usefixtures('mock_new_store')
def test_get_sellables_unavailable(get_config_mock, get_network_info, b1food_client,
                                   store, sale, sellable, network):
    """ativo=0 lists exactly the non-available sellables — none active."""
    get_config_mock.return_value.get.return_value = "B1FoodClientId"
    get_network_info.return_value = network
    # Closing this sellable should add it to the ativo=0 listing.
    sellable.status = Sellable.STATUS_CLOSED
    unavailable_sellables = store.find(Sellable, Sellable.status != Sellable.STATUS_AVAILABLE)
    response = b1food_client.get(
        'b1food/terceiros/restful/material',
        query_string={'Authorization': 'Bearer B1FoodClientId', 'ativo': 0})
    body = json.loads(response.data.decode('utf-8'))
    assert response.status_code == 200
    assert len(body) == unavailable_sellables.count()
    assert not any(item['ativo'] is True for item in body)
@mock.patch('stoqserver.api.decorators.get_config')
@pytest.mark.usefixtures('mock_new_store')
def test_get_payment_with_lojas_filter(get_config_mock, b1food_client, store,
                                       current_station, sale):
    """The branch filter narrows the cash-movement listing to one entry."""
    get_config_mock.return_value.get.return_value = "B1FoodClientId"
    params = {
        'Authorization': 'Bearer B1FoodClientId',
        'dtinicio': '2020-01-01',
        'dtfim': '2020-01-03',
        'lojas': [current_station.branch.id],
    }
    response = b1food_client.get('b1food/terceiros/restful/movimentocaixa',
                                 query_string=params)
    body = json.loads(response.data.decode('utf-8'))
    payments = store.find(Payment)
    # More payments exist in the base than the single filtered movement.
    assert payments.count() > 1
    assert len(body) == 1
@mock.patch('stoqserver.api.decorators.get_config')
def test_get_payment_with_consumidores_filter(get_config_mock, b1food_client):
    """Filtering movements by consumer documents with no match yields []."""
    get_config_mock.return_value.get.return_value = "B1FoodClientId"
    params = {
        'Authorization': 'Bearer B1FoodClientId',
        'dtinicio': '2020-01-01',
        'dtfim': '2020-01-01',
        'consumidores': [97050782033, 70639759000102],
    }
    response = b1food_client.get('b1food/terceiros/restful/movimentocaixa',
                                 query_string=params)
    assert json.loads(response.data.decode('utf-8')) == []
@mock.patch('stoqserver.api.decorators.get_config')
def test_get_payment_with_operacaocupom_filter(get_config_mock, b1food_client):
    """Filtering movements by an invoice key with no match yields []."""
    get_config_mock.return_value.get.return_value = "B1FoodClientId"
    params = {
        'Authorization': 'Bearer B1FoodClientId',
        'dtinicio': '2020-01-01',
        'dtfim': '2020-01-01',
        'operacaocupom': ['33200423335270000159650830000000181790066862'],
    }
    response = b1food_client.get('b1food/terceiros/restful/movimentocaixa',
                                 query_string=params)
    assert json.loads(response.data.decode('utf-8')) == []
@mock.patch('stoqserver.api.decorators.get_config')
@pytest.mark.usefixtures('mock_new_store')
def test_get_payment_with_cnpj_client_successfully(get_config_mock, b1food_client,
                                                   store, example_creator, sale_with_cnpj):
    """A company client shows up as a CNPJ consumer on the cash movement."""
    get_config_mock.return_value.get.return_value = "B1FoodClientId"
    response = b1food_client.get(
        'b1food/terceiros/restful/movimentocaixa',
        query_string={'Authorization': 'Bearer B1FoodClientId',
                      'dtinicio': '2020-01-01',
                      'dtfim': '2020-01-03'})
    body = json.loads(response.data.decode('utf-8'))
    assert response.status_code == 200
    assert body[0]['consumidores'] == [{'documento': '35600423000127', 'tipo': 'CNPJ'}]
@mock.patch('stoqserver.api.decorators.get_config')
@pytest.mark.usefixtures('mock_new_store')
def test_get_payment_with_empty_document(get_config_mock, b1food_client, store, sale):
    """A client without a CPF yields an empty document/type consumer entry."""
    sale.client.person.individual.cpf = ''
    get_config_mock.return_value.get.return_value = "B1FoodClientId"
    response = b1food_client.get(
        'b1food/terceiros/restful/movimentocaixa',
        query_string={'Authorization': 'Bearer B1FoodClientId',
                      'dtinicio': '2020-01-01',
                      'dtfim': '2020-01-03'})
    body = json.loads(response.data.decode('utf-8'))
    assert response.status_code == 200
    assert body[0]['consumidores'] == [{'documento': '', 'tipo': ''}]
@mock.patch('stoqserver.api.resources.b1food._get_network_info')
@mock.patch('stoqserver.api.decorators.get_config')
@pytest.mark.usefixtures('mock_new_store')
def test_get_payments_successfully(get_config_mock, get_network_info,
                                   b1food_client, store, sale, network):
    """Happy path: GET movimentocaixa returns the full cash-movement payload,
    including one meiosPagamento entry per payment (plain + card)."""
    get_config_mock.return_value.get.return_value = "B1FoodClientId"
    get_network_info.return_value = network
    query_string = {
        'Authorization': 'Bearer B1FoodClientId',
        'dtinicio': '2020-01-01',
        'dtfim': '2020-01-03'
    }
    response = b1food_client.get('b1food/terceiros/restful/movimentocaixa',
                                 query_string=query_string)
    res = json.loads(response.data.decode('utf-8'))
    # Expected values come from the sale fixture's two payments:
    # payments[0] is a plain method, payments[1] is a card payment.
    salesperson = sale.salesperson
    payment = sale.group.payments[0]
    card_payment = sale.group.payments[1]
    document = raw_document(sale.get_client_document())
    card_data = card_payment.card_data
    provider = card_data.provider
    card_type = card_data.card_type
    assert response.status_code == 200
    assert res == [
        {
            'idMovimentoCaixa': sale.id,
            'redeId': network['id'],
            'rede': network['name'],
            'lojaId': sale.branch.id,
            'loja': sale.branch.name,
            'hora': '00',
            'cancelado': sale.status == Sale.STATUS_CANCELLED,
            'idAtendente': salesperson.person.login_user.id,
            'codAtendente': salesperson.person.login_user.username,
            'nomeAtendente': sale.salesperson.person.name,
            'vlDesconto': 0.0,
            'vlAcrescimo': 0.0,
            'vlTotalReceber': 0.0,
            'vlTotalRecebido': 15.0,
            'vlTrocoFormasPagto': 0.0,
            'vlServicoRecebido': 0,
            'vlRepique': 0,
            'vlTaxaEntrega': 0,
            'numPessoas': 1,
            'operacaoId': sale.id,
            'maquinaId': sale.station.id,
            'nomeMaquina': sale.station.name,
            'maquinaCod': payment.station.id,
            'maquinaPortaFiscal': None,
            'meiosPagamento': [
                {
                    'id': payment.method.id,
                    'codigo': payment.method.id,
                    'nome': _get_payment_method_name(payment.method.method_name),
                    'descricao': _get_payment_method_description(payment.method),
                    'valor': float(payment.paid_value),
                    'troco': float(payment.base_value - payment.value),
                    'valorRecebido': float(payment.value),
                    'idAtendente': sale.salesperson.person.login_user.id,
                    'codAtendente': sale.salesperson.person.login_user.username,
                    'nomeAtendente': sale.salesperson.person.name,
                },
                {
                    'id': _get_payment_method_with_provider_code(card_type, provider),
                    'codigo': _get_payment_method_with_provider_code(card_type, provider),
                    'nome': _get_card_name(card_payment.card_data.card_type,
                                           card_payment.card_data.provider.short_name),
                    'descricao': _get_card_description(card_payment.card_data.card_type,
                                                       card_payment.card_data.provider.short_name),
                    'valor': float(card_payment.paid_value),
                    'troco': float(card_payment.base_value - card_payment.value),
                    'valorRecebido': float(card_payment.value),
                    'idAtendente': sale.salesperson.person.login_user.id,
                    'codAtendente': sale.salesperson.person.login_user.username,
                    'nomeAtendente': sale.salesperson.person.name,
                }
            ],
            'consumidores': [
                {
                    'documento': document,
                    'tipo': 'CPF'
                }
            ],
            'dataContabil': '2020-01-02 00:00:00 -0300',
            'periodoId': None,
            'periodoCod': None,
            'periodoNome': None,
            'centroRendaId': None,
            'centroRendaCod': None,
            'centroRendaNome': None
        },
    ]
@mock.patch('stoqserver.api.resources.b1food._get_network_info')
@mock.patch('stoqserver.api.decorators.get_config')
@pytest.mark.usefixtures('mock_new_store')
def test_get_payments_active(get_config_mock, get_network_info, b1food_client,
                             store, cancelled_sale, sale, network):
    """ativo=1 lists only the non-cancelled sale even though a cancelled
    one exists; the whole payload for that movement is pinned."""
    get_config_mock.return_value.get.return_value = "B1FoodClientId"
    get_network_info.return_value = network
    query_string = {
        'Authorization': 'Bearer B1FoodClientId',
        'dtinicio': '2020-01-01',
        'dtfim': '2020-01-03',
        'ativo': 1
    }
    response = b1food_client.get('b1food/terceiros/restful/movimentocaixa',
                                 query_string=query_string)
    res = json.loads(response.data.decode('utf-8'))
    # Expected values come from the *active* sale's two payments.
    salesperson = sale.salesperson
    payment = sale.group.payments[0]
    card_payment = sale.group.payments[1]
    document = raw_document(sale.get_client_document())
    provider = card_payment.card_data.provider
    card_type = card_payment.card_data.card_type
    assert response.status_code == 200
    assert res == [
        {
            'codAtendente': salesperson.person.login_user.username,
            'consumidores': [
                {
                    'documento': document,
                    'tipo': 'CPF'
                }
            ],
            'dataContabil': '2020-01-02 00:00:00 -0300',
            'hora': '00',
            'cancelado': sale.status == Sale.STATUS_CANCELLED,
            'idAtendente': salesperson.person.login_user.id,
            'idMovimentoCaixa': sale.id,
            'loja': sale.branch.name,
            'lojaId': sale.branch.id,
            'maquinaCod': payment.station.id,
            'maquinaId': sale.station.id,
            'maquinaPortaFiscal': None,
            'meiosPagamento': [
                {
                    'id': payment.method.id,
                    'codigo': payment.method.id,
                    'nome': _get_payment_method_name(payment.method.method_name),
                    'descricao': _get_payment_method_description(payment.method),
                    'valor': float(payment.paid_value),
                    'troco': float(payment.base_value - payment.value),
                    'valorRecebido': float(payment.value),
                    'idAtendente': sale.salesperson.person.login_user.id,
                    'codAtendente': sale.salesperson.person.login_user.username,
                    'nomeAtendente': sale.salesperson.person.name,
                },
                {
                    'id': _get_payment_method_with_provider_code(card_type, provider),
                    'codigo': _get_payment_method_with_provider_code(card_type, provider),
                    'nome': _get_card_name(card_payment.card_data.card_type,
                                           card_payment.card_data.provider.short_name),
                    'descricao': _get_card_description(card_payment.card_data.card_type,
                                                      card_payment.card_data.provider.short_name),
                    'valor': float(card_payment.paid_value),
                    'troco': float(card_payment.base_value - card_payment.value),
                    'valorRecebido': float(card_payment.value),
                    'idAtendente': sale.salesperson.person.login_user.id,
                    'codAtendente': sale.salesperson.person.login_user.username,
                    'nomeAtendente': sale.salesperson.person.name,
                }
            ],
            'nomeAtendente': sale.salesperson.person.name,
            'nomeMaquina': sale.station.name,
            'numPessoas': 1,
            'operacaoId': sale.id,
            'rede': network['name'],
            'redeId': network['id'],
            'vlAcrescimo': 0.0,
            'vlTotalReceber': 0.0,
            'vlTotalRecebido': 15.0,
            'vlDesconto': 0.0,
            'vlRepique': 0,
            'vlServicoRecebido': 0,
            'vlTaxaEntrega': 0,
            'vlTrocoFormasPagto': 0,
            'periodoId': None,
            'periodoCod': None,
            'periodoNome': None,
            'centroRendaId': None,
            'centroRendaCod': None,
            'centroRendaNome': None
        },
    ]
@mock.patch('stoqserver.api.resources.b1food._get_network_info')
@mock.patch('stoqserver.api.decorators.get_config')
@pytest.mark.usefixtures('mock_new_store')
def test_get_payments_inactive(get_config_mock, get_network_info, b1food_client,
                               store, sale, cancelled_sale, network):
    """ativo=0 lists only the cancelled sale's movement; its single-payment
    payload is pinned in full."""
    get_config_mock.return_value.get.return_value = "B1FoodClientId"
    get_network_info.return_value = network
    query_string = {
        'Authorization': 'Bearer B1FoodClientId',
        'dtinicio': '2020-01-01',
        'dtfim': '2020-01-03',
        'ativo': 0
    }
    response = b1food_client.get('b1food/terceiros/restful/movimentocaixa',
                                 query_string=query_string)
    res = json.loads(response.data.decode('utf-8'))
    # Expected values come from the *cancelled* sale's single payment.
    salesperson = cancelled_sale.salesperson
    payment = cancelled_sale.group.payments[0]
    document = raw_document(cancelled_sale.get_client_document())
    assert response.status_code == 200
    assert res == [
        {
            'codAtendente': salesperson.person.login_user.username,
            'consumidores': [
                {
                    'documento': document,
                    'tipo': 'CPF'
                }
            ],
            'dataContabil': '2020-01-02 00:00:00 -0300',
            'hora': '00',
            'cancelado': cancelled_sale.status == Sale.STATUS_CANCELLED,
            'idAtendente': salesperson.person.login_user.id,
            'idMovimentoCaixa': cancelled_sale.id,
            'loja': cancelled_sale.branch.name,
            'lojaId': cancelled_sale.branch.id,
            'maquinaCod': payment.station.id,
            'maquinaId': cancelled_sale.station.id,
            'maquinaPortaFiscal': None,
            'meiosPagamento': [
                {
                    'id': payment.method.id,
                    'codigo': payment.method.id,
                    'nome': _get_payment_method_name(payment.method.method_name),
                    'descricao': _get_payment_method_description(payment.method),
                    'valor': float(payment.paid_value),
                    'troco': float(payment.base_value - payment.value),
                    'valorRecebido': float(payment.value),
                    'idAtendente': cancelled_sale.salesperson.person.login_user.id,
                    'codAtendente': cancelled_sale.salesperson.person.login_user.username,
                    'nomeAtendente': cancelled_sale.salesperson.person.name,
                }
            ],
            'nomeAtendente': cancelled_sale.salesperson.person.name,
            'nomeMaquina': cancelled_sale.station.name,
            'numPessoas': 1,
            'operacaoId': cancelled_sale.id,
            'rede': network['name'],
            'redeId': network['id'],
            'vlAcrescimo': 0.0,
            'vlTotalReceber': 0.0,
            'vlTotalRecebido': 10.0,
            'vlDesconto': 0.0,
            'vlRepique': 0,
            'vlServicoRecebido': 0,
            'vlTaxaEntrega': 0,
            'vlTrocoFormasPagto': 0,
            'periodoId': None,
            'periodoCod': None,
            'periodoNome': None,
            'centroRendaId': None,
            'centroRendaCod': None,
            'centroRendaNome': None
        },
    ]
@mock.patch('stoqserver.api.decorators.get_config')
@pytest.mark.usefixtures('mock_new_store')
def test_get_payment_with_type_out(get_config_mock, b1food_client,
                                   store, example_creator, sale_type_out):
    """A sale with an OUT-type payment makes movimentocaixa raise.

    NOTE(review): the ``status_code`` assert is *inside* the pytest.raises
    block — if ``client.get`` raises (as pytest.raises expects), that assert
    never executes, so it is effectively dead code. Confirm the intended
    behavior before restructuring.
    """
    get_config_mock.return_value.get.return_value = "B1FoodClientId"
    query_string = {
        'Authorization': 'Bearer B1FoodClientId',
        'dtinicio': '2020-01-01',
        'dtfim': '2020-01-03',
    }
    with pytest.raises(Exception) as error:
        response = b1food_client.get('b1food/terceiros/restful/movimentocaixa',
                                     query_string=query_string)
        assert response.status_code == 500
    assert "Inconsistent database, please contact support." in str(error.value)
@mock.patch('stoqserver.api.decorators.get_config')
@pytest.mark.usefixtures('mock_new_store')
def test_get_payments_cancelled_false(get_config_mock, b1food_client, store, sale):
    """cancelados=0 hides the cash movement of a cancelled sale."""
    get_config_mock.return_value.get.return_value = "B1FoodClientId"
    sale.status = Sale.STATUS_CANCELLED
    params = {
        'Authorization': 'Bearer B1FoodClientId',
        'dtinicio': '2020-01-01',
        'dtfim': '2020-01-03',
        'cancelados': 0,
    }
    response = b1food_client.get('b1food/terceiros/restful/movimentocaixa',
                                 query_string=params)
    body = json.loads(response.data.decode('utf-8'))
    assert response.status_code == 200
    assert body == []
@mock.patch('stoqserver.api.decorators.get_config')
@pytest.mark.usefixtures('mock_new_store')
def test_get_payments_cancelled_true(get_config_mock, b1food_client, store, sale):
    """cancelados=1 includes the cancelled sale's movement, flagged as such."""
    get_config_mock.return_value.get.return_value = "B1FoodClientId"
    sale.status = Sale.STATUS_CANCELLED
    params = {
        'Authorization': 'Bearer B1FoodClientId',
        'dtinicio': '2020-01-01',
        'dtfim': '2020-01-03',
        'cancelados': 1,
    }
    response = b1food_client.get('b1food/terceiros/restful/movimentocaixa',
                                 query_string=params)
    body = json.loads(response.data.decode('utf-8'))
    assert response.status_code == 200
    assert len(body) == 1
    assert body[0]['cancelado'] is True
@mock.patch('stoqserver.api.resources.b1food._get_network_info')
@mock.patch('stoqserver.api.decorators.get_config')
@pytest.mark.usefixtures('mock_new_store')
def test_get_stations_successfully(get_config_mock, get_network_info,
                                   b1food_client, active_station, network):
    """GET terminais includes the active station with its full payload."""
    get_config_mock.return_value.get.return_value = 'B1FoodClientId'
    get_network_info.return_value = network
    query_string = {
        'Authorization': 'Bearer B1FoodClientId',
    }
    response = b1food_client.get('/b1food/terceiros/restful/terminais',
                                 query_string=query_string)
    res = json.loads(response.data.decode('utf-8'))
    # Only assert on the entry for our fixture station.
    res_item = [item for item in res if item['id'] == active_station.id]
    assert res_item == [
        {
            'apelido': active_station.name,
            'ativo': active_station.is_active,
            'codigo': active_station.id,
            'id': active_station.id,
            'lojaId': active_station.branch.id,
            'nome': active_station.name,
            'portaFiscal': None,
            'redeId': network['id'],
            'dataAlteracao': active_station.te.te_server.strftime('%Y-%m-%d %H:%M:%S -0300'),
            'dataCriacao': active_station.te.te_time.strftime('%Y-%m-%d %H:%M:%S -0300'),
        }
    ]
@mock.patch('stoqserver.api.resources.b1food._get_network_info')
@mock.patch('stoqserver.api.decorators.get_config')
@pytest.mark.usefixtures('mock_new_store')
def test_get_inactive_stations(get_config_mock, get_network_info,
                               b1food_client, inactive_station, network):
    """terminais queried with ativo=0 includes the inactive station's payload."""
    get_config_mock.return_value.get.return_value = 'B1FoodClientId'
    get_network_info.return_value = network
    query_string = {
        'Authorization': 'Bearer B1FoodClientId',
        'ativo': 0,
    }
    response = b1food_client.get('/b1food/terceiros/restful/terminais',
                                 query_string=query_string)
    res = json.loads(response.data.decode('utf-8'))
    # Narrow the response down to the fixture station under test.
    res_item = [item for item in res if item['id'] == inactive_station.id]
    assert res_item == [
        {
            'apelido': inactive_station.name,
            'ativo': inactive_station.is_active,
            'codigo': inactive_station.id,
            'id': inactive_station.id,
            'lojaId': inactive_station.branch.id,
            'nome': inactive_station.name,
            'portaFiscal': None,
            'redeId': network['id'],
            'dataAlteracao': inactive_station.te.te_server.strftime('%Y-%m-%d %H:%M:%S -0300'),
            'dataCriacao': inactive_station.te.te_time.strftime('%Y-%m-%d %H:%M:%S -0300'),
        }
    ]
@mock.patch('stoqserver.api.resources.b1food._get_network_info')
@mock.patch('stoqserver.api.decorators.get_config')
@pytest.mark.usefixtures('mock_new_store')
def test_get_active_stations_from_branch(get_config_mock, get_network_info, b1food_client, store,
                                         inactive_station, active_station, inactive_station2,
                                         network):
    """terminais filtered by ativo=1 + lojas returns only that branch's active station."""
    get_config_mock.return_value.get.return_value = 'B1FoodClientId'
    get_network_info.return_value = network
    query_string = {
        'Authorization': 'Bearer B1FoodClientId',
        'ativo': 1,
        'lojas': active_station.branch.id
    }
    response = b1food_client.get('/b1food/terceiros/restful/terminais',
                                 query_string=query_string)
    res = json.loads(response.data.decode('utf-8'))
    # The branch holds two stations, but only the active one may be returned.
    stations = store.find(BranchStation, branch_id=active_station.branch.id)
    assert stations.count() == 2
    assert len(res) == 1
    assert res == [
        {
            'apelido': active_station.name,
            'ativo': active_station.is_active,
            'codigo': active_station.id,
            'id': active_station.id,
            'lojaId': active_station.branch.id,
            'nome': active_station.name,
            'portaFiscal': None,
            'redeId': network['id'],
            'dataAlteracao': active_station.te.te_server.strftime('%Y-%m-%d %H:%M:%S -0300'),
            'dataCriacao': active_station.te.te_time.strftime('%Y-%m-%d %H:%M:%S -0300'),
        }
    ]
@mock.patch('stoqserver.api.resources.b1food._get_network_info')
@mock.patch('stoqserver.api.decorators.get_config')
@pytest.mark.usefixtures('mock_new_store')
def test_get_stations_branch(get_config_mock, get_network_info, b1food_client, store,
                             active_station, inactive_station2, network):
    """terminais filtered by 'lojas' returns every station of that branch."""
    get_config_mock.return_value.get.return_value = 'B1FoodClientId'
    get_network_info.return_value = network

    branch_id = active_station.branch.id
    response = b1food_client.get(
        '/b1food/terceiros/restful/terminais',
        query_string={
            'Authorization': 'Bearer B1FoodClientId',
            'lojas': branch_id,
        })
    payload = json.loads(response.data.decode('utf-8'))

    stations_in_branch = store.find(BranchStation, branch_id=branch_id)
    assert len(payload) == 2
    assert stations_in_branch.count() == 2
@mock.patch('stoqserver.api.decorators.get_config')
@pytest.mark.usefixtures('mock_new_store')
def test_get_receipts_with_lojas_filter(get_config_mock, b1food_client, store,
                                        current_station, sale):
    """comprovante filtered by 'lojas' returns only the sales of that branch."""
    get_config_mock.return_value.get.return_value = "B1FoodClientId"
    query_string = {
        'Authorization': 'Bearer B1FoodClientId',
        'dtinicio': '2020-01-01',
        'dtfim': '2020-01-03',
        'lojas': [current_station.branch.id]
    }
    response = b1food_client.get('b1food/terceiros/restful/comprovante',
                                 query_string=query_string)
    res = json.loads(response.data.decode('utf-8'))
    # There are more sales in the database than in the filtered response.
    sales = store.find(Sale)
    assert sales.count() > 1
    assert len(res) == 1
@mock.patch('stoqserver.api.decorators.get_config')
@pytest.mark.usefixtures('mock_new_store')
def test_get_receipts_with_consumidores_filter(get_config_mock, b1food_client, store, sale):
    """comprovante filtered by 'consumidores' (client document) returns one sale."""
    get_config_mock.return_value.get.return_value = "B1FoodClientId"
    query_string = {
        'Authorization': 'Bearer B1FoodClientId',
        'dtinicio': '2020-01-01',
        'dtfim': '2020-01-03',
        'consumidores': [sale.get_client_document()]
    }
    response = b1food_client.get('b1food/terceiros/restful/comprovante',
                                 query_string=query_string)
    res = json.loads(response.data.decode('utf-8'))
    # Multiple sales exist, but only the fixture client's sale matches.
    sales = store.find(Sale)
    assert sales.count() > 1
    assert len(res) == 1
@mock.patch('stoqserver.api.decorators.get_config')
@pytest.mark.usefixtures('mock_new_store')
def test_get_receipts_with_operacaocupom_filter(get_config_mock, b1food_client, store, sale):
    """comprovante filtered by 'operacaocupom' (invoice key) returns one sale."""
    get_config_mock.return_value.get.return_value = "B1FoodClientId"
    query_string = {
        'Authorization': 'Bearer B1FoodClientId',
        'dtinicio': '2020-01-01',
        'dtfim': '2020-01-03',
        'operacaocupom': [sale.invoice.key]
    }
    response = b1food_client.get('b1food/terceiros/restful/comprovante',
                                 query_string=query_string)
    res = json.loads(response.data.decode('utf-8'))
    # Four sales exist overall; only the one with the given invoice key matches.
    sales = store.find(Sale)
    assert len(res) == 1
    assert sales.count() == 4
@mock.patch('stoqserver.api.decorators.get_config')
@pytest.mark.usefixtures('mock_new_store')
def test_get_receipts_with_usarDtMov_filter(get_config_mock, b1food_client, sale):
    """With usarDtMov=1 the date range applies to the confirm (movement) date.

    The sale is confirmed on 2020-01-04, outside the queried range, so it is
    returned with usarDtMov=0 but filtered out with usarDtMov=1.
    """
    get_config_mock.return_value.get.return_value = "B1FoodClientId"
    sale.confirm_date = datetime.strptime('2020-01-04', '%Y-%m-%d')
    query_string = {
        'Authorization': 'Bearer B1FoodClientId',
        'dtinicio': '2020-01-01',
        'dtfim': '2020-01-03',
        'usarDtMov': 0
    }
    response = b1food_client.get('b1food/terceiros/restful/comprovante',
                                 query_string=query_string)
    res1 = json.loads(response.data.decode('utf-8'))
    query_string = {
        'Authorization': 'Bearer B1FoodClientId',
        'dtinicio': '2020-01-01',
        'dtfim': '2020-01-03',
        'usarDtMov': 1
    }
    response = b1food_client.get('b1food/terceiros/restful/comprovante',
                                 query_string=query_string)
    res2 = json.loads(response.data.decode('utf-8'))
    assert len(res1) == 1
    assert len(res2) == 0
@mock.patch('stoqserver.api.decorators.get_config')
@pytest.mark.usefixtures('mock_new_store')
def test_get_receipts_successfully(get_config_mock, b1food_client, store, sale):
    """comprovante returns the full receipt payload: header fields, sale items
    ('detalhes') and payment entries ('meios', one per payment, with card
    payments expanded into provider-specific codes and names).
    """
    get_config_mock.return_value.get.return_value = "B1FoodClientId"
    query_string = {
        'Authorization': 'Bearer B1FoodClientId',
        'dtinicio': '2020-01-01',
        'dtfim': '2020-01-03'
    }
    response = b1food_client.get('b1food/terceiros/restful/comprovante',
                                 query_string=query_string)
    res = json.loads(response.data.decode('utf-8'))
    item = sale.get_items()[0]
    discount = item.item_discount
    # The fixture sale carries two payments: a plain one and a card one.
    payment = sale.group.payments[0]
    card_payment = sale.group.payments[1]
    provider = card_payment.card_data.provider
    card_type = card_payment.card_data.card_type
    assert response.status_code == 200
    assert res == [
        {
            'maquinaCod': sale.station.id,
            'nomeMaquina': sale.station.name,
            'nfNumero': sale.invoice.invoice_number,
            'nfSerie': sale.invoice.series,
            'denominacao': sale.invoice.mode,
            'valor': sale.total_amount,
            'maquinaId': sale.station.id,
            'desconto': float(sale.discount_value),
            # A negative item discount is reported as a surcharge.
            'acrescimo': float(-1 * min(discount, 0)),
            'chaveNfe': sale.invoice.key,
            'dataContabil': sale.confirm_date.strftime('%Y-%m-%d'),
            'dataEmissao': sale.confirm_date.strftime('%Y-%m-%d %H:%M:%S -0300'),
            'idOperacao': sale.id,
            'troco': 0.0,
            'pagamentos': float(sale.paid),
            'dataMovimento': sale.confirm_date.strftime('%Y-%m-%d %H:%M:%S -0300'),
            'cancelado': True if sale.cancel_date else False,
            'detalhes': [
                {
                    'ordem': None,
                    'idMaterial': item.sellable.id,
                    'codigo': item.sellable.code,
                    'desconto': float(item.item_discount),
                    'descricao': item.sellable.description,
                    'quantidade': float(item.quantity),
                    'valorBruto': float(item.base_price * item.quantity),
                    'valorUnitario': float(item.base_price),
                    'valorUnitarioLiquido': float(item.price),
                    'valorLiquido': float(item.price * item.quantity),
                    'codNcm': item.sellable.product.ncm,
                    'idOrigem': None,
                    'codOrigem': None,
                    'cfop': str(item.cfop.code),
                    'acrescimo': 0.0,
                    'cancelado': True if sale.cancel_date else False,
                    'maquinaId': sale.station.id,
                    'nomeMaquina': sale.station.name,
                    'maquinaCod': sale.station.id,
                    'isTaxa': None,
                    'isRepique': None,
                    'isGorjeta': None,
                    'isEntrega': None,
                }
            ],
            'meios': [
                {
                    'id': payment.method.id,
                    'codigo': payment.method.id,
                    'nome': _get_payment_method_name(payment.method.method_name),
                    'descricao': _get_payment_method_description(payment.method),
                    'valor': float(payment.paid_value),
                    'troco': float(payment.base_value - payment.value),
                    'valorRecebido': float(payment.value),
                    'idAtendente': sale.salesperson.person.login_user.id,
                    'codAtendente': sale.salesperson.person.login_user.username,
                    'nomeAtendente': sale.salesperson.person.name,
                },
                {
                    'id': _get_payment_method_with_provider_code(card_type, provider),
                    'codigo': _get_payment_method_with_provider_code(card_type, provider),
                    'nome': _get_card_name(card_payment.card_data.card_type,
                                           card_payment.card_data.provider.short_name),
                    'descricao': _get_card_description(card_payment.card_data.card_type,
                                                      card_payment.card_data.provider.short_name),
                    'valor': float(card_payment.paid_value),
                    'troco': float(card_payment.base_value - card_payment.value),
                    'valorRecebido': float(card_payment.value),
                    'idAtendente': sale.salesperson.person.login_user.id,
                    'codAtendente': sale.salesperson.person.login_user.username,
                    'nomeAtendente': sale.salesperson.person.name,
                }
            ],
        }
    ]
@mock.patch('stoqserver.api.decorators.get_config')
@pytest.mark.usefixtures('mock_new_store')
def test_get_receipts_cancelled_false(get_config_mock, b1food_client, store, sale):
    """comprovante queried with cancelados=0 must omit a cancelled sale."""
    get_config_mock.return_value.get.return_value = "B1FoodClientId"
    sale.status = Sale.STATUS_CANCELLED

    params = {
        'Authorization': 'Bearer B1FoodClientId',
        'dtinicio': '2020-01-01',
        'dtfim': '2020-01-03',
        'cancelados': 0,
    }
    response = b1food_client.get('b1food/terceiros/restful/comprovante',
                                 query_string=params)
    payload = json.loads(response.data.decode('utf-8'))

    assert response.status_code == 200
    assert payload == []
@mock.patch('stoqserver.api.decorators.get_config')
@pytest.mark.usefixtures('mock_new_store')
def test_get_receipts_cancelled_true(get_config_mock, b1food_client, store, sale):
    """comprovante queried with cancelados=1 must list the cancelled sale."""
    get_config_mock.return_value.get.return_value = "B1FoodClientId"
    sale.status = Sale.STATUS_CANCELLED

    params = {
        'Authorization': 'Bearer B1FoodClientId',
        'dtinicio': '2020-01-01',
        'dtfim': '2020-01-03',
        'cancelados': 1,
    }
    response = b1food_client.get('b1food/terceiros/restful/comprovante',
                                 query_string=params)
    payload = json.loads(response.data.decode('utf-8'))

    assert response.status_code == 200
    assert len(payload) == 1
    assert payload[0]['cancelado'] is True
@mock.patch('stoqserver.api.resources.b1food._get_network_info')
@mock.patch('stoqserver.api.decorators.get_config')
@pytest.mark.usefixtures('mock_new_store')
def test_get_payment_methods(get_config_mock, get_network_info,
                             b1food_client, store, sale, network):
    """meio-pagamento lists every payment method plus one entry per distinct
    (card type, provider) pair, with the generic 'card' method removed.
    """
    get_config_mock.return_value.get.return_value = "B1FoodClientId"
    get_network_info.return_value = network
    payment_methods = store.find(PaymentMethod)
    payment_method = store.find(PaymentMethod, method_name='money').one()
    provider = CreditProvider(store=store, short_name='TESTE')
    CreditCardData(store=store, card_type='credit', provider=provider)
    query_string = {'Authorization': 'Bearer B1FoodClientId'}
    response = b1food_client.get('b1food/terceiros/restful/meio-pagamento',
                                 query_string=query_string)
    res = json.loads(response.data.decode('utf-8'))
    assert response.status_code == 200
    select = Select((CreditCardData.card_type, CreditCardData.provider_id), distinct=True)
    credit_providers = store.execute(select)
    # -1 because 'card' payment_method is replaced by the credit providers
    assert len(res) == payment_methods.count() + credit_providers.rowcount - 1
    res_item_payment_method = [item for item in res if item['id'] == payment_method.id]
    assert res_item_payment_method == [{
        'ativo': payment_method.is_active,
        'id': payment_method.id,
        'codigo': payment_method.id,
        'nome': _get_payment_method_name(payment_method.method_name),
        'redeId': network['id'],
        'lojaId': None
    }]
    # Spot-check one expanded provider entry built from the (type, provider) pair.
    credit_provider = credit_providers.get_one()
    card_type = credit_provider[0]
    provider_id = credit_provider[1]
    provider = store.get(CreditProvider, provider_id)
    provider_code = _get_payment_method_with_provider_code(card_type, provider)
    res_item_credit_provider = [item for item in res if item['id'] == provider_code]
    assert res_item_credit_provider == [{
        'ativo': provider.visible,
        'id': provider_code,
        'codigo': provider_code,
        'nome': _get_card_name(card_type, provider.short_name),
        'redeId': network['id'],
        'lojaId': None
    }]
@mock.patch('stoqserver.api.resources.b1food._get_network_info')
@mock.patch('stoqserver.api.decorators.get_config')
@pytest.mark.usefixtures('mock_new_store')
def test_get_payment_methods_active(get_config_mock, get_network_info,
                                    b1food_client, store, sale, network):
    """meio-pagamento with ativo=1 lists the active payment methods (with the
    'card' method replaced by the distinct card providers).
    """
    get_config_mock.return_value.get.return_value = "B1FoodClientId"
    get_network_info.return_value = network
    payment_methods_active = PaymentMethod.get_active_methods(store)
    payment_method_active = payment_methods_active[0]
    query_string = {
        'Authorization': 'Bearer B1FoodClientId',
        'ativo': 1
    }
    response = b1food_client.get('b1food/terceiros/restful/meio-pagamento',
                                 query_string=query_string)
    res = json.loads(response.data.decode('utf-8'))
    assert response.status_code == 200
    select = Select((CreditCardData.card_type, CreditCardData.provider_id), distinct=True)
    credit_providers_count = store.execute(select).rowcount
    # -1: the generic 'card' method is replaced by the provider entries.
    assert len(res) == len(payment_methods_active) + credit_providers_count - 1
    assert res[0] == {
        'ativo': payment_method_active.is_active,
        'id': payment_method_active.id,
        'codigo': payment_method_active.id,
        'nome': _get_payment_method_name(payment_method_active.method_name),
        'redeId': network['id'],
        'lojaId': None
    }
@mock.patch('stoqserver.api.resources.b1food._get_network_info')
@mock.patch('stoqserver.api.decorators.get_config')
@pytest.mark.usefixtures('mock_new_store')
def test_get_payment_methods_active_card_inactive(get_config_mock, get_network_info,
                                                  b1food_client, store, sale, network):
    """meio-pagamento with ativo=1 must not list the deactivated 'card' method."""
    get_config_mock.return_value.get.return_value = "B1FoodClientId"
    get_network_info.return_value = network
    payment_methods = store.find(PaymentMethod)
    card_payment_method = payment_methods.find(method_name='card').one()
    card_payment_method.is_active = False
    payment_methods_active = payment_methods.find(is_active=True)
    payment_method_active = payment_methods_active.any()
    # One visible and one non-visible provider; only the visible one counts as active.
    provider_visible = CreditProvider(store=store, short_name='VISIBLE PROVIDER', visible=True)
    CreditCardData(store=store, card_type='credit', provider=provider_visible)
    provider_not_visible = CreditProvider(store=store, short_name='NOT VISIBLE PROVIDER',
                                          visible=False)
    CreditCardData(store=store, card_type='credit', provider=provider_not_visible)
    query_string = {
        'Authorization': 'Bearer B1FoodClientId',
        'ativo': 1
    }
    response = b1food_client.get('b1food/terceiros/restful/meio-pagamento',
                                 query_string=query_string)
    res = json.loads(response.data.decode('utf-8'))
    assert response.status_code == 200
    assert len(res) == payment_methods_active.count()
    assert not any(res_item['id'] == card_payment_method.id for res_item in res)
    res_item = {
        'ativo': payment_method_active.is_active,
        'id': payment_method_active.id,
        'codigo': payment_method_active.id,
        'nome': _get_payment_method_name(payment_method_active.method_name),
        'redeId': network['id'],
        'lojaId': None
    }
    assert res_item in res
@mock.patch('stoqserver.api.resources.b1food._get_network_info')
@mock.patch('stoqserver.api.decorators.get_config')
@pytest.mark.usefixtures('mock_new_store')
def test_get_payment_methods_inactive(get_config_mock, get_network_info,
                                      b1food_client, store, sale, network):
    """meio-pagamento with ativo=0 returns only the deactivated 'money' method."""
    get_config_mock.return_value.get.return_value = "B1FoodClientId"
    get_network_info.return_value = network
    payment_method = store.find(PaymentMethod, method_name='money').one()
    payment_method.is_active = False
    query_string = {
        'Authorization': 'Bearer B1FoodClientId',
        'ativo': 0
    }
    response = b1food_client.get('b1food/terceiros/restful/meio-pagamento',
                                 query_string=query_string)
    res = json.loads(response.data.decode('utf-8'))
    assert response.status_code == 200
    assert len(res) == 1
    assert res[0] == {
        'ativo': payment_method.is_active,
        'id': payment_method.id,
        'codigo': payment_method.id,
        'nome': _get_payment_method_name(payment_method.method_name),
        'redeId': network['id'],
        'lojaId': None
    }
@mock.patch('stoqserver.api.resources.b1food._get_network_info')
@mock.patch('stoqserver.api.decorators.get_config')
@pytest.mark.usefixtures('mock_new_store')
def test_get_payment_methods_inactive_card_active(get_config_mock, get_network_info,
                                                  b1food_client, store, sale, network):
    """meio-pagamento with ativo=0 returns the deactivated method and the
    non-visible card provider, while the active 'card' method stays hidden.
    """
    get_config_mock.return_value.get.return_value = "B1FoodClientId"
    get_network_info.return_value = network
    payment_methods = store.find(PaymentMethod)
    inactive_payment_method = payment_methods.find(method_name='money').one()
    inactive_payment_method.is_active = False
    # .one() is required here (as done for 'money' above); without it the
    # attribute would be set on the result set object, not the PaymentMethod.
    card_payment_method = payment_methods.find(method_name='card').one()
    card_payment_method.is_active = True
    provider_visible = CreditProvider(store=store, short_name='VISIBLE PROVIDER', visible=True)
    ccd = CreditCardData(store=store, card_type='credit', provider=provider_visible)
    provider_not_visible = CreditProvider(store=store, short_name='NOT VISIBLE PROVIDER',
                                          visible=False)
    credit_card_data = CreditCardData(store=store, card_type='credit',
                                      provider=provider_not_visible)
    query_string = {
        'Authorization': 'Bearer B1FoodClientId',
        'ativo': 0
    }
    response = b1food_client.get('b1food/terceiros/restful/meio-pagamento',
                                 query_string=query_string)
    res = json.loads(response.data.decode('utf-8'))
    assert response.status_code == 200
    # Expected: the inactive 'money' method and the non-visible provider.
    assert len(res) == 2
    res_item_payment_method = {
        'ativo': inactive_payment_method.is_active,
        'id': inactive_payment_method.id,
        'codigo': inactive_payment_method.id,
        'nome': _get_payment_method_name(inactive_payment_method.method_name),
        'redeId': network['id'],
        'lojaId': None
    }
    assert res_item_payment_method in res
    res_item_credit_provider = {
        'ativo': False,
        'id': _get_payment_method_with_provider_code(ccd.card_type, provider_not_visible),
        'codigo': _get_payment_method_with_provider_code(ccd.card_type, provider_not_visible),
        'nome': _get_card_name(credit_card_data.card_type,
                               provider_not_visible.short_name),
        'redeId': network['id'],
        'lojaId': None
    }
    assert res_item_credit_provider in res
@mock.patch('stoqserver.api.resources.b1food._get_network_info')
@mock.patch('stoqserver.api.decorators.get_config')
@pytest.mark.usefixtures('mock_new_store')
def test_get_tills_successfully(get_config_mock, get_network_info,
                                b1food_client, close_till, network):
    """periodos returns an empty list with the close_till fixture in place."""
    get_config_mock.return_value.get.return_value = 'B1FoodClientId'
    get_network_info.return_value = network

    response = b1food_client.get(
        '/b1food/terceiros/restful/periodos',
        query_string={
            'Authorization': 'Bearer B1FoodClientId',
        })
    payload = json.loads(response.data.decode('utf-8'))
    assert payload == []
@mock.patch('stoqserver.api.resources.b1food._get_network_info')
@mock.patch('stoqserver.api.decorators.get_config')
def test_get_user_profile_successfully(get_config_mock, get_network_info, b1food_client,
                                       current_user, network):
    """cargos returns the current user's profile payload."""
    get_config_mock.return_value.get.return_value = 'B1FoodClientId'
    profile = current_user.profile
    get_network_info.return_value = network
    query_string = {
        'Authorization': 'Bearer B1FoodClientId',
    }
    response = b1food_client.get('/b1food/terceiros/restful/cargos',
                                 query_string=query_string)
    res = json.loads(response.data.decode('utf-8'))
    assert len(res) > 0
    assert res[0] == {
        'ativo': True,
        'id': profile.id,
        'codigo': profile.id,
        'dataCriacao': profile.te.te_server.strftime('%Y-%m-%d %H:%M:%S -0300'),
        'dataAlteracao': profile.te.te_time.strftime('%Y-%m-%d %H:%M:%S -0300'),
        'nome': profile.name,
        'redeId': network['id'],
        'lojaId': None,
    }
@mock.patch('stoqserver.api.resources.b1food._get_network_info')
@mock.patch('stoqserver.api.decorators.get_config')
def test_get_user_profile_active(get_config_mock, get_network_info, b1food_client,
                                 current_user, network):
    """cargos with ativo=1 still returns the profile (profiles report ativo=True)."""
    get_config_mock.return_value.get.return_value = 'B1FoodClientId'
    get_network_info.return_value = network
    profile = current_user.profile
    query_string = {
        'Authorization': 'Bearer B1FoodClientId',
        'ativo': 1
    }
    response = b1food_client.get('/b1food/terceiros/restful/cargos',
                                 query_string=query_string)
    res = json.loads(response.data.decode('utf-8'))
    assert len(res) > 0
    assert res[0] == {
        'ativo': True,
        'id': profile.id,
        'codigo': profile.id,
        'dataCriacao': profile.te.te_server.strftime('%Y-%m-%d %H:%M:%S -0300'),
        'dataAlteracao': profile.te.te_time.strftime('%Y-%m-%d %H:%M:%S -0300'),
        'nome': profile.name,
        'redeId': network['id'],
        'lojaId': None,
    }
@mock.patch('stoqserver.api.decorators.get_config')
def test_get_user_profile_inactive(get_config_mock, b1food_client):
    """cargos filtered with ativo=0 returns an empty list."""
    get_config_mock.return_value.get.return_value = 'B1FoodClientId'

    response = b1food_client.get(
        '/b1food/terceiros/restful/cargos',
        query_string={
            'Authorization': 'Bearer B1FoodClientId',
            'ativo': 0,
        })
    payload = json.loads(response.data.decode('utf-8'))
    assert payload == []
@mock.patch('stoqserver.api.resources.b1food._get_network_info')
@mock.patch('stoqserver.api.decorators.get_config')
def test_get_branches_successfully(get_config_mock, get_network_info,
                                   b1food_client, network):
    """rede-loja returns the network with its list of stores ('lojas')."""
    get_config_mock.return_value.get.return_value = 'B1FoodClientId'
    get_network_info.return_value = network
    query_string = {
        'Authorization': 'Bearer B1FoodClientId',
    }
    response = b1food_client.get('/b1food/terceiros/restful/rede-loja',
                                 query_string=query_string)
    res = json.loads(response.data.decode('utf-8'))
    assert len(res) > 0
    assert res[0]['idRede'] == network['id']
    assert res[0]['nome'] == network['name']
    assert res[0]['ativo'] is True
    assert len(res[0]['lojas']) > 0
    assert 'idLoja' in res[0]['lojas'][0]
    assert 'nome' in res[0]['lojas'][0]
    assert 'ativo' in res[0]['lojas'][0]
@mock.patch('stoqserver.api.resources.b1food._get_network_info')
@mock.patch('stoqserver.api.decorators.get_config')
def test_get_branches_only_active(get_config_mock, get_network_info,
                                  b1food_client, network):
    """rede-loja with ativo=1 lists only active stores in 'lojas'."""
    get_config_mock.return_value.get.return_value = 'B1FoodClientId'
    get_network_info.return_value = network
    query_string = {
        'Authorization': 'Bearer B1FoodClientId',
        'ativo': '1',
    }
    response = b1food_client.get('/b1food/terceiros/restful/rede-loja',
                                 query_string=query_string)
    res = json.loads(response.data.decode('utf-8'))
    assert len(res) > 0
    assert res[0]['idRede'] == network['id']
    assert res[0]['nome'] == network['name']
    assert res[0]['ativo'] is True
    assert len(res[0]['lojas']) > 0
    assert 'idLoja' in res[0]['lojas'][0]
    assert 'nome' in res[0]['lojas'][0]
    assert 'ativo' in res[0]['lojas'][0]
    assert res[0]['lojas'][0]['ativo'] is True
@mock.patch('stoqserver.api.resources.b1food._get_network_info')
@mock.patch('stoqserver.api.decorators.get_config')
@pytest.mark.usefixtures('mock_new_store')
def test_get_discount_categories_successfully(get_config_mock, get_network_info,
                                              b1food_client, network, client_category):
    """tiposdescontos returns the client category as a discount type."""
    get_config_mock.return_value.get.return_value = 'B1FoodClientId'
    get_network_info.return_value = network
    query_string = {
        'Authorization': 'Bearer B1FoodClientId',
    }
    response = b1food_client.get('/b1food/terceiros/restful/tiposdescontos',
                                 query_string=query_string)
    res = json.loads(response.data.decode('utf-8'))
    assert len(res) > 0
    assert res[0] == {
        'ativo': True,
        'id': client_category.id,
        'codigo': client_category.id,
        # NOTE(review): dataCriacao uses te_time here while the cargos tests
        # use te_server for it — verify which one the API actually emits.
        'dataCriacao': client_category.te.te_time.strftime('%Y-%m-%d %H:%M:%S -0300'),
        'dataAlteracao': client_category.te.te_server.strftime('%Y-%m-%d %H:%M:%S -0300'),
        'nome': client_category.name,
        'redeId': network['id'],
        'lojaId': None
    }
@mock.patch('stoqserver.api.resources.b1food._get_network_info')
@mock.patch('stoqserver.api.decorators.get_config')
@pytest.mark.usefixtures('mock_new_store')
def test_get_discount_categories_active(get_config_mock, get_network_info,
                                        b1food_client, network, client_category):
    """tiposdescontos with ativo=1 still returns the category (always active)."""
    get_config_mock.return_value.get.return_value = 'B1FoodClientId'
    get_network_info.return_value = network
    query_string = {
        'Authorization': 'Bearer B1FoodClientId',
        'ativo': 1
    }
    response = b1food_client.get('/b1food/terceiros/restful/tiposdescontos',
                                 query_string=query_string)
    res = json.loads(response.data.decode('utf-8'))
    assert len(res) > 0
    assert res[0] == {
        'ativo': True,
        'id': client_category.id,
        'codigo': client_category.id,
        'dataCriacao': client_category.te.te_time.strftime('%Y-%m-%d %H:%M:%S -0300'),
        'dataAlteracao': client_category.te.te_server.strftime('%Y-%m-%d %H:%M:%S -0300'),
        'nome': client_category.name,
        'redeId': network['id'],
        'lojaId': None
    }
@mock.patch('stoqserver.api.resources.b1food._get_network_info')
@mock.patch('stoqserver.api.decorators.get_config')
@pytest.mark.usefixtures('mock_new_store')
def test_get_discount_categories_inactive(get_config_mock, get_network_info,
                                          b1food_client, network, client_category):
    """tiposdescontos filtered with ativo=0 returns an empty list."""
    get_config_mock.return_value.get.return_value = 'B1FoodClientId'
    get_network_info.return_value = network

    response = b1food_client.get(
        '/b1food/terceiros/restful/tiposdescontos',
        query_string={
            'Authorization': 'Bearer B1FoodClientId',
            'ativo': 0,
        })
    payload = json.loads(response.data.decode('utf-8'))
    assert payload == []
@mock.patch('stoqserver.api.resources.b1food._get_network_info')
@mock.patch('stoqserver.api.decorators.get_config')
def test_get_login_users(get_config_mock, get_network_info, store,
                         b1food_client, network, current_user):
    """funcionarios lists every login user with name parts and role info."""
    get_config_mock.return_value.get.return_value = 'B1FoodClientId'
    get_network_info.return_value = network
    query_string = {
        'Authorization': 'Bearer B1FoodClientId',
    }
    response = b1food_client.get('/b1food/terceiros/restful/funcionarios',
                                 query_string=query_string)
    res = json.loads(response.data.decode('utf-8'))
    assert len(res) == store.find(LoginUser).count()
    person_names = _get_person_names(current_user.person)
    current_user_res = {
        'id': current_user.id,
        'codigo': current_user.username,
        'dataCriacao': current_user.te.te_time.strftime('%Y-%m-%d %H:%M:%S -0300'),
        'dataAlteracao': current_user.te.te_server.strftime('%Y-%m-%d %H:%M:%S -0300'),
        'primeiroNome': person_names['primeiroNome'],
        'segundoNome': person_names['segundoNome'],
        'sobrenome': person_names['sobrenome'],
        'apelido': person_names['apelido'],
        'idCargo': current_user.profile.id,
        'codCargo': current_user.profile.id,
        'nomeCargo': current_user.profile.name,
        'redeId': network['id'],
        'lojaId': None,
        'ativo': current_user.is_active
    }
    assert current_user_res in res
@pytest.mark.usefixtures('mock_new_store')
@mock.patch('stoqserver.api.resources.b1food._get_network_info')
@mock.patch('stoqserver.api.decorators.get_config')
def test_get_login_users_active(get_config_mock, get_network_info, store,
                                b1food_client, current_user, network):
    """funcionarios with ativo=1 must exclude the deactivated current user."""
    get_config_mock.return_value.get.return_value = 'B1FoodClientId'
    get_network_info.return_value = network
    current_user.is_active = False
    query_string = {
        'Authorization': 'Bearer B1FoodClientId',
        'ativo': 1
    }
    response = b1food_client.get('/b1food/terceiros/restful/funcionarios',
                                 query_string=query_string)
    res = json.loads(response.data.decode('utf-8'))
    assert len(res) == store.find(LoginUser, is_active=True).count()
    assert not any(res_item['id'] == current_user.id for res_item in res)
@pytest.mark.usefixtures('mock_new_store')
@mock.patch('stoqserver.api.resources.b1food._get_network_info')
@mock.patch('stoqserver.api.decorators.get_config')
def test_get_login_users_inactive(get_config_mock, get_network_info,
                                  b1food_client, current_user, network):
    """funcionarios filtered with ativo=0 returns an empty list."""
    get_config_mock.return_value.get.return_value = 'B1FoodClientId'
    get_network_info.return_value = network

    response = b1food_client.get(
        '/b1food/terceiros/restful/funcionarios',
        query_string={
            'Authorization': 'Bearer B1FoodClientId',
            'ativo': 0,
        })
    payload = json.loads(response.data.decode('utf-8'))
    assert payload == []
| stoq/stoq-server | tests/api/resources/test_b1food.py | Python | gpl-2.0 | 82,585 |
#!/usr/bin/python
#coding=utf-8
'''This is test module
@author: sheng
@contact: sinotradition@gmail.com
@copyright: License according to the project license.
'''
import unittest
from sinoera.sinozodiac import dogloyalty
# Fix: the original was missing the 'class' keyword, making the module a
# syntax error ('TestDogloyaltyFunctions(unittest.TestCase):' is not a
# valid statement).
class TestDogloyaltyFunctions(unittest.TestCase):
    """Test case skeleton for sinoera.sinozodiac.dogloyalty."""

    def setUp(self):
        # No fixtures needed yet.
        pass

    def test_XXX(self):
        # Placeholder test to be filled in.
        pass
# Run the test suite when this module is executed directly.
if __name__ == "__main__":
    unittest.main()
| sinotradition/sinoera | sinoera/tst/sinozodiac/test_dogloyalty.py | Python | apache-2.0 | 390 |
from django import forms
from .models import Comment
class CommentForm(forms.ModelForm):
    """ModelForm that creates a Comment attached to a blog entry.

    The target entry must be supplied via the 'entry' keyword argument on
    construction; it is set on the comment when the form is saved.
    """

    class Meta:
        model = Comment
        fields = ('name', 'email', 'body')

    def __init__(self, *args, **kwargs):
        # Pop 'entry' before delegating: ModelForm does not accept it.
        self.entry = kwargs.pop('entry')
        super(CommentForm, self).__init__(*args, **kwargs)

    def save(self):
        # Defer the initial save so the entry reference can be filled first.
        new_comment = super(CommentForm, self).save(commit=False)
        new_comment.entry = self.entry
        new_comment.save()
        return new_comment
| claude-lee/MyBlog | blog/forms.py | Python | mit | 512 |
from daisychain.steps.input import Input
from daisychain.field import Field
from py3compat import string_types
class InputFile(Input):
    """Input step that loads the contents of a file into ``output``."""

    # Filesystem path of the file to read.
    path = Field(instance_of=string_types)

    def run(self):
        with open(self.path) as source:
            contents = source.read()
        self.output = contents
        self.status.set_finished()
| python-daisychain/daisychain | daisychain/steps/inputs/file.py | Python | mit | 304 |
from __future__ import unicode_literals
from datetime import date, timedelta
from decimal import Decimal
from django.test import TestCase
from model_mommy import mommy
class BudgetModelTest(TestCase):
    """Tests for the Budget model: defaults, managers and totals."""

    def test_create_new_budget(self):
        """A freshly created budget has its basic fields populated."""
        budget = mommy.make('Budget')
        self.assertTrue(budget.name)
        self.assertTrue(budget.slug)
        self.assertTrue(budget.start_date)
        self.assertFalse(budget.is_deleted)

    def test_budget_unicode_string(self):
        budget = mommy.make('Budget')
        self.assertTrue(budget.name, str(budget))

    def test_budget_active_manager(self):
        """The 'active' manager must exclude soft-deleted budgets."""
        from budget.models import Budget
        mommy.make(Budget, is_deleted=True)
        self.assertEqual(1, Budget.objects.count())
        self.assertEqual(0, Budget.active.count())

    def test_budget_actual_total(self):
        """actual_total sums the transactions of the budget's categories."""
        budget = mommy.make('Budget')
        category = mommy.make('Category')
        start_date, end_date = date.today(), date.today()
        mommy.make('Transaction', amount=Decimal('10.0'), date=start_date, category=category)
        mommy.make('Transaction', amount=Decimal('10.0'), date=start_date, category=category)
        mommy.make('BudgetEstimate', category=category, budget=budget)
        self.assertEqual(Decimal('20.0'), budget.actual_total(start_date, end_date))

    def test_budget_actual_total_with_deleted_transaction(self):
        """Soft-deleted transactions do not count towards actual_total."""
        start_date, end_date = date.today(), date.today()
        budget = mommy.make('Budget')
        category = mommy.make('Category')
        mommy.make('Transaction', is_deleted=True, date=start_date, category=category)
        mommy.make('BudgetEstimate', category=category, budget=budget)
        self.assertEqual(0, budget.actual_total(start_date, end_date))

    def test_budget_estimates_and_transactions(self):
        """estimates_and_transactions groups transactions per estimate and
        returns the grand total across all categories."""
        budget = mommy.make('Budget')
        start_date, end_date = date.today(), date.today()
        c1 = mommy.make('Category')
        t1 = mommy.make('Transaction', amount=Decimal('25.0'), date=start_date, category=c1)
        t2 = mommy.make('Transaction', amount=Decimal('75.0'), date=start_date, category=c1)
        e1 = mommy.make('BudgetEstimate', category=c1, budget=budget)
        c2 = mommy.make('Category')
        t3 = mommy.make('Transaction', amount=Decimal('30.0'), date=start_date, category=c2)
        e2 = mommy.make('BudgetEstimate', category=c2, budget=budget)
        estimates, total = budget.estimates_and_transactions(start_date, end_date)
        self.assertEqual(Decimal('130.0'), total)
        self.assertEqual(2, len(estimates))
        self.assertIn(t1, estimates[0]['transactions'])
        self.assertIn(t2, estimates[0]['transactions'])
        self.assertEqual(e1, estimates[0]['estimate'])
        self.assertEqual(Decimal('100.0'), estimates[0]['actual_amount'])
        self.assertIn(t3, estimates[1]['transactions'])
        self.assertEqual(e2, estimates[1]['estimate'])
        self.assertEqual(Decimal('30.0'), estimates[1]['actual_amount'])

    def test_budget_most_current_for_date(self):
        """most_current_for_date picks the budget with the latest start_date
        not after the given date."""
        from budget.models import Budget
        today = date.today()
        yesterday = today - timedelta(days=1)
        tomorrow = today + timedelta(days=1)
        end_date = today + timedelta(days=7)
        mommy.make(Budget, start_date=yesterday)
        mommy.make(Budget, start_date=today)
        budget = mommy.make(Budget, start_date=tomorrow)
        self.assertEqual(3, Budget.active.count())
        self.assertEqual(budget, Budget.active.most_current_for_date(end_date))
| eliostvs/django-budget | django-budget/budget/tests/tests_budget_models.py | Python | mit | 3,558 |
"""
Wikipedia utils.
@author: Faegheh Hasibi (faegheh.hasibi@idi.ntnu.no)
"""
from urllib import quote
class WikipediaUtils(object):
    """Helpers for converting Wikipedia page titles and URIs."""

    mongo = None

    @staticmethod
    def wiki_title_to_uri(title):
        """
        Converts wiki page title to wiki_uri
        based on https://en.wikipedia.org/wiki/Wikipedia:Page_name#Spaces.2C_underscores_and_character_coding
        encoding based on http://dbpedia.org/services-resources/uri-encoding

        Returns None for an empty/None title.
        """
        if not title:
            return None
        # Percent-encode everything except Wikipedia's safe characters,
        # then fold spaces into underscores.
        encoded = quote(title, ' !$&\'()*+,-./:;=@_~').replace(' ', '_')
        return "<wikipedia:" + encoded + ">"

    @staticmethod
    def wiki_uri_to_dbp_uri(wiki_uri):
        """Converts Wikipedia uri to DBpedia URI."""
        return wiki_uri.replace("<wikipedia:", "<dbpedia:")
def main():
    # Example usage.  NOTE: Python 2 print statement — this module is
    # Python 2 code (see the `from urllib import quote` import above).
    print WikipediaUtils.wiki_title_to_uri("Tango (genre musical)")

if __name__ == "__main__":
    main()
"""
Supplies Layer and related classes that allow overlaying of Views,
including Overlay. A Layer is the final extension of View base class
that allows Views to be overlayed on top of each other.
Also supplies ViewMap which is the primary multi-dimensional Map type
for indexing, slicing and animating collections of Views.
"""
from functools import reduce
import numpy as np
import param
from .dimension import Dimension, Dimensioned, ViewableElement, ViewableTree
from .ndmapping import UniformNdMapping
from .layout import Composable, Layout, AdjointLayout
from .util import config, sanitize_identifier, unique_array
class Overlayable(object):
    """
    Overlayable provides a mix-in class to support the
    mul operation for overlaying multiple elements.
    """

    def __mul__(self, other):
        "Overlay object with other object."
        # DynamicMap is detected by name to avoid a circular import.
        if type(other).__name__ == 'DynamicMap':
            from .spaces import Callable

            def dynamic_mul(*args, **kwargs):
                return self * other[args]

            callback = Callable(dynamic_mul, inputs=[self, other])
            callback._is_overlay = True
            return other.clone(shared_data=False, callback=callback, streams=[])
        if isinstance(other, UniformNdMapping) and not isinstance(other, CompositeOverlay):
            # Overlay this element onto every item of the mapping.
            return other.clone([(key, self * el) for (key, el) in other.items()])
        if isinstance(other, (AdjointLayout, ViewableTree)) and not isinstance(other, Overlay):
            return NotImplemented
        return Overlay([self, other])
class CompositeOverlay(ViewableElement, Composable):
    """
    CompositeOverlay provides a common baseclass for Overlay classes.
    """

    # Allow deep (nested) indexing into the contained layers.
    _deep_indexable = True

    def hist(self, dimension=None, num_bins=20, bin_range=None,
             adjoin=True, index=0, **kwargs):
        """Computes and adjoins histogram along specified dimension(s).

        Defaults to first value dimension if present otherwise falls
        back to first key dimension.

        Args:
            dimension: Dimension(s) to compute histogram on
            num_bins (int, optional): Number of bins
            bin_range (tuple optional): Lower and upper bounds of bins
            adjoin (bool, optional): Whether to adjoin histogram
            index (int, optional): Index of layer to apply hist to

        Returns:
            AdjointLayout of element and histogram or just the
            histogram
        """
        # index may be either an integer position or a layer label.
        valid_ind = isinstance(index, int) and (0 <= index < len(self))
        valid_label = index in [el.label for el in self]
        if not any([valid_ind, valid_label]):
            raise TypeError("Please supply a suitable index or label for the histogram data")

        hists = self.get(index).hist(
            adjoin=False, dimension=dimension, bin_range=bin_range,
            num_bins=num_bins, **kwargs)
        if not isinstance(hists, Layout):
            hists = [hists]
        if not isinstance(dimension, list):
            dimension = ['Default']
        if adjoin:
            # Adjoin each histogram in turn, remembering which layer it
            # belongs to.
            layout = self
            for hist in hists:
                layout = layout << hist
            layout.main_layer = index
        elif len(dimension) > 1:
            layout = hists
        else:
            layout = hists[0]
        return layout

    def dimension_values(self, dimension, expanded=True, flat=True):
        """Return the values along the requested dimension.

        Args:
            dimension: The dimension to return values for
            expanded (bool, optional): Whether to expand values
                Whether to return the expanded values, behavior depends
                on the type of data:

                  * Columnar: If false returns unique values
                  * Geometry: If false returns scalar values per geometry
                  * Gridded: If false returns 1D coordinates

            flat (bool, optional): Whether to flatten array

        Returns:
            NumPy array of values along the requested dimension
        """
        values = []
        found = False
        for el in self:
            if dimension in el.dimensions(label=True):
                values.append(el.dimension_values(dimension))
                found = True
        if not found:
            # No layer declares this dimension; defer to the base class.
            return super(CompositeOverlay, self).dimension_values(dimension, expanded, flat)
        values = [v for v in values if v is not None and len(v)]
        if not values:
            # BUG FIX: np.array() with no arguments raises TypeError;
            # return an empty array instead.
            return np.array([])
        vals = np.concatenate(values)
        return vals if expanded else unique_array(vals)
class Overlay(ViewableTree, CompositeOverlay):
    """
    An Overlay consists of multiple Elements (potentially of
    heterogeneous type) presented one on top each other with a
    particular z-ordering.

    Overlays along with elements constitute the only valid leaf types of
    a Layout and in fact extend the Layout structure. Overlays are
    constructed using the * operator (building an identical structure
    to the + operator).
    """

    def __init__(self, items=None, group=None, label=None, **params):
        # Assign through __dict__ to bypass any attribute machinery before
        # the tree is initialized.
        self.__dict__['_fixed'] = False
        self.__dict__['_group'] = group
        self.__dict__['_label'] = label
        super(Overlay, self).__init__(items, **params)

    def __getitem__(self, key):
        """
        Allows transparently slicing the Elements in the Overlay
        to select specific layers in an Overlay use the .get method.
        """
        return Overlay([(k, v[key]) for k, v in self.items()])

    def get(self, identifier, default=None):
        """Get a layer in the Overlay.

        Get a particular layer in the Overlay using its path string
        or an integer index.

        Args:
            identifier: Index or path string of the item to return
            default: Value to return if no item is found

        Returns:
            The indexed layer of the Overlay
        """
        if isinstance(identifier, int):
            values = list(self.data.values())
            if 0 <= identifier < len(values):
                return values[identifier]
            else:
                return default
        return super(Overlay, self).get(identifier, default)

    def __add__(self, other):
        "Composes Overlay with other object into a Layout"
        return Layout([self, other])

    def __mul__(self, other):
        "Adds layer(s) from other object to Overlay"
        # DynamicMap is detected by name to avoid a circular import.
        if type(other).__name__ == 'DynamicMap':
            from .spaces import Callable
            def dynamic_mul(*args, **kwargs):
                element = other[args]
                return self * element
            callback = Callable(dynamic_mul, inputs=[self, other])
            callback._is_overlay = True
            return other.clone(shared_data=False, callback=callback,
                               streams=[])
        elif not isinstance(other, ViewableElement):
            return NotImplemented
        return Overlay([self, other])

    def collate(self):
        """
        Collates any objects in the Overlay resolving any issues
        the recommended nesting structure.
        """
        return reduce(lambda x,y: x*y, self.values())

    @property
    def group(self):
        # Explicitly set group wins; otherwise derive it from the
        # (non-auxiliary) layers when they agree on a single group.
        if self._group:
            return self._group
        elements = [el for el in self if not el._auxiliary_component]
        values = {el.group for el in elements}
        types = {type(el) for el in elements}
        if values:
            group = list(values)[0]
            vtype = list(types)[0].__name__
        else:
            group, vtype = [], ''
        if len(values) == 1 and group != vtype:
            return group
        else:
            return type(self).__name__

    @group.setter
    def group(self, group):
        if not sanitize_identifier.allowable(group):
            raise ValueError("Supplied group %s contains invalid characters." %
                             group)
        else:
            self._group = group

    @property
    def label(self):
        # Explicitly set label wins; otherwise use the layers' common label.
        if self._label:
            return self._label
        labels = {el.label for el in self
                  if not el._auxiliary_component}
        if len(labels) == 1:
            return list(labels)[0]
        else:
            return ''

    @label.setter
    def label(self, label):
        if not sanitize_identifier.allowable(label):
            # NOTE(review): message says "group" but reports the label value.
            raise ValueError("Supplied group %s contains invalid characters." %
                             label)
        self._label = label

    @property
    def ddims(self):
        # Union of all layer dimensions, first occurrence wins, order kept.
        dimensions = []
        dimension_names = []
        for el in self:
            for dim in el.dimensions():
                if dim.name not in dimension_names:
                    dimensions.append(dim)
                    dimension_names.append(dim.name)
        return dimensions

    @property
    def shape(self):
        raise NotImplementedError

    # Deprecated methods

    def collapse(self, function):
        "Deprecated method to collapse layers in the Overlay."
        if config.future_deprecations:
            self.param.warning('Overlay.collapse is deprecated, to'
                               'collapse multiple elements use a HoloMap.')
        elements = list(self)
        types = [type(el) for el in elements]
        values = [el.group for el in elements]
        # NOTE(review): operator precedence makes this
        # `(not types-homogeneous) and groups-homogeneous`; possibly the
        # intent was `not (A and B)` — confirm upstream before changing.
        if not len(set(types)) == 1 and len(set(values)) == 1:
            raise Exception("Overlay is not homogeneous in type or group "
                            "and cannot be collapsed.")
        else:
            return elements[0].clone(types[0].collapse_data([el.data for el in elements],
                                                            function, self.kdims))
class NdOverlay(Overlayable, UniformNdMapping, CompositeOverlay):
    """
    An NdOverlay allows a group of NdOverlay to be overlaid together. NdOverlay can
    be indexed out of an overlay and an overlay is an iterable that iterates
    over the contained layers.
    """

    kdims = param.List(default=[Dimension('Element')], constant=True, doc="""
        List of dimensions the NdOverlay can be indexed by.""")

    # Allow deep (nested) indexing into the contained layers.
    _deep_indexable = True

    def __init__(self, overlays=None, kdims=None, **params):
        super(NdOverlay, self).__init__(overlays, kdims=kdims, **params)
# Export every Dimensioned subclass defined in this module, plus the
# Overlayable mix-in (which is not itself Dimensioned).
__all__ = list(set([_k for _k, _v in locals().items()
                    if isinstance(_v, type) and issubclass(_v, Dimensioned)])) + ['Overlayable']
| basnijholt/holoviews | holoviews/core/overlay.py | Python | bsd-3-clause | 10,540 |
# -*- coding: utf-8 -*-
# ########################## Copyrights and license ############################
# #
# Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net> #
# #
# This file is part of PyGithub. #
# http://pygithub.github.io/PyGithub/v1/index.html #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
# ##############################################################################
import github.GithubObject
import github.NamedUser
class StatsPunchCard(github.GithubObject.NonCompletableGithubObject):
    """
    This class represents the punch card. The reference can be found here http://developer.github.com/v3/repos/statistics/#get-the-number-of-commits-per-hour-in-each-day
    """

    def get(self, day, hour):
        """
        Get a specific element

        :param day: int
        :param hour: int
        :rtype: int
        """
        # Raises KeyError for a (day, hour) pair not present in the data.
        return self._dict[(day, hour)]

    def _initAttributes(self):
        # Maps (day, hour) -> commit count.
        self._dict = {}

    def _useAttributes(self, attributes):
        # Each attribute entry is a [day, hour, commits] triple.
        self._dict.update({(day, hour): commits
                           for day, hour, commits in attributes})
| ArthurGarnier/SickRage | lib/github/StatsPunchCard.py | Python | gpl-3.0 | 2,422 |
# Copyright 2015 Tianchuan Du University of Delaware
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THIS CODE IS PROVIDED *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY IMPLIED
# WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE,
# MERCHANTABLITY OR NON-INFRINGEMENT.
# See the Apache 2 License for the specific language governing permissions and
# limitations under the License.
import theano.tensor as T
def maxout_func(n_out, last_start, pool_size, rectify, lin_output):
    """Apply maxout pooling over groups of *pool_size* columns of *lin_output*.

    :param n_out: number of output units (kept for interface compatibility;
        not used inside this function)
    :param last_start: start index of the last pooling group
    :param pool_size: number of linear units pooled into one maxout unit
    :param rectify: if true, additionally clamp the result at zero (ReLU)
    :param lin_output: symbolic (theano) matrix of linear activations
    :returns: symbolic matrix of pooled activations
    """
    tmp_output = lin_output[:, 0:last_start + 1:pool_size]
    for i in range(1, pool_size):
        cur = lin_output[:, i:last_start + i + 1:pool_size]
        tmp_output = T.maximum(cur, tmp_output)
    if rectify:
        # BUG FIX: the original referenced self.tmp_output inside a plain
        # function, raising NameError whenever rectify was true.
        tmp_output = T.maximum(0, tmp_output)
    # BUG FIX: the original computed the result but never returned it.
    return tmp_output
| magic2du/dlnn | utils/activation.py | Python | apache-2.0 | 1,024 |
"""
Translation macro
Translates mid-level into low-level
The mid-level is assumed to be correct; any errors should be caught there
"""
from warnings import warn
from collections import OrderedDict
from functools import partial
from seamless.core import (cell as core_cell,
transformer, reactor, context, macro, StructuredCell)
from . import copying
from .util import as_tuple, get_path, get_path_link, find_channels, build_structured_cell
import logging
logger = logging.getLogger("seamless")
def print_info(*args):
    """Log *args* (space-joined, stringified) at INFO level."""
    logger.info(" ".join(str(arg) for arg in args))

def print_warning(*args):
    """Log *args* (space-joined, stringified) at WARNING level."""
    logger.warning(" ".join(str(arg) for arg in args))

def print_debug(*args):
    """Log *args* (space-joined, stringified) at DEBUG level."""
    logger.debug(" ".join(str(arg) for arg in args))

def print_error(*args):
    """Log *args* (space-joined, stringified) at ERROR level."""
    logger.error(" ".join(str(arg) for arg in args))
# Cell types that are instantiated directly as low-level core cells;
# anything else (i.e. "structured") gets a StructuredCell wrapper
# (see translate_cell below).
direct_celltypes = (
    "text", "plain", "mixed", "binary",
    "cson", "yaml", "str", "bytes", "int", "float", "bool",
    "checksum"
)

# Checksum used as the default schema (an empty dict) in
# set_structured_cell_from_checksum below.
empty_dict_checksum = 'd0a1b2af1705c1b8495b00145082ef7470384e62ac1c4d9b9cdbbe0476c28f8c'
def set_structured_cell_from_checksum(cell, checksum):
    """Initialize a StructuredCell's sub-cells from a mid-level checksum dict.

    *checksum* may carry "value", "buffer", "auth" and "schema" entries;
    currently only "auth" and "schema" are applied — the "value"/"buffer"
    paths are disabled below (kept as string literals) because those
    checksums are recalculated anew.  Triggers the structured-cell
    machinery once anything has been set.
    """
    trigger = False
    """
    if "temp" in checksum:
        assert len(checksum) == 1, checksum.keys()
        temp_checksum = checksum["temp"]
        if cell.hash_pattern is not None:
            temp_cs = bytes.fromhex(temp_checksum)
            temp_cs2 = apply_hash_pattern_sync(
                temp_cs, cell.hash_pattern
            )
            temp_checksum = temp_cs2.hex()
        cell.auth._set_checksum(temp_checksum, initial=True, from_structured_cell=False)
        trigger = True
    else:
    """
    if "value" in checksum:
        # not done! value calculated anew...
        """
        cell._data._set_checksum(
            checksum["value"],
            from_structured_cell=True,
            initial=True
        )
        trigger = True
        """
    if "buffer" in checksum:
        # not done! value calculated anew...
        """
        cell.buffer._set_checksum(
            checksum["buffer"],
            from_structured_cell=True,
            initial=True
        )
        trigger = True
        """
    if "auth" in checksum:
        if cell.auth is None:
            # Cell has no independent (authoritative) part to receive it.
            msg = "Warning: %s has no independence, but an auth checksum is present"
            print(msg % cell)
        else:
            cell.auth._set_checksum(
                checksum["auth"],
                from_structured_cell=True,
                initial=True
            )
            cell._data._void = False
            cell._data._status_reason = None
            trigger = True
    # The schema is always set, defaulting to the empty-dict checksum.
    schema_checksum = empty_dict_checksum
    if "schema" in checksum:
        schema_checksum = checksum["schema"]
    cell.schema._set_checksum(
        schema_checksum,
        from_structured_cell=True,
        initial=True
    )
    # NOTE(review): trigger is set unconditionally here, which makes the
    # guard below always true — confirm against upstream indentation.
    trigger = True
    if trigger:
        cell._get_manager().structured_cell_trigger(cell)
def translate_cell(node, root, namespace, inchannels, outchannels):
    """Instantiate the low-level cell described by mid-level *node* under *root*.

    Structured cells get a StructuredCell wrapper; any celltype in
    direct_celltypes maps onto a plain core cell.  In/outchannel paths are
    registered in *namespace* for later connection wiring.  Returns the
    created child cell.
    """
    path = node["path"]
    parent = get_path(root, path[:-1], None, None)
    name = path[-1]
    ct = node["celltype"]
    if ct == "structured":
        datatype = node["datatype"]
        ### TODO: harmonize datatype with schema type
        hash_pattern = node["hash_pattern"]
        mount = node.get("mount")
        child = build_structured_cell(
            parent, name,
            inchannels, outchannels,
            fingertip_no_remote=node.get("fingertip_no_remote", False),
            fingertip_no_recompute=node.get("fingertip_no_recompute", False),
            hash_pattern=hash_pattern,
            mount=mount
        )
        # Register every channel under its full high-level path.
        for inchannel in inchannels:
            cname = child.inchannels[inchannel].subpath
            if cname == "self":
                cpath = path
            else:
                if isinstance(cname, str):
                    cname = (cname,)
                cpath = path + cname
            namespace[cpath, "target"] = child.inchannels[inchannel], node
        for outchannel in outchannels:
            cpath = path + outchannel
            namespace[cpath, "source"] = child.outchannels[outchannel], node
    else: #not structured
        for c in inchannels + outchannels:
            assert not len(c) #should have been checked by highlevel
        if ct == "code":
            if node["language"] in ("python", "ipython"):
                if node.get("transformer"):
                    child = core_cell("transformer")
                else:
                    child = core_cell(node["language"])
            else:
                # Non-Python code is stored as plain text.
                child = core_cell("text")
                child.set_file_extension(node["file_extension"])
        elif ct in direct_celltypes:
            child = core_cell(ct)
            if ct == "mixed":
                child._hash_pattern = node.get("hash_pattern")
        else:
            raise ValueError(ct) #unknown celltype; should have been caught by high level
        if node.get("fingertip_no_recompute"):
            child._fingertip_recompute = False
        if node.get("fingertip_no_remote"):
            child._fingertip_remote = False
        setattr(parent, name, child)
    # NOTE(review): pathstr is computed but never used below.
    pathstr = "." + ".".join(path)
    checksum = node.get("checksum")
    if checksum is not None:
        if ct == "structured":
            set_structured_cell_from_checksum(child, checksum)
        else:
            # Only apply a value checksum if no connection writes into us.
            if "value" in checksum and not len(inchannels):
                child._set_checksum(checksum["value"], initial=True)
            """
            if "temp" in checksum:
                assert len(checksum) == 1, checksum.keys()
                child._set_checksum(checksum["temp"], initial=True)
            """
    if ct != "structured":
        if "file_extension" in node:
            child.set_file_extension(node["file_extension"])
        if "mount" in node:
            child.mount(**node["mount"])
    return child
def translate_connection(node, namespace, ctx):
    """Wire up the mid-level connection *node* between two low-level endpoints.

    Structured-cell endpoints are replaced by their root in/outchannel;
    worker-pin-to-pin connections get an intermediate "mixed" cell.
    """
    from ..core.cell import Cell
    from ..core.structured_cell import Inchannel, Outchannel
    from ..core.worker import Worker, PinBase
    source_path, target_path = node["source"], node["target"]
    source, source_node, source_is_edit = get_path(
        ctx, source_path, namespace, False,
        return_node = True
    )
    if isinstance(source, StructuredCell):
        source = source.outchannels[()]
    target, target_node, target_is_edit = get_path(
        ctx, target_path, namespace, True,
        return_node=True
    )
    if isinstance(target, StructuredCell):
        target = target.inchannels[()]

    def do_connect(source, target):
        # Edit (bidirectional) links require plain cells on both sides.
        if source_is_edit or target_is_edit:
            msg = "Cannot set up an edit link involving a structured cell: %s (with %s)"
            if not isinstance(source, Cell):
                raise Exception(msg % (source.structured_cell(), target))
            if not isinstance(target, Cell):
                raise Exception(msg % (target.structured_cell(), source))
            source.bilink(target)
            return
        if isinstance(source, Cell) or isinstance(target, Cell):
            source.connect(target)
            return
        # Neither endpoint is a cell: create a uniquely-named
        # intermediate mixed cell and route through it.
        n = 0
        while 1:
            n += 1
            con_name = "CONNECTION_" + str(n)
            if con_name not in ctx._children:
                break
        hash_pattern = source.hash_pattern
        if isinstance(source, Outchannel):
            if hash_pattern is not None:
                hash_pattern = access_hash_pattern(hash_pattern, source.subpath)
        intermediate = core_cell("mixed", hash_pattern=hash_pattern)
        setattr(ctx, con_name, intermediate)
        source.connect(intermediate)
        intermediate.connect(target)

    if not isinstance(source, (Worker, PinBase, Outchannel, Cell)):
        raise TypeError(source)
    if not isinstance(target, (Worker, PinBase, Inchannel, Cell)):
        raise TypeError(target)
    do_connect(source, target)
def translate_link(node, namespace, ctx):
    """Create a bidirectional link between the two cells named by *node*."""
    first_cell, second_cell = (
        get_path_link(ctx, node[key], namespace)
        for key in ("first", "second")
    )
    first_cell.bilink(second_cell)
# Lazily-imported translators; populated by import_before_translate.
translate_compiled_transformer = None
translate_bash_transformer = None

def import_before_translate(graph):
    """Pre-import the translator modules that *graph*'s transformer nodes need.

    Idempotent: returns immediately once both lazy imports are done.
    """
    global translate_compiled_transformer
    global translate_bash_transformer
    impvars = (
        "translate_compiled_transformer",
        "translate_bash_transformer",
    )
    if all([globals()[var] is not None for var in impvars]):
        return
    nodes = graph["nodes"]
    for node in nodes:
        t = node["type"]
        if t == "transformer":
            # The `global` declarations above make these imports bind the
            # module-level names.
            if node["compiled"]:
                from .translate_compiled_transformer import translate_compiled_transformer
            elif node["language"] == "bash":
                from .translate_bash_transformer import translate_bash_transformer
def translate(graph, ctx, environment):
    """Translate mid-level *graph* into low-level constructs under context *ctx*.

    Builds sub-contexts, then one low-level object per node (transformer,
    macro, cell, module), and finally wires up all connections and links.
    The mid-level graph is assumed correct; errors should be caught there.
    """
    from ..core.macro_mode import curr_macro
    if curr_macro() is None:
        print_info("*" * 30 + "TRANSLATE" + "*" * 30)
    #import traceback; stack = traceback.extract_stack(); print("TRANSLATE:"); print("".join(traceback.format_list(stack[:3])))
    nodes, connections = graph["nodes"], graph["connections"]
    # Create sub-contexts shallowest-first so parents exist before children.
    contexts = {con["path"]: con for con in nodes if con["type"] == "context"}
    for path in sorted(contexts.keys(), key=lambda k:len(k)):
        parent = get_path(ctx, path[:-1], None, is_target=False)
        name = path[-1]
        c = context()
        setattr(parent, name, c)
        # No need to add it to namespace, as long as the low-level graph structure is imitated
    connection_paths = [(con["source"], con["target"]) for con in connections if con["type"] == "connection"]
    namespace = {}
    # NOTE(review): this first loop has no observable effect (its body only
    # assigns loop-local variables); it looks like a leftover — confirm
    # before removing.
    for node in nodes:
        t = node["type"]
        if t in ("context", "link"):
            continue
        path = node["path"]
    for node in nodes:
        t = node["type"]
        if t in ("context", "link"):
            continue
        path = node["path"]
        if t == "transformer":
            inchannels, outchannels = find_channels(node["path"], connection_paths)
            # A connection into ("meta",) is passed as a flag, not a pin.
            try:
                inchannels.remove(("meta",))
                has_meta_connection = True
            except ValueError:
                has_meta_connection = False
            language = node["language"]
            if node["compiled"]:
                from .translate_compiled_transformer import translate_compiled_transformer
                translate_compiled_transformer(
                    node, ctx, namespace, inchannels, outchannels,
                    has_meta_connection=has_meta_connection
                )
            elif language == "bash":
                translate_bash_transformer(
                    node, ctx, namespace, inchannels, outchannels,
                    has_meta_connection=has_meta_connection
                )
            else:
                # Non-bash, non-compiled: Python/IPython, or a language
                # supported via an IPython template or Python bridge
                # declared in the environment.
                ipy_template = None
                py_bridge = None
                if language not in ("python", "ipython"):
                    ok = False
                    if environment is not None:
                        try:
                            ipy_template = environment.get_ipy_template(language)
                            ok = True
                        except KeyError:
                            pass
                        try:
                            py_bridge = environment.get_py_bridge(language)
                            ok = True
                        except KeyError:
                            pass
                    if ipy_template is not None and py_bridge is not None:
                        msg = "Language '{}' has an IPython template AND a Python bridge"
                        raise ValueError(msg.format(language))
                    if not ok:
                        raise NotImplementedError(language)
                translate_py_transformer(
                    node, ctx, namespace, inchannels, outchannels,
                    ipy_template=ipy_template,
                    py_bridge=py_bridge,
                    has_meta_connection=has_meta_connection
                )
        elif t == "macro":
            if node["language"] != "python":
                raise NotImplementedError(node["language"])
            inchannels, outchannels = find_channels(node["path"], connection_paths)
            translate_macro(node, ctx, namespace, inchannels, outchannels)
        elif t == "cell":
            inchannels, outchannels = find_channels(path, connection_paths)
            translate_cell(node, ctx, namespace, inchannels, outchannels)
        elif t == "module":
            inchannels, outchannels = find_channels(path, connection_paths)
            translate_module(node, ctx, namespace, inchannels, outchannels)
        elif t == "libinstance":
            msg = "Libinstance '%s' was not removed during pre-translation"
            raise TypeError(msg % ("." + ".".join(path)))
        else:
            raise TypeError(t)
        node.pop("UNTRANSLATED", None)
        node.pop("UNSHARE", None)
    # Wire connections deepest-path-first.
    namespace2 = OrderedDict()
    for k in sorted(namespace.keys(), key=lambda k:-len(k)):
        namespace2[k] = namespace[k]
    for connection in connections:
        if connection["type"] == "connection":
            translate_connection(connection, namespace2, ctx)
        elif connection["type"] == "link":
            translate_link(connection, namespace2, ctx)
        elif connection["type"] == "virtual":
            # Virtual connections have no low-level counterpart.
            pass
        else:
            raise TypeError(connection["type"])
from .translate_py_transformer import translate_py_transformer
from .translate_macro import translate_macro
from .translate_module import translate_module
'''
# imported only at need...
from .translate_bash_transformer import translate_bash_transformer
from .translate_compiled_transformer import translate_compiled_transformer
'''
from ..core.protocol.deep_structure import apply_hash_pattern_sync, access_hash_pattern | sjdv1982/seamless | seamless/midlevel/translate.py | Python | mit | 14,125 |
#!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pyscf.tdscf import *
| gkc1000/pyscf | pyscf/tddft/__init__.py | Python | apache-2.0 | 660 |
from django.apps import AppConfig
class FooConfig(AppConfig):
    """Django app configuration for the project's `foo` app."""
    # NOTE: '<%= slug %>' appears to be an EJS-style placeholder filled in
    # by the project generator; this file is a template, not runnable as-is.
    name = '<%= slug %>.foo'
# Reformat data into CSV
#
# @autor Luke Munro
import Trainer, csv
import DeepNN as NN
import sys as SYS
# Build a trainer around a 24-input / 3-unit network and dump the recorded
# game states (plus the index of the move played) into a CSV file.
# NOTE: Python 2 script (bare `print` statement below); the record id is
# taken from the first command-line argument.
Ash = Trainer.Trainer(24, 3, NN.NNet(24, 3))
f = open("move_record3#{0}.csv".format(SYS.argv[1]), "wt")
writer = csv.writer(f)
raw_data = Ash.data_from_record(SYS.argv[1])
for pair in raw_data:
    old_state = pair[0]
    new_state = pair[1]
    # One-hot move vector flattened to a 24-element list; the played move is
    # the position of the single 1.
    move = Ash.get_training_move(old_state, new_state).reshape(1, 24).tolist()[0]
    old_state.append(move.index(1))
    print move
    writer.writerow(old_state)
f.close()
"""CS411Project URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import include, url
from django.contrib import admin
from meetu import urls as meetu_urls
urlpatterns = [
    url(r'^admin/', include(admin.site.urls)),
    # Catch-all: r'^/*' matches any path (zero or more leading slashes),
    # so everything not caught by the admin route goes to the meetu app.
    url(r'^/*', include(meetu_urls)),
]
| cs411sp15vmnjhtdw/MeetU | CS411Project/urls.py | Python | mit | 837 |
"""SCons.Tool.rpm
Tool-specific initialization for rpm.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
The rpm tool calls the rpmbuild command. The first and only argument should a
tar.gz consisting of the source file and a specfile.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/rpm.py 2009/09/04 16:33:07 david"
import os
import re
import shutil
import subprocess
import SCons.Builder
import SCons.Node.FS
import SCons.Util
import SCons.Action
import SCons.Defaults
def get_cmd(source, env):
    """Return the rpmbuild command line for *source* (a tar.gz node or a
    list whose first element is that node)."""
    if SCons.Util.is_List(source):
        tarball = source[0]
    else:
        tarball = source
    return "%s %s %s" % (env['RPM'], env['RPMFLAGS'], tarball.abspath)
def build_rpm(target, source, env):
    """Action function: run rpmbuild in a scratch build root next to the
    target and copy the produced packages onto the target nodes.

    Returns the rpmbuild exit status (0 on success); raises
    SCons.Errors.BuildError on failure.
    """
    # create a temporary rpm build root.
    tmpdir = os.path.join( os.path.dirname( target[0].abspath ), 'rpmtemp' )
    if os.path.exists(tmpdir):
        shutil.rmtree(tmpdir)

    # now create the mandatory rpm directory structure.
    for d in ['RPMS', 'SRPMS', 'SPECS', 'BUILD']:
        os.makedirs( os.path.join( tmpdir, d ) )

    # set the topdir as an rpmflag.
    env.Prepend( RPMFLAGS = '--define \'_topdir %s\'' % tmpdir )

    # now call rpmbuild to create the rpm package.
    handle = subprocess.Popen(get_cmd(source, env),
                              stdout=subprocess.PIPE,
                              stderr=subprocess.STDOUT,
                              shell=True)
    # NOTE(review): under Python 3 this read() yields bytes while the regex
    # below expects str — this module targets Python-2-era SCons.
    output = handle.stdout.read()
    status = handle.wait()

    if status:
        raise SCons.Errors.BuildError( node=target[0],
                                       errstr=output,
                                       filename=str(target[0]) )
    else:
        # XXX: assume that LC_ALL=c is set while running rpmbuild
        output_files = re.compile( 'Wrote: (.*)' ).findall( output )

        # Pair each written package with its expected target, in order.
        for output, input in zip( output_files, target ):
            rpm_output = os.path.basename(output)
            expected = os.path.basename(input.get_path())

            assert expected == rpm_output, "got %s but expected %s" % (rpm_output, expected)
            shutil.copy( output, input.abspath )

    # cleanup before leaving.
    # NOTE(review): not reached on the error path above, leaving rpmtemp
    # behind after a failed build — confirm if intentional.
    shutil.rmtree(tmpdir)

    return status
def string_rpm(target, source, env):
    """Return the build message: $RPMCOMSTR if set, else the actual command."""
    try:
        custom_message = env['RPMCOMSTR']
    except KeyError:
        return get_cmd(source, env)
    return custom_message
# Action object pairing the build function with its display-string function.
rpmAction = SCons.Action.Action(build_rpm, string_rpm)

# The builder expands $RPMCOM/$RPMCOMSTR/$RPMSUFFIX, whose defaults
# (rpmAction, '.rpm') are installed by generate() below.
RpmBuilder = SCons.Builder.Builder(action = SCons.Action.Action('$RPMCOM', '$RPMCOMSTR'),
                                   source_scanner = SCons.Defaults.DirScanner,
                                   suffix = '$RPMSUFFIX')
def generate(env):
    """Add Builders and construction variables for rpm to an Environment."""
    # Install the Rpm builder unless the environment already has one.
    builders = env['BUILDERS']
    if 'Rpm' not in builders:
        builders['Rpm'] = RpmBuilder

    env.SetDefault(RPM='LC_ALL=c rpmbuild')
    env.SetDefault(RPMFLAGS=SCons.Util.CLVar('-ta'))
    env.SetDefault(RPMCOM=rpmAction)
    env.SetDefault(RPMSUFFIX='.rpm')
def exists(env):
    """Return a true value if the rpmbuild executable can be detected."""
    return env.Detect('rpmbuild')
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| cournape/numscons | numscons/scons-local/scons-local-1.2.0/SCons/Tool/rpm.py | Python | bsd-3-clause | 4,537 |
"""Tests for certbot_dns_route53._internal.dns_route53.Authenticator"""
import unittest
from botocore.exceptions import ClientError
from botocore.exceptions import NoCredentialsError
try:
import mock
except ImportError: # pragma: no cover
from unittest import mock # type: ignore
from certbot import errors
from certbot.compat import os
from certbot.plugins import dns_test_common
from certbot.plugins.dns_test_common import DOMAIN
class AuthenticatorTest(unittest.TestCase, dns_test_common.BaseAuthenticatorTest):
    """Tests for the Route 53 DNS authenticator's perform/cleanup flow."""
    # pylint: disable=protected-access

    def setUp(self):
        from certbot_dns_route53._internal.dns_route53 import Authenticator

        super().setUp()

        self.config = mock.MagicMock()

        # Set up dummy credentials for testing
        os.environ["AWS_ACCESS_KEY_ID"] = "dummy_access_key"
        os.environ["AWS_SECRET_ACCESS_KEY"] = "dummy_secret_access_key"

        self.auth = Authenticator(self.config, "route53")

    def tearDown(self):
        # Remove the dummy credentials from env vars
        del os.environ["AWS_ACCESS_KEY_ID"]
        del os.environ["AWS_SECRET_ACCESS_KEY"]

    def test_perform(self):
        # perform() should UPSERT the challenge TXT record and wait for it.
        self.auth._change_txt_record = mock.MagicMock()
        self.auth._wait_for_change = mock.MagicMock()

        self.auth.perform([self.achall])

        self.auth._change_txt_record.assert_called_once_with("UPSERT",
                                                             '_acme-challenge.' + DOMAIN,
                                                             mock.ANY)
        self.assertEqual(self.auth._wait_for_change.call_count, 1)

    def test_perform_no_credentials_error(self):
        # Missing AWS credentials must surface as a PluginError.
        self.auth._change_txt_record = mock.MagicMock(side_effect=NoCredentialsError)

        self.assertRaises(errors.PluginError,
                          self.auth.perform,
                          [self.achall])

    def test_perform_client_error(self):
        # AWS API errors must surface as a PluginError.
        self.auth._change_txt_record = mock.MagicMock(
            side_effect=ClientError({"Error": {"Code": "foo"}}, "bar"))

        self.assertRaises(errors.PluginError,
                          self.auth.perform,
                          [self.achall])

    def test_cleanup(self):
        # cleanup() should DELETE the challenge TXT record.
        self.auth._attempt_cleanup = True

        self.auth._change_txt_record = mock.MagicMock()

        self.auth.cleanup([self.achall])

        self.auth._change_txt_record.assert_called_once_with("DELETE",
                                                             '_acme-challenge.'+DOMAIN,
                                                             mock.ANY)

    def test_cleanup_no_credentials_error(self):
        # Cleanup failures are swallowed (best-effort), not raised.
        self.auth._attempt_cleanup = True

        self.auth._change_txt_record = mock.MagicMock(side_effect=NoCredentialsError)

        self.auth.cleanup([self.achall])

    def test_cleanup_client_error(self):
        # Cleanup failures are swallowed (best-effort), not raised.
        self.auth._attempt_cleanup = True

        self.auth._change_txt_record = mock.MagicMock(
            side_effect=ClientError({"Error": {"Code": "foo"}}, "bar"))

        self.auth.cleanup([self.achall])
class ClientTest(unittest.TestCase):
    """Tests for hosted-zone lookup (_find_zone_id_for_domain) and record
    changes.  (Class continues beyond this excerpt.)"""
    # pylint: disable=protected-access

    # Hosted-zone fixtures: only public zones whose name suffix-matches the
    # queried domain should ever be selected.
    PRIVATE_ZONE = {
        "Id": "BAD-PRIVATE",
        "Name": "example.com",
        "Config": {
            "PrivateZone": True
        }
    }

    EXAMPLE_NET_ZONE = {
        "Id": "BAD-WRONG-TLD",
        "Name": "example.net",
        "Config": {
            "PrivateZone": False
        }
    }

    EXAMPLE_COM_ZONE = {
        "Id": "EXAMPLE",
        "Name": "example.com",
        "Config": {
            "PrivateZone": False
        }
    }

    FOO_EXAMPLE_COM_ZONE = {
        "Id": "FOO",
        "Name": "foo.example.com",
        "Config": {
            "PrivateZone": False
        }
    }

    def setUp(self):
        from certbot_dns_route53._internal.dns_route53 import Authenticator

        self.config = mock.MagicMock()

        # Set up dummy credentials for testing
        os.environ["AWS_ACCESS_KEY_ID"] = "dummy_access_key"
        os.environ["AWS_SECRET_ACCESS_KEY"] = "dummy_secret_access_key"

        self.client = Authenticator(self.config, "route53")

    def tearDown(self):
        # Remove the dummy credentials from env vars
        del os.environ["AWS_ACCESS_KEY_ID"]
        del os.environ["AWS_SECRET_ACCESS_KEY"]

    def test_find_zone_id_for_domain(self):
        # The closest public matching zone is returned.
        self.client.r53.get_paginator = mock.MagicMock()
        self.client.r53.get_paginator().paginate.return_value = [
            {
                "HostedZones": [
                    self.EXAMPLE_NET_ZONE,
                    self.EXAMPLE_COM_ZONE,
                ]
            }
        ]

        result = self.client._find_zone_id_for_domain("foo.example.com")
        self.assertEqual(result, "EXAMPLE")

    def test_find_zone_id_for_domain_pagination(self):
        # Matching zones may appear on any results page; the most specific
        # match (foo.example.com over example.com) wins.
        self.client.r53.get_paginator = mock.MagicMock()
        self.client.r53.get_paginator().paginate.return_value = [
            {
                "HostedZones": [
                    self.PRIVATE_ZONE,
                    self.EXAMPLE_COM_ZONE,
                ]
            },
            {
                "HostedZones": [
                    self.PRIVATE_ZONE,
                    self.FOO_EXAMPLE_COM_ZONE,
                ]
            }
        ]

        result = self.client._find_zone_id_for_domain("foo.example.com")
        self.assertEqual(result, "FOO")

    def test_find_zone_id_for_domain_no_results(self):
        # No hosted zones at all -> PluginError.
        self.client.r53.get_paginator = mock.MagicMock()
        self.client.r53.get_paginator().paginate.return_value = []

        self.assertRaises(errors.PluginError,
                          self.client._find_zone_id_for_domain,
                          "foo.example.com")

    def test_find_zone_id_for_domain_no_correct_results(self):
        # Only private or non-matching zones -> PluginError.
        self.client.r53.get_paginator = mock.MagicMock()
        self.client.r53.get_paginator().paginate.return_value = [
            {
                "HostedZones": [
                    self.PRIVATE_ZONE,
                    self.EXAMPLE_NET_ZONE,
                ]
            },
        ]

        self.assertRaises(errors.PluginError,
                          self.client._find_zone_id_for_domain,
                          "foo.example.com")
def test_change_txt_record(self):
self.client._find_zone_id_for_domain = mock.MagicMock()
self.client.r53.change_resource_record_sets = mock.MagicMock(
return_value={"ChangeInfo": {"Id": 1}})
self.client._change_txt_record("FOO", DOMAIN, "foo")
call_count = self.client.r53.change_resource_record_sets.call_count
self.assertEqual(call_count, 1)
def test_change_txt_record_delete(self):
self.client._find_zone_id_for_domain = mock.MagicMock()
self.client.r53.change_resource_record_sets = mock.MagicMock(
return_value={"ChangeInfo": {"Id": 1}})
validation = "some-value"
validation_record = {"Value": '"{0}"'.format(validation)}
self.client._resource_records[DOMAIN] = [validation_record]
self.client._change_txt_record("DELETE", DOMAIN, validation)
call_count = self.client.r53.change_resource_record_sets.call_count
self.assertEqual(call_count, 1)
call_args = self.client.r53.change_resource_record_sets.call_args_list[0][1]
call_args_batch = call_args["ChangeBatch"]["Changes"][0]
self.assertEqual(call_args_batch["Action"], "DELETE")
self.assertEqual(
call_args_batch["ResourceRecordSet"]["ResourceRecords"],
[validation_record])
def test_change_txt_record_multirecord(self):
self.client._find_zone_id_for_domain = mock.MagicMock()
self.client._get_validation_rrset = mock.MagicMock()
self.client._resource_records[DOMAIN] = [
{"Value": "\"pre-existing-value\""},
{"Value": "\"pre-existing-value-two\""},
]
self.client.r53.change_resource_record_sets = mock.MagicMock(
return_value={"ChangeInfo": {"Id": 1}})
self.client._change_txt_record("DELETE", DOMAIN, "pre-existing-value")
call_count = self.client.r53.change_resource_record_sets.call_count
call_args = self.client.r53.change_resource_record_sets.call_args_list[0][1]
call_args_batch = call_args["ChangeBatch"]["Changes"][0]
self.assertEqual(call_args_batch["Action"], "UPSERT")
self.assertEqual(
call_args_batch["ResourceRecordSet"]["ResourceRecords"],
[{"Value": "\"pre-existing-value-two\""}])
self.assertEqual(call_count, 1)
def test_wait_for_change(self):
self.client.r53.get_change = mock.MagicMock(
side_effect=[{"ChangeInfo": {"Status": "PENDING"}},
{"ChangeInfo": {"Status": "INSYNC"}}])
self.client._wait_for_change(1)
self.assertTrue(self.client.r53.get_change.called)
# Run the tests when this module is executed directly.
if __name__ == "__main__":
    unittest.main()  # pragma: no cover
| stweil/letsencrypt | certbot-dns-route53/tests/dns_route53_test.py | Python | apache-2.0 | 9,471 |
#FLM: TT Hints Duplicator
# coding: utf-8
__copyright__ = __license__ = """
Copyright (c) 2015 Adobe Systems Incorporated. All rights reserved.
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
__doc__ = u'''
TT Hints Duplicator
This script was written to duplicate TT hinting data across compatible styles
of a typeface family, cutting the time needed for TT hinting by a significant
amount. The script is run as a FontLab macro, and does not need any of the
involved fonts to be open.
The script duplicates `tthints` files by reading information from the source
`tthints` file and associated fonts, and comparing this data to the target
fonts. It will not modify source- or target fonts in any way.
The script is smart enough to not re-process the source folder, so it is safe
to pick the root of a font project as the target directory.
Note:
1)
This script will not process Delta hints. If Delta hints are present in a
glyph, an error message will be output, and the Delta hints omitted from the
output `tthints` file.
2)
The script can only process TT instructions that are attached to *on-curve*
points, because those are the only ones that will have the same coordinates
in both PS and TT outlines. If there are hints attached to off-curve points,
the whole glyph will be omitted from the output `tthints` file.
3)
It is expected that overlaps are removed in the source CFF and TTF files.
This ensures outline predictability.
Depending on the drawing it can mean that there is some work to be done for
compatibilizing the outlines across the style, which is usually less work
than re-hinting.
4)
Duplicating horizontal sidebearing-hints is not supported at this time.
==================================================
Versions:
v1.5 - Dec 07 2015 - Point out major incompatibilities between TTF and CFF
outlines, and do not duplicate recipes for those glyphs.
v1.4 - Apr 18 2015 - Support reading instructions defined with point coordinates.
Add option to save instructions using point coordinates.
v1.3 - Apr 02 2015 - Now also works in FL Windows.
v1.2 - Mar 29 2015 - Speed improvement by reading/writing only glyphs listed
in the tthints file.
v1.1 - Mar 23 2015 - Support duplication of instructions in both directions.
v1.0 - Mar 04 2015 - First public release (Robothon 2015).
'''
import sys
import os
import time
import itertools

from FL import *
from robofab.world import CurrentFont
from robofab.objects.objectsRF import RFont
'''(The RFont object from robofab.world is not appropriate \
in this case, because it would create a new FL font.)'''

# Clear FontLab's output window so each run logs from a clean slate.
fl.output = ''

# Platform flags; FontLab runs on Mac and Windows only.
MAC = False
PC = False

if sys.platform in ('mac', 'darwin'):
    MAC = True
elif os.name == 'nt':
    PC = True

'Adding the FDK Path to the env variable (on Mac only) so that command line tools can be called from FontLab'
if MAC:
    fdkPathMac = os.sep.join((os.path.expanduser('~'), 'bin', 'FDK', 'tools', 'osx'))
    envPath = os.environ["PATH"]
    newPathString = envPath + ":" + fdkPathMac
    if fdkPathMac not in envPath:
        os.environ["PATH"] = newPathString

if PC:
    # Popen/PIPE are only needed for shelling out on Windows.
    from subprocess import Popen, PIPE

# numerical identifiers for different kinds of hints
vAlignLinkTop = 1
vAlignLinkBottom = 2
hSingleLink = 3
vSingleLink = 4
hDoubleLink = 5
vDoubleLink = 6
hAlignLinkNear = 7
vAlignLinkNear = 8
hInterpolateLink = 13
vInterpolateLink = 14
hMidDelta = 20
vMidDelta = 21
hFinDelta = 22
vFinDelta = 23

# Hint-type groups used when parsing and transferring recipes.
deltas = [hMidDelta, hFinDelta, vMidDelta, vFinDelta]
interpolations = [hInterpolateLink, vInterpolateLink]
links = [hSingleLink, hDoubleLink, vSingleLink, vDoubleLink]
alignments = [vAlignLinkTop, vAlignLinkNear, vAlignLinkBottom, hAlignLinkNear]

# Fixed file names expected inside each font folder.
kTTFFileName = "font.ttf"
kPFAFileName = "font.pfa"
kTXTFileName = "font.txt"
kUFOFileName = "font.ufo"
kTTHintsFileName = "tthints"
# -----------
'Code for identifying segment intersection:'
class Point:
    """Minimal 2D point with public x/y attributes (used by ccw)."""

    def __init__(self, x, y):
        self.x = x
        self.y = y
def ccw(A, B, C):
    """Return True if the points A -> B -> C turn counter-clockwise."""
    cross = (C.y - A.y) * (B.x - A.x) - (B.y - A.y) * (C.x - A.x)
    return cross > 0
def segmentsIntersect(seg1, seg2):
    """Return True if the two segments (each a pair of (x, y) tuples) cross.

    Orientation test per
    http://www.bryceboe.com/2006/10/23/line-segment-intersection-algorithm/
    (collinear overlaps are not reported as intersections).
    """
    (ax, ay), (bx, by) = seg1[0], seg1[1]
    (cx, cy), (dx, dy) = seg2[0], seg2[1]

    def turns_ccw(px, py, qx, qy, rx, ry):
        # True if p -> q -> r is a counter-clockwise turn.
        return (ry - py) * (qx - px) > (qy - py) * (rx - px)

    endpoints_split = (turns_ccw(ax, ay, cx, cy, dx, dy)
                       != turns_ccw(bx, by, cx, cy, dx, dy))
    other_split = (turns_ccw(ax, ay, bx, by, cx, cy)
                   != turns_ccw(ax, ay, bx, by, dx, dy))
    return endpoints_split and other_split
# -----------
class MyHintedNode:
    """Record tying one hinted point's coordinates to its node indexes
    in both the TrueType and the Type1 outline."""

    def __init__(self, nodeXpos, nodeYpos, nodeIndexTT, nodeIndexT1):
        # Coordinates are shared between TT and T1 outlines (on-curve only).
        self.nodeXpos = nodeXpos
        self.nodeYpos = nodeYpos
        self.nodeIndexTT = nodeIndexTT
        self.nodeIndexT1 = nodeIndexT1
def getGlyphOncurveCoords(glyph):
    """Return {contour index: [on-curve (x, y) anchors]} for every contour
    of *glyph*."""
    glyphCoordsDict = {}
    for contourIndex in range(len(glyph)):
        contour = glyph[contourIndex]
        anchors = [bPoint.anchor for bPoint in contour.bPoints]
        # Depending on the position of the start point, FL may store it at
        # both position 0 and -1 of a closed contour; drop the duplicated
        # final point so segment pairing stays aligned.
        if len(anchors) > 2 and anchors[0] == anchors[-1]:
            anchors.pop()
        glyphCoordsDict[contourIndex] = anchors
    return glyphCoordsDict
def getSegmentsList(ptDict1, ptDict2):
    '''
    Pair up corresponding on-curve points of two glyphs.

    Both arguments map contour index -> list of (x, y) anchors (as produced
    by getGlyphOncurveCoords). Returns a list of [templatePoint, targetPoint]
    pairs, or None when the outlines do not match contour-for-contour and
    point-for-point (the caller treats None as "not compatible").
    '''
    segmentsList = []
    for contourIndex in ptDict1.keys():
        for pointIndex in range(len(ptDict1[contourIndex])):
            try:
                segmentsList.append([ptDict1[contourIndex][pointIndex],
                                     ptDict2[contourIndex][pointIndex]])
            # Only catch the actual mismatch signals; the original bare
            # 'except:' would also have swallowed KeyboardInterrupt etc.
            except (KeyError, IndexError):
                return None  # contour/point-count mismatch found
    return segmentsList
def closeAllOpenedFonts():
    'Close every font currently opened in FontLab, last-opened first.'
    # Closing from the highest index down keeps the remaining indexes valid.
    for fontIndex in reversed(range(len(fl))):
        fl.Close(fontIndex)
def getFolderPaths(path, templatePath):
    '''
    Return every folder below *path* that contains one of the possible
    input fonts (PFA, TXT, or UFO) plus an adjacent TTF -- except the
    template folder itself, which must not be re-processed.
    '''
    folderPathsList = []
    for root, dirs, files in os.walk(path):
        hasTTF = os.path.exists(os.path.join(root, kTTFFileName))
        hasSource = (os.path.exists(os.path.join(root, kPFAFileName))
                     or os.path.exists(os.path.join(root, kTXTFileName))
                     or os.path.exists(os.path.join(root, kUFOFileName)))
        if hasTTF and hasSource and root != templatePath:
            folderPathsList.append(root)
    return folderPathsList
def saveNewTTHintsFile(folderPath, contentList):
    '''Write the *contentList* lines to a new 'tthints' file in *folderPath*.'''
    filePath = os.path.join(folderPath, kTTHintsFileName)
    # Context manager guarantees the handle is closed even if writing fails
    # (the original open/close pair leaked the handle on error).
    with open(filePath, 'w') as outfile:
        outfile.writelines(contentList)
def readTTHintsFile(filePath):
    '''
    Reads a tthints file, and returns a tuple:
    - a list storing the glyph order of the input file
    - a dict {glyph name: raw hinting string}

    Blank lines and comment lines (starting with '#') are skipped, as are
    lines with fewer than two whitespace-separated fields.
    '''
    # 'with' guarantees the file is closed even if reading fails.
    with open(filePath, "r") as tthfile:
        lines = tthfile.read().splitlines()

    glyphList = []
    rawHintingDict = {}
    for line in lines:
        stripline = line.strip()
        # Skip blank lines and comments.
        if not stripline or stripline[0] == '#':
            continue
        # Split once instead of three times per line.
        fields = line.split()
        if len(fields) >= 2:
            gName = fields[0]
            gHintingString = fields[1]
            glyphList.append(gName)
            rawHintingDict[gName] = gHintingString
    return glyphList, rawHintingDict
def collectT1nodeIndexes(gName, t1font):
gIndex = t1font.FindGlyph(gName)
if gIndex != -1:
glyph = t1font[gName]
else:
print "ERROR: Glyph %s not found in PS font." % gName
return
nodesDict = {}
if glyph.nodes: # Just making sure that there's an outline in there ...
for nodeIndex in range(len(glyph)):
if glyph.nodes[nodeIndex].type != nOFF: # Ignore off-curve nodes
nodeCoords = (glyph.nodes[nodeIndex].x, glyph.nodes[nodeIndex].y)
if nodeCoords not in nodesDict:
nodesDict[nodeCoords] = nodeIndex
return nodesDict, len(glyph)
def collectTTnodeIndexes(gName, ttfont):
gIndex = ttfont.FindGlyph(gName)
if gIndex != -1:
glyph = ttfont[gName]
else:
print "ERROR: Glyph %s not found in target TT font." % gName
return
nodesDict = {}
if glyph.nodes: # Just making sure that there's an outline in there...
for nodeIndex in range(len(glyph)):
if glyph.nodes[nodeIndex].type != nOFF: # Ignore off-curve nodes
nodeCoords = (glyph.nodes[nodeIndex].x, glyph.nodes[nodeIndex].y)
if nodeCoords not in nodesDict:
nodesDict[nodeCoords] = nodeIndex
else:
print "ERROR: Overlapping node found in glyph %s at %s." % (gName, nodeCoords)
return nodesDict
def transformCommandList(glyph, raw_commandList):
'''
Transforms a list of commands with point coordinates
to an list of commands with point indexes, for instance:
input: [4, (155, 181), (180, 249), 0, -1]
output: [4, 6, 9, 0, -1]
input: [3, 'BL', (83, 0), 0, -1]
output: [3, 34, 0, 0, -1]
Also is used to check validity of point coordinates, and
transforming sidebearing flags to point indexes.
'''
# pointDict = {(point.x, point.y): pointIndex for pointIndex, point in enumerate(glyph.nodes)}
pointDict = dict(((point.x, point.y), pointIndex) for pointIndex, point in enumerate(glyph.nodes))
output = []
for item in raw_commandList:
if item == 'BL':
'left sidebearing hinted'
output.append(len(glyph))
elif item == 'BR':
'right sidebearing hinted'
output.append(len(glyph) + 1)
elif isinstance(item, tuple):
'point coordinates'
pointIndex = pointDict.get(item, None)
if pointIndex == None:
print '\tERROR: point %s does not exist in glyph %s.' % (item, glyph.name)
output.append(pointIndex)
else:
'other hinting data, integers'
output.append(item)
if None in output:
return []
else:
return output
def collectTemplateIndexes(ttfont, t1font, glyphList, rawHintingDict):
    '''
    Creates a dictionary from template font files and the template tthints file.
    keys: glyph names for all hinted glyphs
    values: a dictionary of point indexes as keys, and MyHintedNode instances as values.
    MyHintedNode contains x, y, templateTTIndex, templateT1Index for a given point.

    Returns (outputDict, indexOnlyRawHintingDict, okToProcessTargetFonts).
    '''
    okToProcessTargetFonts = True
    outputDict = {}
    indexOnlyRawHintingDict = {}
    for gName in glyphList:
        writeGlyphRecipe = True
        gIndex = ttfont.FindGlyph(gName)
        if gIndex != -1:
            glyph = ttfont[gName]
        else:
            print "ERROR: Glyph %s not found in TT font." % gName
            continue
        # This dictionary is indexed by the combination of the coordinates of each node of the current glyph:
        t1GlyphNodeIndexDict, t1GlyphNodesCount = collectT1nodeIndexes(gName, t1font)
        hintedNodesDict = {}  # This dictionary is indexed by the node indexes of the template TT font
        gHintsString = rawHintingDict[gName]
        gHintsList = gHintsString.split(";")
        indexOnlyRawHintingList = []
        for commandString in gHintsList:
            raw_commandList = list(eval(commandString))
            commandType = raw_commandList[0]
            # Resolve point coordinates / sidebearing flags to point indexes:
            commandList = transformCommandList(glyph, raw_commandList)
            if not commandList:
                print "ERROR: Problems with reading recipe of glyph %s" % (gName)
                writeGlyphRecipe = False
                break
            if len(commandList) < 3:
                print "ERROR: A hint definition for glyph %s does not have enough parameters: %s" % (gName, commandString)
                writeGlyphRecipe = False
                break

            if commandType in deltas:
                print "INFO: Delta hints are not transferred. Skipping hint (%s) in %s ..." % (commandString, gName)
                # NOTE(review): 'nodes' is not reassigned in this branch, so the
                # loop below re-processes the previous command's nodes (and would
                # raise NameError if a delta were the first command) -- TODO confirm.
            elif commandType in links:
                nodes = commandList[1:3]
            elif commandType in alignments + interpolations:
                nodes = commandList[1:-1]
            else:
                print "WARNING: Hint type %d in glyph %s is not supported." % (commandType, gName)
                continue

            indexOnlyRawHintingList.append(','.join(map(str,commandList)))

            for hintedNodeIndex in nodes:
                targetGlyph = ttfont[gIndex]
                node = targetGlyph[hintedNodeIndex]
                # Virtual indexes used when sidebearings were hinted:
                sidebearingIndexes = [len(targetGlyph), len(targetGlyph)+1]
                try:
                    node.type
                    # This check makes sure that a referenced node index actually exist
                    # in an outline. However, it also skips any glyphs with hinted
                    # sidebearings, because those 'nodes' are represented as len(glyph),
                    # and len(glyph)+1.
                except:
                    if hintedNodeIndex in sidebearingIndexes:
                        print "ERROR: Sidebearings have been hinted in %s, which is not (yet) supported. Skipping glyph ..." % gName
                    else:
                        print "ERROR: Hinting problem in %s. Skipping glyph ..." % gName
                    okToProcessTargetFonts = False
                    continue

                if node.type == nOFF:
                    # Ignore off-curve nodes in TrueType, do not write glyph recipe to the output file
                    print "Node #%d in glyph %s is off-curve. Skipping glyph ..." % (hintedNodeIndex, gName)
                    writeGlyphRecipe = False
                    break
                else:
                    nodeCoords = (node.x, node.y)
                    if nodeCoords in t1GlyphNodeIndexDict:
                        t1NodeIndex = t1GlyphNodeIndexDict[nodeCoords]
                        hintedNode = MyHintedNode(node.x, node.y, hintedNodeIndex, t1NodeIndex)
                        if hintedNodeIndex not in hintedNodesDict:
                            hintedNodesDict[hintedNodeIndex] = hintedNode
                    else:
                        print "ERROR in %s: Could not find an on-curve point at (%s) in the PS font." % (gName, ', '.join(map(str, nodeCoords)))

        if writeGlyphRecipe:
            outputDict[gName] = hintedNodesDict
            indexOnlyRawHintingDict[gName] = ';'.join(indexOnlyRawHintingList)
    return outputDict, indexOnlyRawHintingDict, okToProcessTargetFonts
def getNewTTindexes(glyph, nodeIndexList, ttGlyphNodeIndexDict, rawHintingDict):
    # Translate template TT node indexes into the target font's TT node
    # indexes, matching via the shared T1 (PostScript) on-curve coordinates.
    # Returns (indexList, coordsList), or None when any point cannot be matched.
    newTTindexesList = []
    newTTcoordsList = []
    templateTTdict = rawHintingDict[glyph.name]

    for templateTTindex in nodeIndexList:
        try:
            templateT1index = int(templateTTdict[templateTTindex].nodeIndexT1)
        except KeyError:
            # The template recipe references a node the template dict never
            # recorded: template TTF and CFF outlines diverge for this glyph.
            templateT1index = None
            print 'INFO: Major incompatibility (TTF vs CFF) in glyph %s.' % glyph.name
            return

        if templateT1index != None:
            try:
                targetT1nodeCoords = (glyph.nodes[templateT1index].x, glyph.nodes[templateT1index].y)
            except IndexError:
                # Again, FontLab's fantastic ability to make a contour longer than it is, by re-inserting
                # the first point a second time in position -1. In this case, the templateT1index is
                # re-set to be the first point of the last contour, which makes no functional difference.
                if templateT1index == len(glyph):
                    numberOfCountours = glyph.GetContoursNumber()
                    firstPointOfLastContour = glyph.GetContourBegin(numberOfCountours-1)
                    templateT1index = firstPointOfLastContour
                    targetT1nodeCoords = (glyph.nodes[templateT1index].x, glyph.nodes[templateT1index].y)
                else:
                    print 'I give up.'
                    # NOTE(review): in this branch targetT1nodeCoords is never
                    # assigned, so the lookup below raises NameError -- TODO confirm.

            if targetT1nodeCoords in ttGlyphNodeIndexDict:
                newTTindexesList.append(ttGlyphNodeIndexDict[targetT1nodeCoords])
                newTTcoordsList.append(targetT1nodeCoords)
            else:
                print "Could not find target node in %s." % glyph.name
                # It is probably better to not write the remaning hinting recipe for a
                # given glyph at all if one of its points is not found in the target TTF.
                return

    return newTTindexesList, newTTcoordsList
def processTargetFonts(folderPathsList, templateT1RBfont, hintedNodeDict, glyphList, rawHintingDict, writeCoordinates):
    '''
    Walks every target folder, opens the target PS/TT font pair, checks each
    hinted glyph for outline compatibility with the template, translates the
    template hinting recipe to the target font's point indexes (or point
    coordinates, when writeCoordinates is set), and writes one new 'tthints'
    file per folder. Source and target fonts are never modified.
    '''
    totalFolders = len(folderPathsList)
    print "%d folders found" % totalFolders

    fontIndex = 1
    for targetFolderPath in folderPathsList:
        deleteTempPFA = False
        targetFolderName = os.path.basename(targetFolderPath)

        pfaFilePath = os.path.join(targetFolderPath, kPFAFileName)
        txtFilePath = os.path.join(targetFolderPath, kTXTFileName)
        ufoFilePath = os.path.join(targetFolderPath, kUFOFileName)

        # Prefer an existing PFA; otherwise derive a temporary one from TXT or UFO.
        if os.path.exists(pfaFilePath):
            pass
        elif os.path.exists(txtFilePath):
            deleteTempPFA = True
            makePFAfromTXT(txtFilePath, pfaFilePath)
        elif os.path.exists(ufoFilePath):
            deleteTempPFA = True
            makePFAfromUFO(ufoFilePath, pfaFilePath)
        else:
            print "ERROR: Could not find target %s/%s file. Skipping %s folder ..." % (kPFAFileName, kTXTFileName, targetFolderName)
            continue

        ttfFilePath = os.path.join(targetFolderPath, kTTFFileName)
        if not os.path.exists(ttfFilePath):
            print "ERROR: Could not find target %s file. Skipping %s folder ..." % (kTTFFileName, targetFolderName)
            continue

        print "\nProcessing %s ... (%d/%d)" % (targetFolderName, fontIndex, totalFolders)
        fontIndex += 1

        fl.Open(pfaFilePath)
        targetT1font = fl[fl.ifont]
        targetT1RBfont = CurrentFont()
        fl.Open(ttfFilePath)
        targetTTfont = fl[fl.ifont]

        newTTHintsFileList = ["# Glyph name\tTT hints\tGlyph color\n"]
        # Only glyphs that survived template processing are considered.
        filteredGlyphList = [gName for gName in glyphList if gName in hintedNodeDict]

        for gName in filteredGlyphList:
            gMark = None
            gIndex = targetT1font.FindGlyph(gName)
            if gIndex != -1:
                glyph = targetT1font[gName]
            else:
                print "ERROR: Glyph %s not found in target PS font." % gName
                continue

            # Test outline compatibility between the two glyphs (template and target)
            templateT1RBglyph = templateT1RBfont[gName]
            targetT1RBglyph = targetT1RBfont[gName]
            if not templateT1RBglyph.isCompatible(targetT1RBglyph, False):
                # (NOTE: This method doesn't catch the case in which node indexes have rotated)
                print "DEFINITELY NOT COMPATIBLE: %s. Skipping..." % gName
                continue

            # Verify glyph compatibility by comparing the length of segments:
            # Create dictionaries of the coodinates of on-curve points:
            ptDict1 = getGlyphOncurveCoords(templateT1RBglyph)
            ptDict2 = getGlyphOncurveCoords(targetT1RBglyph)
            # Define segments using the point coordinates from ptDict1 and ptDict2:
            segmentsList = getSegmentsList(ptDict1, ptDict2)
            if not segmentsList:
                print "DEFINITELY NOT COMPATIBLE (contour mismatch): %s. Skipping ..." % gName
                continue

            # Get all pair combinations of those segments:
            segmentCombinationsList = list(itertools.combinations(segmentsList, 2))
            # Iterate through the segment combinations and stop as soon
            # as an intersection between two segments is found:
            for combination in segmentCombinationsList:
                seg1, seg2 = combination[0], combination[1]
                if segmentsIntersect(seg1, seg2):
                    print "POSSIBLY NOT COMPATIBLE: %s. Please check ..." % gName
                    gMark = 25  # orange
                    break  # one incompatibility was found; no need to report it more than once

            # This dictionary is indexed by the combination of
            # the coordinates of each node of the current glyph:
            ttGlyphNodeIndexDict = collectTTnodeIndexes(gName, targetTTfont)

            newHintsList = []
            gHintsString = rawHintingDict[gName]
            gHintsList = gHintsString.split(";")
            # NOTE(review): if commandType falls outside all four groups below,
            # 'writeLine' may be read before assignment on this iteration (and
            # its value carries over between glyphs) -- TODO confirm intended.
            for commandString in gHintsList:
                commandList = list(eval(commandString))
                commandType = commandList[0]
                if len(commandList):
                    if commandType in deltas:
                        # Delta hints are never transferred.
                        continue
                    elif commandType in alignments:
                        nodes = [commandList[1]]
                        convertedNodes = getNewTTindexes(glyph, nodes, ttGlyphNodeIndexDict, hintedNodeDict)
                        if convertedNodes != None:
                            writeLine = True
                            targetNodeIndexList, targetNodeCoordsList = convertedNodes
                            hintParamsList = [commandList[-1]]
                        else:
                            writeLine = False
                            break
                    elif commandType in links:
                        nodes = commandList[1:3]
                        convertedNodes = getNewTTindexes(glyph, nodes, ttGlyphNodeIndexDict, hintedNodeDict)
                        if convertedNodes != None:
                            writeLine = True
                            targetNodeIndexList, targetNodeCoordsList = convertedNodes
                            hintParamsList = commandList[3:]
                        else:
                            writeLine = False
                            break
                    elif commandType in interpolations:
                        nodes = commandList[1:-1]
                        convertedNodes = getNewTTindexes(glyph, nodes, ttGlyphNodeIndexDict, hintedNodeDict)
                        if convertedNodes != None:
                            writeLine = True
                            targetNodeIndexList, targetNodeCoordsList = convertedNodes
                            hintParamsList = [commandList[-1]]
                        else:
                            writeLine = False
                            break

                    if writeLine:
                        if writeCoordinates:
                            targetNodeList = targetNodeCoordsList
                        else:
                            targetNodeList = targetNodeIndexList
                        newCommandList = [commandType] + targetNodeList + hintParamsList
                        newCommandString = ','.join(map(str, newCommandList))
                        newHintsList.append(newCommandString.replace(" ", ""))

            if writeLine:
                newHintsLine = "%s\t%s" % (gName, ';'.join(newHintsList))
                if gMark:
                    newHintsLine = "%s\t%s" % (newHintsLine, gMark)
                newTTHintsFileList.append(newHintsLine + "\n")

        saveNewTTHintsFile(targetFolderPath, newTTHintsFileList)
        closeAllOpenedFonts()

        if deleteTempPFA:
            if os.path.exists(pfaFilePath):
                os.remove(pfaFilePath)
def makePFAfromTXT(txtFilePath, pfaFilePath):
'Runs the `type1` command on a font.txt file to generate a temporary PFA.'
command = 'type1 "%s" > "%s"' % (txtFilePath, pfaFilePath)
# Run type1 tool
if MAC:
pp = os.popen(command)
report = pp.read()
pp.close()
if PC:
pp = Popen(command, shell=True, stdout=PIPE, stderr=PIPE)
out, err = pp.communicate()
if err:
print out, err
def makePFAfromUFO(ufoFilePath, pfaFilePath, glyphList=None):
'Runs the `tx` command on a UFO file to generate a temporary PFA.'
if glyphList:
command = 'tx -t1 -g %s "%s" > "%s"' % (','.join(glyphList), ufoFilePath, pfaFilePath)
else:
command = 'tx -t1 "%s" > "%s"' % (ufoFilePath, pfaFilePath)
# The order of the quotes above is extremely important.
# Windows will understand "File with spaces" but not 'File with spaces'.
# Run tx tool
if MAC:
pp = os.popen(command)
report = pp.read()
pp.close()
if PC:
pp = Popen(command, shell=True, stdout=PIPE, stderr=PIPE)
out, err = pp.communicate()
if err:
print out, err
def run(writeCoordinates=False):
    '''
    Macro entry point: asks for the template folder and the target root
    folder, reads the template tthints recipe plus template fonts, and
    duplicates the recipe into every compatible target font folder found
    below the root. With writeCoordinates=True the output recipes use point
    coordinates instead of point indexes.
    '''
    # Get the folder that contains the source hinting data, and source font files:
    templateFolderPath = fl.GetPathName("Select directory that contains the 'tthints' template file...")
    if not templateFolderPath:
        'Cancel was clicked or ESC was pressed'
        return

    tthintsFilePath = os.path.join(templateFolderPath, kTTHintsFileName)
    # Verify that the files tthints, font.pfa/ufo and font.ttf exist in the folder provided:
    if not os.path.exists(tthintsFilePath):
        print "ERROR: Could not find %s file." % kTTHintsFileName
        return

    # Check if any of the possible template fonts exists -- PFA, TXT, or UFO:
    pfaFilePath = os.path.join(templateFolderPath, kPFAFileName)
    txtFilePath = os.path.join(templateFolderPath, kTXTFileName)
    ufoFilePath = os.path.join(templateFolderPath, kUFOFileName)
    if os.path.exists(pfaFilePath):
        pass
    elif os.path.exists(txtFilePath):
        pass
    elif os.path.exists(ufoFilePath):
        pass
    else:
        print "ERROR: Could not find any of the following font files: %s, %s or %s." % \
            (kPFAFileName, kTXTFileName, kUFOFileName)
        return

    # Check if font.ttf exists in source folder:
    ttfFilePath = os.path.join(templateFolderPath, kTTFFileName)
    if not os.path.exists(ttfFilePath):
        print "ERROR: Could not find %s file." % kTTFFileName
        return

    # Get the (root) folder containing the target font files:
    baseFolderPath = fl.GetPathName("Select top directory that contains the fonts to process ...")
    if not baseFolderPath:
        'Cancel was clicked or ESC key was pressed'
        return

    startTime = time.time()

    # Create a list of glyphs that have been hinted so it can be used as a filter.
    # The rawHintingDict contains a string of raw hinting data for each glyph:
    glyphList, rawHintingDict = readTTHintsFile(tthintsFilePath)

    folderPathsList = getFolderPaths(baseFolderPath, templateFolderPath)
    if len(folderPathsList):
        delete_temporary_template_PFA = False
        print "Processing template files..."
        fl.Open(ttfFilePath)
        templateTTfont = fl[fl.ifont]
        # Derive a temporary template PFA from TXT or UFO when no PFA exists:
        if not os.path.exists(pfaFilePath) and os.path.exists(txtFilePath):
            delete_temporary_template_PFA = True
            makePFAfromTXT(txtFilePath, pfaFilePath)
        elif not os.path.exists(pfaFilePath) and os.path.exists(ufoFilePath):
            delete_temporary_template_PFA = True
            makePFAfromUFO(ufoFilePath, pfaFilePath, glyphList)
        fl.Open(pfaFilePath)
        templateT1font = fl[fl.ifont]

        # Make a Robofab font of the Type1 template font. This RB font is made
        # by copying each glyph. There does not seem to be a simpler method
        # that produces reliable results -- the challenge comes from having
        # to close the FL font downstream.
        templateT1RBfont = RFont()
        currentT1RBfont = CurrentFont()
        for gName in glyphList:
            g = currentT1RBfont[gName]
            templateT1RBfont.insertGlyph(g)

        hintedNodeDict, indexOnlyRawHintingDict, okToProcessTargetFonts = collectTemplateIndexes(templateTTfont, templateT1font, glyphList, rawHintingDict)
        closeAllOpenedFonts()

        if okToProcessTargetFonts:
            processTargetFonts(folderPathsList, templateT1RBfont, hintedNodeDict, glyphList, indexOnlyRawHintingDict, writeCoordinates)
        else:
            print "Can't process target fonts because of hinting errors found in template font."

        if delete_temporary_template_PFA:
            if os.path.exists(pfaFilePath):
                os.remove(pfaFilePath)
    else:
        print "Could not find suitable folders to process."

    endTime = time.time()
    elapsedSeconds = endTime-startTime

    if (elapsedSeconds/60) < 1:
        print '\nCompleted in %.1f seconds.\n' % elapsedSeconds
    else:
        print '\nCompleted in %s minutes and %s seconds.\n' % (elapsedSeconds/60, elapsedSeconds%60)
# FontLab macro entry point; write target recipes with point indexes (not coordinates).
if __name__ == "__main__":
    run(writeCoordinates=False)
| shannpersand/cooper-type | _resources/FDK Adobe/Tools/FontLabMacros/TrueType/tthDupe.py | Python | cc0-1.0 | 30,346 |
#!/usr/bin/env python
"""Update encrypted deploy password in Travis config file
"""

from __future__ import print_function
import base64
import json
import os
from getpass import getpass

import yaml
from cryptography.hazmat.primitives.serialization import load_pem_public_key
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.asymmetric.padding import PKCS1v15

try:
    # Python 2
    from urllib import urlopen
# Narrowed from a bare 'except:' -- only the missing-name case should fall
# through to the Python 3 location; anything else should propagate.
except ImportError:
    # Python 3
    from urllib.request import urlopen


# Repository whose Travis public key is fetched for encryption.
GITHUB_REPO = 'ryananguiano/kafka-gConsumer'

# .travis.yml lives next to this script.
TRAVIS_CONFIG_FILE = os.path.join(
    os.path.dirname(os.path.abspath(__file__)), '.travis.yml')
def load_key(pubkey):
    """Load public RSA key, with work-around for keys using
    incorrect header/footer format.

    Read more about RSA encryption with cryptography:
    https://cryptography.io/latest/hazmat/primitives/asymmetric/rsa/
    """
    try:
        return load_pem_public_key(pubkey.encode(), default_backend())
    except ValueError:
        # workaround for https://github.com/travis-ci/travis-api/issues/196:
        # strip the "RSA" marker from the header/footer and retry.
        fixed_pem = pubkey.replace('BEGIN RSA', 'BEGIN').replace('END RSA', 'END')
        return load_pem_public_key(fixed_pem.encode(), default_backend())
def encrypt(pubkey, password):
    """Encrypt password using given RSA public key and encode it with base64.

    The encrypted password can only be decrypted by someone with the
    private key (in this case, only Travis).
    """
    rsa_key = load_key(pubkey)
    ciphertext = rsa_key.encrypt(password, PKCS1v15())
    return base64.b64encode(ciphertext)
def fetch_public_key(repo):
    """Download RSA public key Travis will use for this repo.

    Travis API docs: http://docs.travis-ci.com/api/#repository-keys
    """
    keyurl = 'https://api.travis-ci.org/repos/{0}/key'.format(repo)
    response = json.loads(urlopen(keyurl).read().decode())
    if 'key' not in response:
        errmsg = "Could not find public key for repo: {}.\n".format(repo)
        errmsg += "Have you already added your GitHub repo to Travis?"
        raise ValueError(errmsg)
    return response['key']
def prepend_line(filepath, line):
    """Rewrite a file adding a line to its beginning."""
    with open(filepath) as f:
        existing = f.readlines()

    with open(filepath, 'w') as f:
        f.writelines([line] + existing)
def load_yaml_config(filepath):
    """Parse the YAML file at *filepath* and return its data.

    Uses yaml.safe_load: the Travis config is plain data, and bare
    yaml.load() without a Loader can construct arbitrary Python objects
    (CVE-2017-18342) and warns on PyYAML >= 5.1.
    """
    with open(filepath) as f:
        return yaml.safe_load(f)
def save_yaml_config(filepath, config):
    """Serialize *config* to *filepath* as block-style YAML."""
    with open(filepath, 'w') as stream:
        yaml.dump(config, stream, default_flow_style=False)
def update_travis_deploy_password(encrypted_password):
    """Update the deploy section of the .travis.yml file
    to use the given encrypted password.
    """
    config = load_yaml_config(TRAVIS_CONFIG_FILE)
    config['deploy']['password'] = {'secure': encrypted_password}
    save_yaml_config(TRAVIS_CONFIG_FILE, config)

    # Warn future editors that the file is regenerated by this script.
    header = ('# This file was autogenerated and will overwrite'
              ' each time you run travis_pypi_setup.py\n')
    prepend_line(TRAVIS_CONFIG_FILE, header)
def main(args):
    """Fetch the repo's Travis key, encrypt the PyPI password, update .travis.yml."""
    public_key = fetch_public_key(args.repo)
    password = args.password or getpass('PyPI password: ')
    encrypted = encrypt(public_key, password.encode())
    update_travis_deploy_password(encrypted)
    print("Wrote encrypted password to .travis.yml -- you're ready to deploy")
if '__main__' == __name__:
    # argparse is imported here so importing this module has no CLI side effects.
    import argparse
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('--repo', default=GITHUB_REPO,
                        help='GitHub repo (default: %s)' % GITHUB_REPO)
    parser.add_argument('--password',
                        help='PyPI password (will prompt if not provided)')

    args = parser.parse_args()
    main(args)
| ryananguiano/kafka-gConsumer | travis_pypi_setup.py | Python | mit | 3,742 |
"""
Transformers
This module contains transformers for preprocessing data. Most operate on DataFrames and are named appropriately.
"""
import numpy as np
import pandas as pd
from sklearn.base import TransformerMixin
from imblearn.over_sampling import RandomOverSampler
from imblearn.under_sampling import RandomUnderSampler
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
from sklearn.model_selection import RandomizedSearchCV
from healthcareai.common.healthcareai_error import HealthcareAIError
from tabulate import tabulate
SUPPORTED_IMPUTE_STRATEGY = ['MeanMode', 'RandomForest']
class DataFrameImputer( TransformerMixin ):
    """
    Impute missing values in a dataframe.

    Parameters
    ----------
    impute : boolean, default=True
        If True, imputation of missing values takes place.
        If False, imputation of missing values doesn't happen.

    verbose : boolean, default=True
        Controls the verbosity.
        If False : no text information will be shown about imputation of missing values.

    imputeStrategy : string, default='MeanMode'
        Decides the technique used for imputation of missing values.
        If imputeStrategy = 'MeanMode', columns of dtype object or category
            (assumed categorical) are imputed by the mode value of that column;
            columns of other types (assumed continuous) by the mean of the column.
        If imputeStrategy = 'RandomForest', columns of dtype object or category
            (assumed categorical) are imputed using RandomForestClassifier;
            columns of other types (assumed continuous) using RandomForestRegressor.

    tunedRandomForest : boolean, default=False
        If set to True, the RandomForestClassifier/RandomForestRegressor used for
        imputation of missing values is tuned using randomized search and K-fold
        cross validation.
        Note:
            If set to True, imputation may take longer depending upon size of
            dataframe and number of columns having missing values.

    numeric_columns_as_categorical : list of str (or single str), default=None
        Column names which are numeric (int/float) in the dataframe, but by nature
        are to be considered categorical.
        For example, a column JobCode (levels 1..6) with missing values is read by
        pandas as float; listing it here imputes it with the column mode instead of
        the mean, and its final dtype becomes numpy.object.
    """

    def __init__(self, impute=True, verbose=True, imputeStrategy='MeanMode', tunedRandomForest=False,
                 numeric_columns_as_categorical=None ):
        self.impute = impute
        self.object_columns = None
        self.fill = None
        self.verbose = verbose
        self.impute_Object = None
        self.imputeStrategy = imputeStrategy
        self.tunedRandomForest = tunedRandomForest
        self.numeric_columns_as_categorical = numeric_columns_as_categorical

        # Normalise the user-supplied spec to a list of column names.
        if self.numeric_columns_as_categorical is not None:
            if isinstance(numeric_columns_as_categorical, str):
                self.numeric_columns_as_categorical = [numeric_columns_as_categorical]
            elif isinstance(numeric_columns_as_categorical, list):
                self.numeric_columns_as_categorical = numeric_columns_as_categorical
            else:
                raise HealthcareAIError( "Please provide \'numeric_columns_as_categorical = {}\' parameter in string/list format (for single column) or in list format (for multiple columns)".format(numeric_columns_as_categorical) )

    def fit(self, X, y=None):
        """
        Compute the imputation values (or delegate to the RandomForest imputer).

        If imputeStrategy is 'MeanMode' / None:
            1. Columns in 'numeric_columns_as_categorical' are explicitly converted to dtype='object'.
            2. Per-column fill values (mode for object/category, mean otherwise) are
               stored in self.fill; transform() later fills missing cells with them.
        If imputeStrategy is 'RandomForest':
            A DataFrameImputerRandomForest instance is created and fitted.

        Raises HealthcareAIError for an unknown strategy or a misspelled column name.
        """
        if self.impute is False:
            return self

        if self.imputeStrategy == 'MeanMode' or self.imputeStrategy is None:
            if self.numeric_columns_as_categorical is not None:
                for col in self.numeric_columns_as_categorical:
                    if col not in list(X.columns):
                        raise HealthcareAIError('Column = {} mentioned in numeric_columns_as_categorical is not present in dataframe'.format(col))
                    else:
                        # NOTE: pandas' astype keyword is 'errors' (the original
                        # passed 'error=', which raises TypeError at runtime).
                        X[col] = X[col].astype( dtype='object', copy=True, errors='raise' )

            # Grab list of object column names before doing imputation
            self.object_columns = X.select_dtypes(include=['object']).columns.values

            num_nans = X.isnull().sum().sum()
            num_total = X.shape[0] * X.shape[1]
            percentage_imputed = num_nans / num_total * 100

            # Mode for object/categorical columns, mean for everything else.
            self.fill = pd.Series([X[c].value_counts().index[0]
                                   if X[c].dtype == np.dtype('O')
                                   or pd.api.types.is_categorical_dtype(X[c])
                                   else X[c].mean() for c in X], index=X.columns)

            if self.verbose:
                print("Percentage Imputed: %.2f%%" % percentage_imputed)
                print("Note: Impute will always happen on prediction dataframe, otherwise rows are dropped, and will lead "
                      "to missing predictions")

            # return self for scikit compatibility
            return self
        elif self.imputeStrategy == 'RandomForest':
            self.impute_Object = DataFrameImputerRandomForest( tunedRandomForest=self.tunedRandomForest,
                                                               numeric_columns_as_categorical=self.numeric_columns_as_categorical,
                                                               impute=self.impute, verbose=self.verbose )
            self.impute_Object.fit(X)
            return self
        else:
            raise HealthcareAIError('A imputeStrategy must be one of these types: {}'.format(SUPPORTED_IMPUTE_STRATEGY))

    def transform(self, X, y=None):
        """
        Fill missing values computed during fit() (or delegate to the RandomForest imputer).

        If imputeStrategy is 'MeanMode' / None:
            1. Missing cells are filled from self.fill.
            2. Columns in 'numeric_columns_as_categorical' are converted to dtype='object'.
            3. Columns captured in self.object_columns are ensured to be dtype='object'.
        If imputeStrategy is 'RandomForest':
            transform() of the fitted DataFrameImputerRandomForest is called.
        """
        # Return if not imputing
        if self.impute is False:
            return X

        if self.imputeStrategy == 'MeanMode' or self.imputeStrategy is None:
            result = X.fillna(self.fill)

            if self.numeric_columns_as_categorical is not None:
                for col in self.numeric_columns_as_categorical:
                    # 'errors' keyword fixed here as well (was 'error=').
                    result[col] = result[col].astype( dtype='object', copy=True, errors='raise' )

            for i in self.object_columns:
                if result[i].dtype not in ['object', 'category']:
                    result[i] = result[i].astype('object')
            return result
        elif self.imputeStrategy == 'RandomForest':
            result = self.impute_Object.transform(X)
            return result
        else:
            raise HealthcareAIError('A imputeStrategy must be one of these types: {}'.format(SUPPORTED_IMPUTE_STRATEGY))
class DataFrameImputerRandomForest( TransformerMixin ):
    """
    Impute missing values in a dataframe using RandomForest models.

    Parameters
    ----------
    impute : boolean, default=True
        If True, imputation of missing values takes place.
        If False, imputation of missing values doesn't happen.

    verbose : boolean, default=True
        Controls the verbosity.
        If False : no text information will be shown about imputation of missing values.

    tunedRandomForest : boolean, default=False
        If set to True, the RandomForestClassifier/RandomForestRegressor used for
        imputation of missing values is tuned using randomized search and K-fold
        cross validation.
        Note:
            If set to True, imputation may take longer depending upon size of
            dataframe and number of columns having missing values.

    numeric_columns_as_categorical : list of str, default=None
        Column names which are numeric (int/float) in the dataframe, but by nature
        are to be considered categorical (e.g. numeric job codes). Missing values
        in such columns are imputed with the column mode, and the column's final
        dtype is numpy.object.
    """

    def __init__(self, impute=True, verbose=True, tunedRandomForest=False, numeric_columns_as_categorical=None ):
        self.impute = impute
        self.object_columns = None
        self.fill = None
        self.verbose = verbose
        self.tunedRandomForest = tunedRandomForest
        self.numeric_columns_as_categorical = numeric_columns_as_categorical
        # fill_dict maps: column name -> array of values predicted for that
        # column's missing cells. Populated by fit(), consumed by transform().
        self.fill_dict = {}

    def fit(self, X, y=None):
        """
        Description:
            1. Segregates the columns into 3 parts:
                1. cat_list = list of categorical columns
                2. num_list = list of numeric columns
                3. num_as_cat_list = numeric columns to be considered as categorical (provided by user)
            2. Missing values of num_as_cat_list are filled first, using 'Mode' values,
               by calling getNumericAsCategoricalImputedData().
            3. A new dataframe is constructed as:
               X_NumericAsCategoricalImputed (no null values) + X[cat_list+num_list] (still has nulls)
            4. Missing values of numeric columns are then imputed via getNumericImputedData().
            5. A new dataframe is constructed as:
               NumericImputedData (no missing values) + X[cat_list] (still has missing values)
            6. Missing values of categorical columns are imputed via getCategoricalImputedData().
            7. At this point calculation of missing values of all columns is completed.
            8. While imputing any column, a corresponding entry is made in fill_dict as:
                    key   : column name
                    value : array of predicted values for missing cells
               This dictionary is used in transform() to impute the missing values.

        Local Variables:
            main_df (pd.Dataframe)  = copy of original dataframe (having missing values)
            X_backup (pd.Dataframe) = backup of dataframe X
            cat_list (list of str)  = list of categorical columns
            num_list (list of str)  = list of numeric columns
            num_as_cat_list         = numeric columns to be considered categorical (provided by user)
            X_NumericAsCategoricalImputed (pd.Dataframe) = only num_as_cat_list cols with imputed missing values
            X_NumericImputed (pd.Dataframe)              = only num_list cols with imputed missing values
            main_df_NumericImputed (pd.Dataframe)        = numeric cols (imputed) + categorical cols (still missing)
        """
        # Return if not imputing
        if self.impute is False:
            return self

        if self.tunedRandomForest==True and self.verbose==True:
            print("\nNote: Missing value imputation is being performed using Gridsearch and Cross-validation of ML models.")
            print("      It may take longer time depending on size of data and number of column having missing values.\n")

        num_nans = X.isnull().sum().sum()
        num_total = X.shape[0] * X.shape[1]
        percentage_imputed = num_nans / num_total * 100

        # Grab list of object column names before doing imputation
        self.object_columns = X.select_dtypes(include=['object']).columns.values

        # Replacing None by NaN, if any None is present in X
        # NOTE(review): this mutates the caller's dataframe in place — confirm intended.
        X.fillna( value=np.nan, inplace=True)

        # Separating all columns into Categorical and Numeric
        cat_list=[]
        num_list=[]
        num_as_cat_list = self.numeric_columns_as_categorical

        # checking whether all the columns mentioned in num_as_cat_list are present in X or not
        if( num_as_cat_list is not None ):
            for col in num_as_cat_list:
                if( col not in list(X.columns) ):
                    raise HealthcareAIError('Column = {} mentioned in numeric_columns_as_categorical is not present in dataframe'.format(col))

        # segregating columns other than num_as_cat_list, into cat_list and num_list
        for c in X:
            if( (num_as_cat_list is None) or (num_as_cat_list is not None and c not in num_as_cat_list) ):
                if( X[c].dtype == np.dtype('O') or pd.api.types.is_categorical_dtype(X[c]) ):
                    cat_list.append( c )
                else:
                    num_list.append( c )

        # Getting only num_as_cat_list columns with imputed missing values
        # getNumericAsCategoricalImputedData() also fills the calculated imputation values into fill_dict
        if( num_as_cat_list is not None ):
            X_NumericAsCategoricalImputed = self.getNumericAsCategoricalImputedData( X = X[ num_as_cat_list ],
                                                                                     num_as_cat_list = num_as_cat_list )
            X = X[ cat_list+num_list ].join( X_NumericAsCategoricalImputed, how='outer' ).copy()

        # Creating base copy of original Dataframe as 'main_df'
        main_df = X.copy()
        X_backup = X.copy()

        #--------------------------------------------------------------------------------------------------------------------------
        # Getting only Numeric columns with imputed missing values
        # getNumericImputedData() also fills the predicted imputation values into fill_dict
        X_NumericImputed = self.getNumericImputedData( main_df=main_df.copy(), X=X.copy(), cat_list=cat_list, num_list=num_list )

        # main_df_NumericImputed = X_NumericImputed + CategoricalColumns
        main_df_NumericImputed = X_NumericImputed.join( main_df[ cat_list ], how='outer').copy()
        X_backup = main_df_NumericImputed.copy()
        X = main_df_NumericImputed.copy()

        # Getting only Categoric columns with imputed missing values
        # getCategoricalImputedData() also fills the predicted imputation values into fill_dict
        X_CategoricImputed = self.getCategoricalImputedData( main_df=main_df.copy(), X_NumericImputed=X_NumericImputed.copy(), X=X, cat_list=cat_list, num_list=num_list )

        X = main_df.copy()
        #--------------------------------------------------------------------------------------------------------------------------

        if self.verbose:
            self.printFillDictReport( lenghth_X=len(X) )
            print("Percentage Imputed: %.2f%%" % percentage_imputed)
            print("Note: Impute will always happen on prediction dataframe, otherwise rows are dropped, and will lead "
                  "to missing predictions")
            print("")

        # return self for scikit compatibility
        return self

    def transform(self, X, y=None):
        """
        Description:
        ------------
        Missing values to be imputed are present in fill_dict (updated in fit()).
            1. Missing values of the dataframe are filled using self.fill_dict.
            2. Columns captured in self.object_columns during fit() are ensured to be dtype='object'.
            3. Columns specified in 'numeric_columns_as_categorical' are explicitly converted to dtype='object'.
        """
        # Return if not imputing
        if self.impute is False:
            return X

        # Replacing None by NaN, if any None is present in X
        X.fillna( value=np.nan, inplace=True)

        # Now filling up the missing values in X using fill_dict (which was updated in fit())
        for colName, imputeData in self.fill_dict.items():
            if( colName in X.columns ):
                X.loc[ X[ colName ].isnull(), colName ] = imputeData

        for i in self.object_columns:
            if X[i].dtype not in ['object', 'category']:
                X[i] = X[i].astype('object')

        if( self.numeric_columns_as_categorical is not None ):
            for col in self.numeric_columns_as_categorical:
                # NOTE: pandas' astype keyword is 'errors' (the original passed
                # 'error=', which raises TypeError at runtime).
                X[col] = X[col].astype( dtype='object', copy=True, errors='raise' )

        return X

    def printFillDictReport( self, lenghth_X ):
        """Print a tabulated summary of the imputations recorded in fill_dict."""
        header_names = [ 'Column Name', ' Number of\nmissing values', '% missing\n values', 'Top 3 impute values']
        print_data = []
        for colName, imputeData in self.fill_dict.items():
            length_imputeData = len(self.fill_dict[colName])
            sample_imputeData = imputeData[0:3]
            percentage_missing = length_imputeData/lenghth_X
            percentage_missing = "{:.2%}".format(percentage_missing)
            print_data.append( [colName, str(length_imputeData), percentage_missing, sample_imputeData] )
        table = tabulate( tabular_data=print_data, headers=header_names, tablefmt='fancy_grid', stralign='left', numalign='left')
        print(table)
        print("")

    def getNumericAsCategoricalImputedData( self, X, num_as_cat_list):
        """
        This function does the below operations on num_as_cat_list columns:
            1. Calculate and impute the missing values using the Mode value of each column.
            2. Update the calculated missing values in fill_dict (used later in transform()).
        """
        for col in list( X.columns ):
            # If there are no null values in the column, skip current iteration
            if ( X[ col ].isnull().values.any()==False):
                continue
            # if column type is already categorical, raise exception
            if ( X[col].dtype == np.dtype('O') or pd.api.types.is_categorical_dtype(X[col]) ):
                raise HealthcareAIError("Column type of '{}' is already categorical, but it is mentioned in numeric_columns_as_categorical={}".format(col, self.numeric_columns_as_categorical) )

            imputeValue = X[col].value_counts().index[0]
            # NOTE(review): dtype=np.int64 assumes the mode is integral — a float
            # mode would be truncated; confirm against expected inputs.
            imputeData = np.array( object=[ imputeValue for i in range( X[col].isna().sum() ) ], dtype=np.int64 )
            self.fill_dict[col] = imputeData
            X[col].fillna( value=imputeValue, inplace=True )

            if self.verbose:
                print("Missing value imputation completed for column: \'{}\'".format(col))
        return X

    def getNumericImputedData( self, main_df, X, cat_list, num_list ):
        """
        Impute missing values in Numeric cols of dataframe.

        Args:
            main_df (pd.Dataframe)  = copy of original dataframe (having missing values)
            X (pd.Dataframe)        = Dataframe on which operations will be performed
            cat_list (list of str)  = List of categorical columns
            num_list (list of str)  = List of numeric columns
        Local Vars:
            to_impute (list)        = column in which missing values are to be imputed
                                      (list form is easy to use while indexing the dataframe;
                                      to_impute[0] is the column name as a string)
            all_columns (list)      = List of all columns
            predictor_columns (list)= Columns used for predicting the missing values in to_impute[0]
        Return:
            X_NumericImputed (pd.Dataframe) = ONLY numeric columns, with imputed missing values.
            Also fill_dict gets updated for each numeric column having missing values.

        Steps:
            1. Loop once per col in num_list.
            2. That col is the one in which missing values (if present) are to be imputed.
            3. Find predictor_columns.
            4. Temporarily impute missing values in predictor_columns using mean/mode
               (predictions are done only for the to_impute col).
            5. Append the to_impute col to this dataframe.
            6. Create dummy variables.
            7. Update the predictor_columns list.
            8. Get the predictions of missing values in the to_impute col.
            9. Add the col name (to_impute) and predicted missing values to fill_dict.
            10. Also impute the missing values of this column so that the next iteration
                has fewer missing values to temp-impute via mean/mode (step 4).
        """
        X_backup = X
        to_impute = []
        all_columns = []
        predictor_columns = []

        for i in num_list:
            X = X_backup
            to_impute = [i]

            # Check if to_impute col has any NaN values. If none, skip this column.
            if ( X[ to_impute ].isnull().values.any()==False):
                continue

            all_columns = list(X.columns)
            predictor_columns = all_columns.copy()
            predictor_columns.remove( to_impute[0] )
            X = X[ predictor_columns ]

            # Temporarily impute the missing values in X (using Mean and Mode)
            # Note: After every iteration we will have 1 col less to impute this way
            # because we impute 1 column per iteration using RandomForest and add it to X
            X = self.getTempImutedData( X )

            # As we didn't impute missing values of the to_impute col (they are to be
            # imputed using RandomForest), join it back to X with its NaN values.
            X = X.join( main_df[ to_impute ], how='outer')

            # Converting categorical cols to numeric so that they can be fed to the ML model
            columns_to_dummify = list(X.select_dtypes(include=[object, 'category']))
            X = pd.get_dummies(X, columns=columns_to_dummify, drop_first=True, prefix_sep='.')

            # Since some new cols are created after get_dummies, update the predictor_columns list
            predictor_columns = list(X.columns)
            predictor_columns.remove(to_impute[0])

            # Get the predicted values for missing data
            y_pred_main = self.getImputePredictions(X=X, predictor_columns=predictor_columns, to_impute=to_impute, toImputeType='numeric' )

            # add the predicted imputation data to the fill_dict
            self.fill_dict[ to_impute[0] ] = y_pred_main

            # updating the imputed NaN of to_impute col in X_backup
            # In the next iteration this X_backup will be used as the base DF
            X_backup.loc[ X_backup[ to_impute[0] ].isnull(), to_impute[0] ] = self.fill_dict[ to_impute[0] ]

            if self.verbose:
                print("Missing value imputation completed for column: \'{}\'".format(to_impute[0]))

        X_NumericImputed = X_backup[ num_list ].copy()
        return X_NumericImputed

    def getCategoricalImputedData( self, main_df, X_NumericImputed, X, cat_list, num_list ):
        """
        Impute missing values in Categorical cols of dataframe.

        Args:
            main_df (pd.Dataframe)  = copy of original dataframe (having missing values)
            X (pd.Dataframe)        = Dataframe on which operations will be performed
            cat_list (list of str)  = List of categorical columns
            num_list (list of str)  = List of numeric columns
        Local Vars:
            to_impute (list)        = column in which missing values are to be imputed
                                      (to_impute[0] is the column name as a string)
            all_columns (list)      = List of all columns
            predictor_columns (list)= Columns used for predicting the missing values in to_impute[0]
        Return:
            (pd.Dataframe) having ONLY categorical columns with imputed missing values.
            Also fill_dict gets updated for each categorical column having missing values.

        Steps:
            1. Loop once per col in cat_list.
            2. That col is the one in which missing values (if present) are to be imputed.
            3. Find predictor_columns.
            4. Temporarily impute missing values in predictor_columns using mean/mode.
            5. Join X_NumericImputed and the to_impute col to this dataframe.
            6. Create dummy variables (excluding the to_impute col, as it is also categorical).
            7. Update the predictor_columns list.
            8. Since the to_impute col is categorical, convert it into indexed form.
            9. Get the predictions of missing values in the to_impute col.
               Note: y_pred_main is in indexed form and is converted back to original values.
            10. Impute the missing values of this column so that the next iteration has
                fewer missing values to temp-impute via mean/mode; also convert the
                indexed version of to_impute back to its original categoric values.
            11. Add the col name (to_impute) and predicted missing values to fill_dict.
        """
        X_backup = X
        to_impute = []
        all_columns = []
        predictor_columns = []

        for i in cat_list:
            X = X_backup
            to_impute = [i]

            # Check if to_impute col has any NaN values. If none, skip this column.
            if ( X[ to_impute ].isnull().values.any()==False):
                continue

            all_columns = list(X.columns)
            predictor_columns = all_columns.copy()
            predictor_columns.remove( to_impute[0] )

            # tempImpute_columns = cols to be imputed temporarily using mode, i.e (cat_list - to_impute[0]).
            # to_impute is removed because missing values of this column are to be predicted by the ML model.
            # Only cat_list cols are passed to getTempImutedData() and later joined with X_NumericImputed,
            # so getTempImutedData() takes minimum time and does not iterate over already-imputed numeric cols.
            tempImpute_columns = cat_list.copy()
            tempImpute_columns.remove( to_impute[0] )
            X = X[ tempImpute_columns ]

            # Temporarily impute the missing values in X (using Mean and Mode)
            X = self.getTempImutedData( X )

            # X = X(tempImpute_columns) + X_NumericImputed + main_df(to_impute)
            X = X.join( X_NumericImputed, how='outer')
            X = X.join( main_df[ to_impute ], how='outer')

            # Converting categorical cols to indexed numeric so that they can be fed to the ML model.
            # columns_to_dummify excludes to_impute[0], as it is categorical but will be imputed by the ML model.
            columns_to_dummify = list(X.select_dtypes(include=[object, 'category']))
            if( to_impute[0] in columns_to_dummify ):
                columns_to_dummify.remove( to_impute[0] )
            else:
                raise HealthcareAIError( "Col to_impute = {} not found in columns_to_dummify = {}".format( to_impute[0], str(columns_to_dummify) ) )
            X = pd.get_dummies(X, columns=columns_to_dummify, drop_first=True, prefix_sep='.')

            # Since some new cols are created after get_dummies, update the predictor_columns list
            predictor_columns = list(X.columns)
            predictor_columns.remove(to_impute[0])

            # Since the target col (to_impute[0]) is categorical, convert it into indexed format (0,1,2...).
            # from_List = original categories e.g. ( A, B, C ... )
            # to_List   = indexed values e.g. ( 0, 1, 2 ... )
            target_column = to_impute[0]
            from_List = list( X[target_column].unique() )

            # removing NaN values from from_List (because the to_impute column has missing values as well)
            # NOTE: np.nan (lowercase) — the np.NaN alias was removed in NumPy 2.0.
            if np.nan in from_List:
                from_List.remove(np.nan)
            elif (np.isnan( from_List ).any() ):
                from_List = [i for i in from_List if str(i) != 'nan']
            else:
                raise HealthcareAIError( "Null values didn't captured properly for col = {}, having unique values as = {}".format( to_impute[0], from_List ) )

            # Ensuring each value in from_List is a string (to_impute is a categorical col
            # and from_List holds its unique values)
            from_List = list( map( str, from_List) )
            from_List.sort()

            # creating indexed version of from_List values
            to_List = [ i for i in range( 0,len(from_List) ) ]
            X[ target_column] = X[ target_column ].replace( from_List, to_List, inplace=False)

            # Get the predicted values for missing data
            y_pred_main = self.getImputePredictions( X=X, predictor_columns=predictor_columns, to_impute=to_impute, toImputeType='categorical' )

            # updating the imputed values of to_impute col in X_backup
            # In the next iteration this X_backup will be used as the base dataframe (X)
            X_backup.loc[ X_backup[ to_impute[0] ].isnull(), to_impute[0] ] = y_pred_main

            # Reconverting the indexed to_impute column into its original form
            from_List, to_List = to_List, from_List
            X_backup[ to_impute] = X_backup[ to_impute ].replace( from_List, to_List, inplace=False)

            # add the imputation data to the fill_dict
            # For that, first convert y_pred_main (indexed, e.g. 1,2..) into actual values (e.g. A, B...)
            y_pred_main_df = pd.DataFrame( data=y_pred_main, columns=to_impute )
            y_pred_main_df[ to_impute] = y_pred_main_df[ to_impute ].replace( from_List, to_List, inplace=False)
            self.fill_dict[ to_impute[0] ] = y_pred_main_df[ to_impute[0] ].values

            if self.verbose:
                print("Missing value imputation completed for column: \'{}\'".format(to_impute[0]))

        X_CategoricImputed = X_backup[ cat_list ].copy()
        return X_CategoricImputed

    def getTempImutedData( self, X ):
        """
        Temporary imputation of missing values in a dataframe using Mean and Mode.

        Actual imputation (of the to_impute col) is done by an ML model's predictions,
        but during pre-processing the other columns may still contain missing values,
        so for the time being those are imputed using the MeanMode strategy.

        Columns of dtype object or category (assumed categorical)
            = imputed with the mode (most frequent value in column).
        Columns of other types (assumed continuous)
            = imputed with the mean of the column.
        """
        object_columns = X.select_dtypes(include=['object']).columns.values
        fill = pd.Series( [ X[c].value_counts().index[0] if X[c].dtype == np.dtype('O') or pd.api.types.is_categorical_dtype(X[c])
                            else X[c].mean()
                            for c in X
                          ]
                          , index=X.columns)
        result = X.fillna( fill )
        for i in object_columns:
            if result[i].dtype not in ['object', 'category']:
                result[i] = result[i].astype('object')
        return result.copy()

    def getImputePredictions( self, X, predictor_columns, to_impute, toImputeType ):
        """
        Generate predictions of missing values.

        Args:
            X (pd.Dataframe)          = Input dataframe
            predictor_columns (list)  = List of input columns for the ML model
            to_impute (list)          = List (always a single element) holding the output column
            toImputeType (str)        = type of column to be imputed ('numeric' or 'categorical')
        Return:
            y_pred_main (numpy.array) = predicted values for missing cells
        """
        # Separating the main DF into train (no NaN) and test (having NaN) data
        train = X[ X[to_impute[0]].isnull()==False ]
        test = X[ X[to_impute[0]].isnull() ]

        # General X, y used for train test split
        # X_main = DF based on which we predict the NaN of the to_impute col
        X = train[ predictor_columns ]
        y = train[ to_impute[0] ].values
        X_main = test[ predictor_columns ]
        X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.2, random_state=100 )

        if( toImputeType=='numeric' ):
            algo = RandomForestRegressor( random_state=100 )
        elif( toImputeType=='categorical' ):
            algo = RandomForestClassifier( random_state=100 )
        else:
            raise HealthcareAIError("invalid toImputeType selected, select any of these : [ numeric, categorical ]")

        if( self.tunedRandomForest==True ):
            algo = self.getTunedModel( baseModel=algo )

        fit_algo = algo.fit( X_train, y_train )
        y_pred_main = fit_algo.predict( X_main )
        return y_pred_main.copy()

    def getTunedModel( self, baseModel ):
        """Wrap *baseModel* in a small RandomizedSearchCV over common forest hyperparameters."""
        n_estimators = [100, 200, 300, 400, 500]
        # NOTE(review): max_features='auto' was removed for forests in scikit-learn 1.3;
        # on newer versions this search option will raise — confirm pinned sklearn version.
        max_features = ['auto', 'sqrt']
        max_depth = [5, 10, 20, 30, 40, 50]
        min_samples_split = [2, 5, 10]
        min_samples_leaf = [1, 2, 4]
        bootstrap = [True, False]

        random_grid = {'n_estimators': n_estimators,
                       'max_features': max_features,
                       'max_depth': max_depth,
                       'min_samples_split': min_samples_split,
                       'min_samples_leaf': min_samples_leaf,
                       'bootstrap': bootstrap}

        model_tuned = RandomizedSearchCV(estimator = baseModel, param_distributions = random_grid, n_iter = 2, cv = 2, verbose=0, random_state=100 , n_jobs = -1)
        return model_tuned
######################################################################################################################################
class DataFrameConvertTargetToBinary(TransformerMixin):
    # TODO Note that this makes healthcareai only handle N/Y in pred column
    """
    Convert classification model's predicted col to 0/1 (otherwise won't work with GridSearchCV). Passes through data
    for regression models unchanged. This is to simplify the data pipeline logic. (Though that may be a more appropriate
    place for the logic...)
    Note that this makes healthcareai only handle N/Y in pred column
    """

    def __init__(self, model_type, target_column):
        # model_type: 'classification' triggers the conversion; anything else passes through.
        self.model_type = model_type
        # target_column: name of the predicted column containing 'Y'/'N' values.
        self.target_column = target_column

    def fit(self, X, y=None):
        """No-op; present for scikit-learn pipeline compatibility."""
        return self

    def transform(self, X, y=None):
        """Return X with the target column mapped 'Y'->1, 'N'->0 for classification models."""
        # TODO: put try/catch here when type = class and predictor is numeric
        # TODO this makes healthcareai only handle N/Y in pred column
        if self.model_type == 'classification':
            # Assign the replaced Series back instead of calling
            # .replace(..., inplace=True) on a chained selection: the chained
            # in-place form triggers pandas' SettingWithCopy machinery (the
            # original silenced that warning globally as a side effect) and can
            # silently fail to modify X when the selection returns a copy.
            X[self.target_column] = X[self.target_column].replace(['Y', 'N'], [1, 0])
        return X
class DataFrameCreateDummyVariables(TransformerMixin):
    """Convert all categorical columns into dummy/indicator variables. Exclude given columns."""

    def __init__(self, excluded_columns=None):
        # excluded_columns: optional iterable of column names to leave as-is.
        self.excluded_columns = excluded_columns

    def fit(self, X, y=None):
        """No-op; present for scikit-learn pipeline compatibility."""
        return self

    def transform(self, X, y=None):
        """Return X with object/category columns (minus exclusions) dummified."""
        # Treat the None default as "exclude nothing"; the original raised
        # TypeError on `in None` when no exclusions were supplied.
        excluded = self.excluded_columns or []
        # Build a fresh list rather than list.remove() while iterating the same
        # list: the original skipped a column whenever two excluded columns
        # were adjacent in the dtype listing.
        columns_to_dummify = [
            column
            for column in X.select_dtypes(include=[object, 'category'])
            if column not in excluded
        ]
        # drop_first avoids the dummy-variable trap; '.' separates name/level.
        X = pd.get_dummies(X, columns=columns_to_dummify, drop_first=True, prefix_sep='.')
        return X
class DataFrameConvertColumnToNumeric(TransformerMixin):
    """Coerce a single named column to a numeric dtype."""

    def __init__(self, column_name):
        # column_name: the column to convert on each transform call.
        self.column_name = column_name

    def fit(self, X, y=None):
        """No-op; present for scikit-learn pipeline compatibility."""
        return self

    def transform(self, X, y=None):
        """Return X with ``column_name`` converted via pandas.to_numeric.

        Raises on unparseable values (errors='raise').
        """
        target = self.column_name
        X[target] = pd.to_numeric(arg=X[target], errors='raise')
        return X
class DataFrameUnderSampling(TransformerMixin):
    """
    Performs undersampling on a dataframe.
    Must be done BEFORE train/test split so that when we split the under/over sampled dataset.
    Must be done AFTER imputation, since under/over sampling will not work with missing values (imblearn requires target
    column to be converted to numerical values)
    """

    def __init__(self, predicted_column, random_seed=0):
        # random_seed: forwarded to imblearn for reproducible sampling.
        self.random_seed = random_seed
        # predicted_column: name of the (already numeric) target column.
        self.predicted_column = predicted_column

    def fit(self, X, y=None):
        """No-op; present for scikit-learn pipeline compatibility."""
        return self

    def transform(self, X, y=None):
        """Return a class-balanced copy of X with majority-class rows dropped."""
        # TODO how do we validate this happens before train/test split? Or do we need to? Can we implement it in the
        # TODO simple trainer in the correct order and leave this to advanced users?
        target = self.predicted_column
        # Separate the target vector from the feature columns.
        y = np.squeeze(X[[target]])
        features = X.drop([target], axis=1)
        # imblearn balances classes by discarding majority-class rows.
        sampler = RandomUnderSampler(random_state=self.random_seed)
        sampled_features, sampled_target = sampler.fit_sample(features, y)
        # imblearn returns plain arrays, so rebuild the dataframe and reattach
        # the original column names and the target column.
        balanced = pd.DataFrame(sampled_features)
        balanced.columns = features.columns
        balanced[target] = pd.Series(sampled_target)
        return balanced
class DataFrameOverSampling(TransformerMixin):
    """
    Performs oversampling on a dataframe.
    Must be done BEFORE train/test split so that when we split the under/over sampled dataset.
    Must be done AFTER imputation, since under/over sampling will not work with missing values (imblearn requires target
    column to be converted to numerical values)
    """

    def __init__(self, predicted_column, random_seed=0):
        # random_seed: forwarded to imblearn for reproducible sampling.
        self.random_seed = random_seed
        # predicted_column: name of the (already numeric) target column.
        self.predicted_column = predicted_column

    def fit(self, X, y=None):
        """No-op; present for scikit-learn pipeline compatibility."""
        return self

    def transform(self, X, y=None):
        """Return a class-balanced copy of X with minority-class rows duplicated."""
        # TODO how do we validate this happens before train/test split? Or do we need to? Can we implement it in the
        # TODO simple trainer in the correct order and leave this to advanced users?
        target = self.predicted_column
        # Separate the target vector from the feature columns.
        y = np.squeeze(X[[target]])
        features = X.drop([target], axis=1)
        # imblearn balances classes by replicating minority-class rows.
        sampler = RandomOverSampler(random_state=self.random_seed)
        sampled_features, sampled_target = sampler.fit_sample(features, y)
        # imblearn returns plain arrays, so rebuild the dataframe and reattach
        # the original column names and the target column.
        balanced = pd.DataFrame(sampled_features)
        balanced.columns = features.columns
        balanced[target] = pd.Series(sampled_target)
        return balanced
class DataFrameDropNaN(TransformerMixin):
    """Drop columns whose values are entirely NaN/None."""

    def __init__(self):
        pass

    def fit(self, X, y=None):
        """No-op; present for scikit-learn pipeline compatibility."""
        return self

    def transform(self, X, y=None):
        """Return X without all-null columns.

        axis=1 selects column-wise removal; how='all' keeps any column that
        has at least one non-null value.
        """
        return X.dropna(axis=1, how='all')
class DataFrameFeatureScaling(TransformerMixin):
    """Standard-scale numeric features, or an explicitly given column subset."""

    def __init__(self, columns_to_scale=None, reuse=None):
        # columns_to_scale: optional explicit column list; defaults to all numerics.
        self.columns_to_scale = columns_to_scale
        # reuse: optional existing scaling transformer to delegate to.
        self.reuse = reuse

    def fit(self, X, y=None):
        """No-op; present for scikit-learn pipeline compatibility."""
        return self

    def transform(self, X, y=None):
        """Return X with the chosen columns standardized (zero mean, unit variance)."""
        # Delegate entirely to a previously-built scaling transformer if supplied.
        if self.reuse:
            return self.reuse.fit_transform(X, y)
        # Default to every numeric column. NOTE: this caches the choice on the
        # instance, so the first transform() call fixes the column set for
        # later calls — preserved from the original implementation.
        if not self.columns_to_scale:
            self.columns_to_scale = list(X.select_dtypes(include=[np.number]).columns)
        cols = self.columns_to_scale
        X[cols] = StandardScaler().fit_transform(X[cols])
        return X
| HealthCatalystSLC/healthcareai-py | healthcareai/common/transformers.py | Python | mit | 46,645 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.